@@ -7,6 +7,7 @@
* Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
* Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
* Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
+ * Copyright (C) 2009 Google, Inc., Stephane Eranian
*
* For licencing details see kernel-base/COPYING
*/
@@ -22,6 +23,7 @@
#include <linux/uaccess.h>
#include <linux/highmem.h>
#include <linux/cpu.h>
+#include <linux/bitops.h>
#include <asm/apic.h>
#include <asm/stacktrace.h>
@@ -68,26 +70,59 @@ struct debug_store {
|
|
|
u64 pebs_event_reset[MAX_PEBS_EVENTS];
|
|
|
};
|
|
|
|
|
|
+struct event_constraint {
|
|
|
+ union {
|
|
|
+ unsigned long idxmsk[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
|
|
|
+ u64 idxmsk64[1];
|
|
|
+ };
|
|
|
+ int code;
|
|
|
+ int cmask;
|
|
|
+ int weight;
|
|
|
+};
|
|
|
+
|
|
|
+struct amd_nb {
|
|
|
+ int nb_id; /* NorthBridge id */
|
|
|
+ int refcnt; /* reference count */
|
|
|
+ struct perf_event *owners[X86_PMC_IDX_MAX];
|
|
|
+ struct event_constraint event_constraints[X86_PMC_IDX_MAX];
|
|
|
+};
|
|
|
+
|
|
|
struct cpu_hw_events {
|
|
|
- struct perf_event *events[X86_PMC_IDX_MAX];
|
|
|
- unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
|
|
|
+ struct perf_event *events[X86_PMC_IDX_MAX]; /* in counter order */
|
|
|
unsigned long active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
|
|
|
unsigned long interrupts;
|
|
|
int enabled;
|
|
|
struct debug_store *ds;
|
|
|
-};
|
|
|
|
|
|
-struct event_constraint {
|
|
|
- unsigned long idxmsk[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
|
|
|
- int code;
|
|
|
+ int n_events;
|
|
|
+ int n_added;
|
|
|
+ int assign[X86_PMC_IDX_MAX]; /* event to counter assignment */
|
|
|
+ u64 tags[X86_PMC_IDX_MAX];
|
|
|
+ struct perf_event *event_list[X86_PMC_IDX_MAX]; /* in enabled order */
|
|
|
+ struct amd_nb *amd_nb;
|
|
|
};
|
|
|
|
|
|
-#define EVENT_CONSTRAINT(c, m) { .code = (c), .idxmsk[0] = (m) }
|
|
|
-#define EVENT_CONSTRAINT_END { .code = 0, .idxmsk[0] = 0 }
|
|
|
+#define __EVENT_CONSTRAINT(c, n, m, w) {\
|
|
|
+ { .idxmsk64[0] = (n) }, \
|
|
|
+ .code = (c), \
|
|
|
+ .cmask = (m), \
|
|
|
+ .weight = (w), \
|
|
|
+}
|
|
|
+
|
|
|
+#define EVENT_CONSTRAINT(c, n, m) \
|
|
|
+ __EVENT_CONSTRAINT(c, n, m, HWEIGHT(n))
|
|
|
+
|
|
|
+#define INTEL_EVENT_CONSTRAINT(c, n) \
|
|
|
+ EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVTSEL_MASK)
|
|
|
|
|
|
-#define for_each_event_constraint(e, c) \
|
|
|
- for ((e) = (c); (e)->idxmsk[0]; (e)++)
|
|
|
+#define FIXED_EVENT_CONSTRAINT(c, n) \
|
|
|
+ EVENT_CONSTRAINT(c, n, INTEL_ARCH_FIXED_MASK)
|
|
|
|
|
|
+#define EVENT_CONSTRAINT_END \
|
|
|
+ EVENT_CONSTRAINT(0, 0, 0)
|
|
|
+
|
|
|
+#define for_each_event_constraint(e, c) \
|
|
|
+ for ((e) = (c); (e)->cmask; (e)++)
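The macros above pack "event code c is only allowed on the counters set in bitmask n" into a single initializer: cmask selects which bits of the event config take part in the match, and weight caches the popcount of the counter mask (HWEIGHT(n)). Below is a minimal stand-alone sketch of the same encoding, outside the patch; the event codes, masks and weights are invented for illustration.

#include <stdio.h>
#include <stdint.h>

struct constraint {
    uint64_t idxmsk;  /* allowed counters, one bit per counter index     */
    int      code;    /* event-select code this entry applies to         */
    int      cmask;   /* which bits of the config take part in the match */
    int      weight;  /* popcount of idxmsk, i.e. HWEIGHT(n)             */
};

static const struct constraint table[] = {
    { 0x3, 0x12, 0xff, 2 },  /* invented: event 0x12 -> counters 0 and 1 only */
    { 0x1, 0x14, 0xff, 1 },  /* invented: event 0x14 -> counter 0 only        */
    { 0, 0, 0, 0 },          /* cmask == 0 ends the walk, like EVENT_CONSTRAINT_END */
};

int main(void)
{
    unsigned config = 0x12;  /* raw event-select value to look up */
    const struct constraint *c;

    /* same loop shape as for_each_event_constraint(): stop on cmask == 0 */
    for (c = table; c->cmask; c++) {
        if ((config & (unsigned)c->cmask) == (unsigned)c->code)
            printf("event %#x may use counters %#llx (weight %d)\n",
                   config, (unsigned long long)c->idxmsk, c->weight);
    }
    return 0;
}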
|
|
|
|
|
|
/*
|
|
|
* struct x86_pmu - generic x86 pmu
|
|
@@ -114,8 +149,14 @@ struct x86_pmu {
|
|
|
u64 intel_ctrl;
|
|
|
void (*enable_bts)(u64 config);
|
|
|
void (*disable_bts)(void);
|
|
|
- int (*get_event_idx)(struct cpu_hw_events *cpuc,
|
|
|
- struct hw_perf_event *hwc);
|
|
|
+
|
|
|
+ struct event_constraint *
|
|
|
+ (*get_event_constraints)(struct cpu_hw_events *cpuc,
|
|
|
+ struct perf_event *event);
|
|
|
+
|
|
|
+ void (*put_event_constraints)(struct cpu_hw_events *cpuc,
|
|
|
+ struct perf_event *event);
|
|
|
+ struct event_constraint *event_constraints;
|
|
|
};
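Model-specific PMU descriptions plug into the generic scheduler through the two callbacks and the optional static table added above. The stand-alone sketch below shows only the wiring; the stub types and every my_* name are invented for illustration and are not part of the patch.

#include <stddef.h>

struct cpu_hw_events;                     /* opaque in this sketch */
struct perf_event;                        /* opaque in this sketch */
struct event_constraint { int code, cmask, weight; };

/* just the three members this patch adds to struct x86_pmu */
struct pmu_sketch {
    struct event_constraint *(*get_event_constraints)(struct cpu_hw_events *,
                                                       struct perf_event *);
    void (*put_event_constraints)(struct cpu_hw_events *, struct perf_event *);
    struct event_constraint *event_constraints;
};

static struct event_constraint my_table[] = {
    { 0x12, 0xff, 2 },                    /* invented entry          */
    { 0, 0, 0 },                          /* terminator (cmask == 0) */
};

static struct event_constraint *
my_get(struct cpu_hw_events *cpuc, struct perf_event *event)
{
    (void)cpuc; (void)event;
    return &my_table[0];                  /* a real driver matches the event here */
}

static struct pmu_sketch my_pmu = {
    .get_event_constraints = my_get,
    .put_event_constraints = NULL,        /* only needed when "get" reserves state */
    .event_constraints     = my_table,
};

int main(void)
{
    return my_pmu.get_event_constraints(NULL, NULL)->weight - 2;  /* exits 0 */
}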
|
|
|
|
|
|
static struct x86_pmu x86_pmu __read_mostly;
|
|
@@ -124,111 +165,8 @@ static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
|
|
|
.enabled = 1,
|
|
|
};
|
|
|
|
|
|
-static const struct event_constraint *event_constraints;
|
|
|
-
|
|
|
-/*
|
|
|
- * Not sure about some of these
|
|
|
- */
|
|
|
-static const u64 p6_perfmon_event_map[] =
|
|
|
-{
|
|
|
- [PERF_COUNT_HW_CPU_CYCLES] = 0x0079,
|
|
|
- [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
|
|
|
- [PERF_COUNT_HW_CACHE_REFERENCES] = 0x0f2e,
|
|
|
- [PERF_COUNT_HW_CACHE_MISSES] = 0x012e,
|
|
|
- [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c4,
|
|
|
- [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c5,
|
|
|
- [PERF_COUNT_HW_BUS_CYCLES] = 0x0062,
|
|
|
-};
|
|
|
-
|
|
|
-static u64 p6_pmu_event_map(int hw_event)
|
|
|
-{
|
|
|
- return p6_perfmon_event_map[hw_event];
|
|
|
-}
|
|
|
-
|
|
|
-/*
|
|
|
- * Event setting that is specified not to count anything.
|
|
|
- * We use this to effectively disable a counter.
|
|
|
- *
|
|
|
- * L2_RQSTS with 0 MESI unit mask.
|
|
|
- */
|
|
|
-#define P6_NOP_EVENT 0x0000002EULL
|
|
|
-
|
|
|
-static u64 p6_pmu_raw_event(u64 hw_event)
|
|
|
-{
|
|
|
-#define P6_EVNTSEL_EVENT_MASK 0x000000FFULL
|
|
|
-#define P6_EVNTSEL_UNIT_MASK 0x0000FF00ULL
|
|
|
-#define P6_EVNTSEL_EDGE_MASK 0x00040000ULL
|
|
|
-#define P6_EVNTSEL_INV_MASK 0x00800000ULL
|
|
|
-#define P6_EVNTSEL_REG_MASK 0xFF000000ULL
|
|
|
-
|
|
|
-#define P6_EVNTSEL_MASK \
|
|
|
- (P6_EVNTSEL_EVENT_MASK | \
|
|
|
- P6_EVNTSEL_UNIT_MASK | \
|
|
|
- P6_EVNTSEL_EDGE_MASK | \
|
|
|
- P6_EVNTSEL_INV_MASK | \
|
|
|
- P6_EVNTSEL_REG_MASK)
|
|
|
-
|
|
|
- return hw_event & P6_EVNTSEL_MASK;
|
|
|
-}
|
|
|
-
|
|
|
-static const struct event_constraint intel_p6_event_constraints[] =
|
|
|
-{
|
|
|
- EVENT_CONSTRAINT(0xc1, 0x1), /* FLOPS */
|
|
|
- EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */
|
|
|
- EVENT_CONSTRAINT(0x11, 0x1), /* FP_ASSIST */
|
|
|
- EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
|
|
|
- EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
|
|
|
- EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
|
|
|
- EVENT_CONSTRAINT_END
|
|
|
-};
|
|
|
-
|
|
|
-/*
|
|
|
- * Intel PerfMon v3. Used on Core2 and later.
|
|
|
- */
|
|
|
-static const u64 intel_perfmon_event_map[] =
|
|
|
-{
|
|
|
- [PERF_COUNT_HW_CPU_CYCLES] = 0x003c,
|
|
|
- [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
|
|
|
- [PERF_COUNT_HW_CACHE_REFERENCES] = 0x4f2e,
|
|
|
- [PERF_COUNT_HW_CACHE_MISSES] = 0x412e,
|
|
|
- [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c4,
|
|
|
- [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c5,
|
|
|
- [PERF_COUNT_HW_BUS_CYCLES] = 0x013c,
|
|
|
-};
|
|
|
-
|
|
|
-static const struct event_constraint intel_core_event_constraints[] =
|
|
|
-{
|
|
|
- EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */
|
|
|
- EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
|
|
|
- EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
|
|
|
- EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
|
|
|
- EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
|
|
|
- EVENT_CONSTRAINT(0x18, 0x1), /* IDLE_DURING_DIV */
|
|
|
- EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
|
|
|
- EVENT_CONSTRAINT(0xa1, 0x1), /* RS_UOPS_DISPATCH_CYCLES */
|
|
|
- EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED */
|
|
|
- EVENT_CONSTRAINT_END
|
|
|
-};
|
|
|
-
|
|
|
-static const struct event_constraint intel_nehalem_event_constraints[] =
|
|
|
-{
|
|
|
- EVENT_CONSTRAINT(0x40, 0x3), /* L1D_CACHE_LD */
|
|
|
- EVENT_CONSTRAINT(0x41, 0x3), /* L1D_CACHE_ST */
|
|
|
- EVENT_CONSTRAINT(0x42, 0x3), /* L1D_CACHE_LOCK */
|
|
|
- EVENT_CONSTRAINT(0x43, 0x3), /* L1D_ALL_REF */
|
|
|
- EVENT_CONSTRAINT(0x4e, 0x3), /* L1D_PREFETCH */
|
|
|
- EVENT_CONSTRAINT(0x4c, 0x3), /* LOAD_HIT_PRE */
|
|
|
- EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
|
|
|
- EVENT_CONSTRAINT(0x52, 0x3), /* L1D_CACHE_PREFETCH_LOCK_FB_HIT */
|
|
|
- EVENT_CONSTRAINT(0x53, 0x3), /* L1D_CACHE_LOCK_FB_HIT */
|
|
|
- EVENT_CONSTRAINT(0xc5, 0x3), /* CACHE_LOCK_CYCLES */
|
|
|
- EVENT_CONSTRAINT_END
|
|
|
-};
|
|
|
-
|
|
|
-static u64 intel_pmu_event_map(int hw_event)
|
|
|
-{
|
|
|
- return intel_perfmon_event_map[hw_event];
|
|
|
-}
|
|
|
+static int x86_perf_event_set_period(struct perf_event *event,
|
|
|
+ struct hw_perf_event *hwc, int idx);
|
|
|
|
|
|
/*
|
|
|
* Generalized hw caching related hw_event table, filled
|
|
@@ -245,424 +183,6 @@ static u64 __read_mostly hw_cache_event_ids
|
|
|
[PERF_COUNT_HW_CACHE_OP_MAX]
|
|
|
[PERF_COUNT_HW_CACHE_RESULT_MAX];
|
|
|
|
|
|
-static __initconst u64 nehalem_hw_cache_event_ids
|
|
|
- [PERF_COUNT_HW_CACHE_MAX]
|
|
|
- [PERF_COUNT_HW_CACHE_OP_MAX]
|
|
|
- [PERF_COUNT_HW_CACHE_RESULT_MAX] =
|
|
|
-{
|
|
|
- [ C(L1D) ] = {
|
|
|
- [ C(OP_READ) ] = {
|
|
|
- [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI */
|
|
|
- [ C(RESULT_MISS) ] = 0x0140, /* L1D_CACHE_LD.I_STATE */
|
|
|
- },
|
|
|
- [ C(OP_WRITE) ] = {
|
|
|
- [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI */
|
|
|
- [ C(RESULT_MISS) ] = 0x0141, /* L1D_CACHE_ST.I_STATE */
|
|
|
- },
|
|
|
- [ C(OP_PREFETCH) ] = {
|
|
|
- [ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
|
|
|
- [ C(RESULT_MISS) ] = 0x024e, /* L1D_PREFETCH.MISS */
|
|
|
- },
|
|
|
- },
|
|
|
- [ C(L1I ) ] = {
|
|
|
- [ C(OP_READ) ] = {
|
|
|
- [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
|
|
|
- [ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
|
|
|
- },
|
|
|
- [ C(OP_WRITE) ] = {
|
|
|
- [ C(RESULT_ACCESS) ] = -1,
|
|
|
- [ C(RESULT_MISS) ] = -1,
|
|
|
- },
|
|
|
- [ C(OP_PREFETCH) ] = {
|
|
|
- [ C(RESULT_ACCESS) ] = 0x0,
|
|
|
- [ C(RESULT_MISS) ] = 0x0,
|
|
|
- },
|
|
|
- },
|
|
|
- [ C(LL ) ] = {
|
|
|
- [ C(OP_READ) ] = {
|
|
|
- [ C(RESULT_ACCESS) ] = 0x0324, /* L2_RQSTS.LOADS */
|
|
|
- [ C(RESULT_MISS) ] = 0x0224, /* L2_RQSTS.LD_MISS */
|
|
|
- },
|
|
|
- [ C(OP_WRITE) ] = {
|
|
|
- [ C(RESULT_ACCESS) ] = 0x0c24, /* L2_RQSTS.RFOS */
|
|
|
- [ C(RESULT_MISS) ] = 0x0824, /* L2_RQSTS.RFO_MISS */
|
|
|
- },
|
|
|
- [ C(OP_PREFETCH) ] = {
|
|
|
- [ C(RESULT_ACCESS) ] = 0x4f2e, /* LLC Reference */
|
|
|
- [ C(RESULT_MISS) ] = 0x412e, /* LLC Misses */
|
|
|
- },
|
|
|
- },
|
|
|
- [ C(DTLB) ] = {
|
|
|
- [ C(OP_READ) ] = {
|
|
|
- [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
|
|
|
- [ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
|
|
|
- },
|
|
|
- [ C(OP_WRITE) ] = {
|
|
|
- [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
|
|
|
- [ C(RESULT_MISS) ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
|
|
|
- },
|
|
|
- [ C(OP_PREFETCH) ] = {
|
|
|
- [ C(RESULT_ACCESS) ] = 0x0,
|
|
|
- [ C(RESULT_MISS) ] = 0x0,
|
|
|
- },
|
|
|
- },
|
|
|
- [ C(ITLB) ] = {
|
|
|
- [ C(OP_READ) ] = {
|
|
|
- [ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
|
|
|
- [ C(RESULT_MISS) ] = 0x20c8, /* ITLB_MISS_RETIRED */
|
|
|
- },
|
|
|
- [ C(OP_WRITE) ] = {
|
|
|
- [ C(RESULT_ACCESS) ] = -1,
|
|
|
- [ C(RESULT_MISS) ] = -1,
|
|
|
- },
|
|
|
- [ C(OP_PREFETCH) ] = {
|
|
|
- [ C(RESULT_ACCESS) ] = -1,
|
|
|
- [ C(RESULT_MISS) ] = -1,
|
|
|
- },
|
|
|
- },
|
|
|
- [ C(BPU ) ] = {
|
|
|
- [ C(OP_READ) ] = {
|
|
|
- [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
|
|
|
- [ C(RESULT_MISS) ] = 0x03e8, /* BPU_CLEARS.ANY */
|
|
|
- },
|
|
|
- [ C(OP_WRITE) ] = {
|
|
|
- [ C(RESULT_ACCESS) ] = -1,
|
|
|
- [ C(RESULT_MISS) ] = -1,
|
|
|
- },
|
|
|
- [ C(OP_PREFETCH) ] = {
|
|
|
- [ C(RESULT_ACCESS) ] = -1,
|
|
|
- [ C(RESULT_MISS) ] = -1,
|
|
|
- },
|
|
|
- },
|
|
|
-};
|
|
|
-
|
|
|
-static __initconst u64 core2_hw_cache_event_ids
|
|
|
- [PERF_COUNT_HW_CACHE_MAX]
|
|
|
- [PERF_COUNT_HW_CACHE_OP_MAX]
|
|
|
- [PERF_COUNT_HW_CACHE_RESULT_MAX] =
|
|
|
-{
|
|
|
- [ C(L1D) ] = {
|
|
|
- [ C(OP_READ) ] = {
|
|
|
- [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI */
|
|
|
- [ C(RESULT_MISS) ] = 0x0140, /* L1D_CACHE_LD.I_STATE */
|
|
|
- },
|
|
|
- [ C(OP_WRITE) ] = {
|
|
|
- [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI */
|
|
|
- [ C(RESULT_MISS) ] = 0x0141, /* L1D_CACHE_ST.I_STATE */
|
|
|
- },
|
|
|
- [ C(OP_PREFETCH) ] = {
|
|
|
- [ C(RESULT_ACCESS) ] = 0x104e, /* L1D_PREFETCH.REQUESTS */
|
|
|
- [ C(RESULT_MISS) ] = 0,
|
|
|
- },
|
|
|
- },
|
|
|
- [ C(L1I ) ] = {
|
|
|
- [ C(OP_READ) ] = {
|
|
|
- [ C(RESULT_ACCESS) ] = 0x0080, /* L1I.READS */
|
|
|
- [ C(RESULT_MISS) ] = 0x0081, /* L1I.MISSES */
|
|
|
- },
|
|
|
- [ C(OP_WRITE) ] = {
|
|
|
- [ C(RESULT_ACCESS) ] = -1,
|
|
|
- [ C(RESULT_MISS) ] = -1,
|
|
|
- },
|
|
|
- [ C(OP_PREFETCH) ] = {
|
|
|
- [ C(RESULT_ACCESS) ] = 0,
|
|
|
- [ C(RESULT_MISS) ] = 0,
|
|
|
- },
|
|
|
- },
|
|
|
- [ C(LL ) ] = {
|
|
|
- [ C(OP_READ) ] = {
|
|
|
- [ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
|
|
|
- [ C(RESULT_MISS) ] = 0x4129, /* L2_LD.ISTATE */
|
|
|
- },
|
|
|
- [ C(OP_WRITE) ] = {
|
|
|
- [ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
|
|
|
- [ C(RESULT_MISS) ] = 0x412A, /* L2_ST.ISTATE */
|
|
|
- },
|
|
|
- [ C(OP_PREFETCH) ] = {
|
|
|
- [ C(RESULT_ACCESS) ] = 0,
|
|
|
- [ C(RESULT_MISS) ] = 0,
|
|
|
- },
|
|
|
- },
|
|
|
- [ C(DTLB) ] = {
|
|
|
- [ C(OP_READ) ] = {
|
|
|
- [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
|
|
|
- [ C(RESULT_MISS) ] = 0x0208, /* DTLB_MISSES.MISS_LD */
|
|
|
- },
|
|
|
- [ C(OP_WRITE) ] = {
|
|
|
- [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
|
|
|
- [ C(RESULT_MISS) ] = 0x0808, /* DTLB_MISSES.MISS_ST */
|
|
|
- },
|
|
|
- [ C(OP_PREFETCH) ] = {
|
|
|
- [ C(RESULT_ACCESS) ] = 0,
|
|
|
- [ C(RESULT_MISS) ] = 0,
|
|
|
- },
|
|
|
- },
|
|
|
- [ C(ITLB) ] = {
|
|
|
- [ C(OP_READ) ] = {
|
|
|
- [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
|
|
|
- [ C(RESULT_MISS) ] = 0x1282, /* ITLBMISSES */
|
|
|
- },
|
|
|
- [ C(OP_WRITE) ] = {
|
|
|
- [ C(RESULT_ACCESS) ] = -1,
|
|
|
- [ C(RESULT_MISS) ] = -1,
|
|
|
- },
|
|
|
- [ C(OP_PREFETCH) ] = {
|
|
|
- [ C(RESULT_ACCESS) ] = -1,
|
|
|
- [ C(RESULT_MISS) ] = -1,
|
|
|
- },
|
|
|
- },
|
|
|
- [ C(BPU ) ] = {
|
|
|
- [ C(OP_READ) ] = {
|
|
|
- [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
|
|
|
- [ C(RESULT_MISS) ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
|
|
|
- },
|
|
|
- [ C(OP_WRITE) ] = {
|
|
|
- [ C(RESULT_ACCESS) ] = -1,
|
|
|
- [ C(RESULT_MISS) ] = -1,
|
|
|
- },
|
|
|
- [ C(OP_PREFETCH) ] = {
|
|
|
- [ C(RESULT_ACCESS) ] = -1,
|
|
|
- [ C(RESULT_MISS) ] = -1,
|
|
|
- },
|
|
|
- },
|
|
|
-};
|
|
|
-
|
|
|
-static __initconst u64 atom_hw_cache_event_ids
|
|
|
- [PERF_COUNT_HW_CACHE_MAX]
|
|
|
- [PERF_COUNT_HW_CACHE_OP_MAX]
|
|
|
- [PERF_COUNT_HW_CACHE_RESULT_MAX] =
|
|
|
-{
|
|
|
- [ C(L1D) ] = {
|
|
|
- [ C(OP_READ) ] = {
|
|
|
- [ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE.LD */
|
|
|
- [ C(RESULT_MISS) ] = 0,
|
|
|
- },
|
|
|
- [ C(OP_WRITE) ] = {
|
|
|
- [ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE.ST */
|
|
|
- [ C(RESULT_MISS) ] = 0,
|
|
|
- },
|
|
|
- [ C(OP_PREFETCH) ] = {
|
|
|
- [ C(RESULT_ACCESS) ] = 0x0,
|
|
|
- [ C(RESULT_MISS) ] = 0,
|
|
|
- },
|
|
|
- },
|
|
|
- [ C(L1I ) ] = {
|
|
|
- [ C(OP_READ) ] = {
|
|
|
- [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
|
|
|
- [ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
|
|
|
- },
|
|
|
- [ C(OP_WRITE) ] = {
|
|
|
- [ C(RESULT_ACCESS) ] = -1,
|
|
|
- [ C(RESULT_MISS) ] = -1,
|
|
|
- },
|
|
|
- [ C(OP_PREFETCH) ] = {
|
|
|
- [ C(RESULT_ACCESS) ] = 0,
|
|
|
- [ C(RESULT_MISS) ] = 0,
|
|
|
- },
|
|
|
- },
|
|
|
- [ C(LL ) ] = {
|
|
|
- [ C(OP_READ) ] = {
|
|
|
- [ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
|
|
|
- [ C(RESULT_MISS) ] = 0x4129, /* L2_LD.ISTATE */
|
|
|
- },
|
|
|
- [ C(OP_WRITE) ] = {
|
|
|
- [ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
|
|
|
- [ C(RESULT_MISS) ] = 0x412A, /* L2_ST.ISTATE */
|
|
|
- },
|
|
|
- [ C(OP_PREFETCH) ] = {
|
|
|
- [ C(RESULT_ACCESS) ] = 0,
|
|
|
- [ C(RESULT_MISS) ] = 0,
|
|
|
- },
|
|
|
- },
|
|
|
- [ C(DTLB) ] = {
|
|
|
- [ C(OP_READ) ] = {
|
|
|
- [ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE_LD.MESI (alias) */
|
|
|
- [ C(RESULT_MISS) ] = 0x0508, /* DTLB_MISSES.MISS_LD */
|
|
|
- },
|
|
|
- [ C(OP_WRITE) ] = {
|
|
|
- [ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE_ST.MESI (alias) */
|
|
|
- [ C(RESULT_MISS) ] = 0x0608, /* DTLB_MISSES.MISS_ST */
|
|
|
- },
|
|
|
- [ C(OP_PREFETCH) ] = {
|
|
|
- [ C(RESULT_ACCESS) ] = 0,
|
|
|
- [ C(RESULT_MISS) ] = 0,
|
|
|
- },
|
|
|
- },
|
|
|
- [ C(ITLB) ] = {
|
|
|
- [ C(OP_READ) ] = {
|
|
|
- [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
|
|
|
- [ C(RESULT_MISS) ] = 0x0282, /* ITLB.MISSES */
|
|
|
- },
|
|
|
- [ C(OP_WRITE) ] = {
|
|
|
- [ C(RESULT_ACCESS) ] = -1,
|
|
|
- [ C(RESULT_MISS) ] = -1,
|
|
|
- },
|
|
|
- [ C(OP_PREFETCH) ] = {
|
|
|
- [ C(RESULT_ACCESS) ] = -1,
|
|
|
- [ C(RESULT_MISS) ] = -1,
|
|
|
- },
|
|
|
- },
|
|
|
- [ C(BPU ) ] = {
|
|
|
- [ C(OP_READ) ] = {
|
|
|
- [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
|
|
|
- [ C(RESULT_MISS) ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
|
|
|
- },
|
|
|
- [ C(OP_WRITE) ] = {
|
|
|
- [ C(RESULT_ACCESS) ] = -1,
|
|
|
- [ C(RESULT_MISS) ] = -1,
|
|
|
- },
|
|
|
- [ C(OP_PREFETCH) ] = {
|
|
|
- [ C(RESULT_ACCESS) ] = -1,
|
|
|
- [ C(RESULT_MISS) ] = -1,
|
|
|
- },
|
|
|
- },
|
|
|
-};
|
|
|
-
|
|
|
-static u64 intel_pmu_raw_event(u64 hw_event)
|
|
|
-{
|
|
|
-#define CORE_EVNTSEL_EVENT_MASK 0x000000FFULL
|
|
|
-#define CORE_EVNTSEL_UNIT_MASK 0x0000FF00ULL
|
|
|
-#define CORE_EVNTSEL_EDGE_MASK 0x00040000ULL
|
|
|
-#define CORE_EVNTSEL_INV_MASK 0x00800000ULL
|
|
|
-#define CORE_EVNTSEL_REG_MASK 0xFF000000ULL
|
|
|
-
|
|
|
-#define CORE_EVNTSEL_MASK \
|
|
|
- (CORE_EVNTSEL_EVENT_MASK | \
|
|
|
- CORE_EVNTSEL_UNIT_MASK | \
|
|
|
- CORE_EVNTSEL_EDGE_MASK | \
|
|
|
- CORE_EVNTSEL_INV_MASK | \
|
|
|
- CORE_EVNTSEL_REG_MASK)
|
|
|
-
|
|
|
- return hw_event & CORE_EVNTSEL_MASK;
|
|
|
-}
|
|
|
-
|
|
|
-static __initconst u64 amd_hw_cache_event_ids
|
|
|
- [PERF_COUNT_HW_CACHE_MAX]
|
|
|
- [PERF_COUNT_HW_CACHE_OP_MAX]
|
|
|
- [PERF_COUNT_HW_CACHE_RESULT_MAX] =
|
|
|
-{
|
|
|
- [ C(L1D) ] = {
|
|
|
- [ C(OP_READ) ] = {
|
|
|
- [ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses */
|
|
|
- [ C(RESULT_MISS) ] = 0x0041, /* Data Cache Misses */
|
|
|
- },
|
|
|
- [ C(OP_WRITE) ] = {
|
|
|
- [ C(RESULT_ACCESS) ] = 0x0142, /* Data Cache Refills :system */
|
|
|
- [ C(RESULT_MISS) ] = 0,
|
|
|
- },
|
|
|
- [ C(OP_PREFETCH) ] = {
|
|
|
- [ C(RESULT_ACCESS) ] = 0x0267, /* Data Prefetcher :attempts */
|
|
|
- [ C(RESULT_MISS) ] = 0x0167, /* Data Prefetcher :cancelled */
|
|
|
- },
|
|
|
- },
|
|
|
- [ C(L1I ) ] = {
|
|
|
- [ C(OP_READ) ] = {
|
|
|
- [ C(RESULT_ACCESS) ] = 0x0080, /* Instruction cache fetches */
|
|
|
- [ C(RESULT_MISS) ] = 0x0081, /* Instruction cache misses */
|
|
|
- },
|
|
|
- [ C(OP_WRITE) ] = {
|
|
|
- [ C(RESULT_ACCESS) ] = -1,
|
|
|
- [ C(RESULT_MISS) ] = -1,
|
|
|
- },
|
|
|
- [ C(OP_PREFETCH) ] = {
|
|
|
- [ C(RESULT_ACCESS) ] = 0x014B, /* Prefetch Instructions :Load */
|
|
|
- [ C(RESULT_MISS) ] = 0,
|
|
|
- },
|
|
|
- },
|
|
|
- [ C(LL ) ] = {
|
|
|
- [ C(OP_READ) ] = {
|
|
|
- [ C(RESULT_ACCESS) ] = 0x037D, /* Requests to L2 Cache :IC+DC */
|
|
|
- [ C(RESULT_MISS) ] = 0x037E, /* L2 Cache Misses : IC+DC */
|
|
|
- },
|
|
|
- [ C(OP_WRITE) ] = {
|
|
|
- [ C(RESULT_ACCESS) ] = 0x017F, /* L2 Fill/Writeback */
|
|
|
- [ C(RESULT_MISS) ] = 0,
|
|
|
- },
|
|
|
- [ C(OP_PREFETCH) ] = {
|
|
|
- [ C(RESULT_ACCESS) ] = 0,
|
|
|
- [ C(RESULT_MISS) ] = 0,
|
|
|
- },
|
|
|
- },
|
|
|
- [ C(DTLB) ] = {
|
|
|
- [ C(OP_READ) ] = {
|
|
|
- [ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses */
|
|
|
- [ C(RESULT_MISS) ] = 0x0046, /* L1 DTLB and L2 DLTB Miss */
|
|
|
- },
|
|
|
- [ C(OP_WRITE) ] = {
|
|
|
- [ C(RESULT_ACCESS) ] = 0,
|
|
|
- [ C(RESULT_MISS) ] = 0,
|
|
|
- },
|
|
|
- [ C(OP_PREFETCH) ] = {
|
|
|
- [ C(RESULT_ACCESS) ] = 0,
|
|
|
- [ C(RESULT_MISS) ] = 0,
|
|
|
- },
|
|
|
- },
|
|
|
- [ C(ITLB) ] = {
|
|
|
- [ C(OP_READ) ] = {
|
|
|
- [ C(RESULT_ACCESS) ] = 0x0080, /* Instruction fecthes */
|
|
|
- [ C(RESULT_MISS) ] = 0x0085, /* Instr. fetch ITLB misses */
|
|
|
- },
|
|
|
- [ C(OP_WRITE) ] = {
|
|
|
- [ C(RESULT_ACCESS) ] = -1,
|
|
|
- [ C(RESULT_MISS) ] = -1,
|
|
|
- },
|
|
|
- [ C(OP_PREFETCH) ] = {
|
|
|
- [ C(RESULT_ACCESS) ] = -1,
|
|
|
- [ C(RESULT_MISS) ] = -1,
|
|
|
- },
|
|
|
- },
|
|
|
- [ C(BPU ) ] = {
|
|
|
- [ C(OP_READ) ] = {
|
|
|
- [ C(RESULT_ACCESS) ] = 0x00c2, /* Retired Branch Instr. */
|
|
|
- [ C(RESULT_MISS) ] = 0x00c3, /* Retired Mispredicted BI */
|
|
|
- },
|
|
|
- [ C(OP_WRITE) ] = {
|
|
|
- [ C(RESULT_ACCESS) ] = -1,
|
|
|
- [ C(RESULT_MISS) ] = -1,
|
|
|
- },
|
|
|
- [ C(OP_PREFETCH) ] = {
|
|
|
- [ C(RESULT_ACCESS) ] = -1,
|
|
|
- [ C(RESULT_MISS) ] = -1,
|
|
|
- },
|
|
|
- },
|
|
|
-};
|
|
|
-
|
|
|
-/*
|
|
|
- * AMD Performance Monitor K7 and later.
|
|
|
- */
|
|
|
-static const u64 amd_perfmon_event_map[] =
|
|
|
-{
|
|
|
- [PERF_COUNT_HW_CPU_CYCLES] = 0x0076,
|
|
|
- [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
|
|
|
- [PERF_COUNT_HW_CACHE_REFERENCES] = 0x0080,
|
|
|
- [PERF_COUNT_HW_CACHE_MISSES] = 0x0081,
|
|
|
- [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c4,
|
|
|
- [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c5,
|
|
|
-};
|
|
|
-
|
|
|
-static u64 amd_pmu_event_map(int hw_event)
|
|
|
-{
|
|
|
- return amd_perfmon_event_map[hw_event];
|
|
|
-}
|
|
|
-
|
|
|
-static u64 amd_pmu_raw_event(u64 hw_event)
|
|
|
-{
|
|
|
-#define K7_EVNTSEL_EVENT_MASK 0x7000000FFULL
|
|
|
-#define K7_EVNTSEL_UNIT_MASK 0x00000FF00ULL
|
|
|
-#define K7_EVNTSEL_EDGE_MASK 0x000040000ULL
|
|
|
-#define K7_EVNTSEL_INV_MASK 0x000800000ULL
|
|
|
-#define K7_EVNTSEL_REG_MASK 0x0FF000000ULL
|
|
|
-
|
|
|
-#define K7_EVNTSEL_MASK \
|
|
|
- (K7_EVNTSEL_EVENT_MASK | \
|
|
|
- K7_EVNTSEL_UNIT_MASK | \
|
|
|
- K7_EVNTSEL_EDGE_MASK | \
|
|
|
- K7_EVNTSEL_INV_MASK | \
|
|
|
- K7_EVNTSEL_REG_MASK)
|
|
|
-
|
|
|
- return hw_event & K7_EVNTSEL_MASK;
|
|
|
-}
|
|
|
-
|
|
|
/*
|
|
|
* Propagate event elapsed time into the generic event.
|
|
|
* Can only be executed on the CPU where the event is active.
|
|
@@ -914,42 +434,6 @@ set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event_attr *attr)
|
|
|
return 0;
|
|
|
}
|
|
|
|
|
|
-static void intel_pmu_enable_bts(u64 config)
|
|
|
-{
|
|
|
- unsigned long debugctlmsr;
|
|
|
-
|
|
|
- debugctlmsr = get_debugctlmsr();
|
|
|
-
|
|
|
- debugctlmsr |= X86_DEBUGCTL_TR;
|
|
|
- debugctlmsr |= X86_DEBUGCTL_BTS;
|
|
|
- debugctlmsr |= X86_DEBUGCTL_BTINT;
|
|
|
-
|
|
|
- if (!(config & ARCH_PERFMON_EVENTSEL_OS))
|
|
|
- debugctlmsr |= X86_DEBUGCTL_BTS_OFF_OS;
|
|
|
-
|
|
|
- if (!(config & ARCH_PERFMON_EVENTSEL_USR))
|
|
|
- debugctlmsr |= X86_DEBUGCTL_BTS_OFF_USR;
|
|
|
-
|
|
|
- update_debugctlmsr(debugctlmsr);
|
|
|
-}
|
|
|
-
|
|
|
-static void intel_pmu_disable_bts(void)
|
|
|
-{
|
|
|
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
|
|
|
- unsigned long debugctlmsr;
|
|
|
-
|
|
|
- if (!cpuc->ds)
|
|
|
- return;
|
|
|
-
|
|
|
- debugctlmsr = get_debugctlmsr();
|
|
|
-
|
|
|
- debugctlmsr &=
|
|
|
- ~(X86_DEBUGCTL_TR | X86_DEBUGCTL_BTS | X86_DEBUGCTL_BTINT |
|
|
|
- X86_DEBUGCTL_BTS_OFF_OS | X86_DEBUGCTL_BTS_OFF_USR);
|
|
|
-
|
|
|
- update_debugctlmsr(debugctlmsr);
|
|
|
-}
|
|
|
-
|
|
|
/*
|
|
|
* Setup the hardware configuration for a given attr_type
|
|
|
*/
|
|
@@ -988,6 +472,8 @@ static int __hw_perf_event_init(struct perf_event *event)
|
|
|
hwc->config = ARCH_PERFMON_EVENTSEL_INT;
|
|
|
|
|
|
hwc->idx = -1;
|
|
|
+ hwc->last_cpu = -1;
|
|
|
+ hwc->last_tag = ~0ULL;
|
|
|
|
|
|
/*
|
|
|
* Count user and OS events unless requested not to.
|
|
@@ -1056,216 +542,323 @@ static int __hw_perf_event_init(struct perf_event *event)
|
|
|
return 0;
|
|
|
}
|
|
|
|
|
|
-static void p6_pmu_disable_all(void)
|
|
|
+static void x86_pmu_disable_all(void)
|
|
|
{
|
|
|
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
|
|
|
- u64 val;
|
|
|
-
|
|
|
- if (!cpuc->enabled)
|
|
|
- return;
|
|
|
+ int idx;
|
|
|
|
|
|
- cpuc->enabled = 0;
|
|
|
- barrier();
|
|
|
+ for (idx = 0; idx < x86_pmu.num_events; idx++) {
|
|
|
+ u64 val;
|
|
|
|
|
|
- /* p6 only has one enable register */
|
|
|
- rdmsrl(MSR_P6_EVNTSEL0, val);
|
|
|
- val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
|
|
|
- wrmsrl(MSR_P6_EVNTSEL0, val);
|
|
|
+ if (!test_bit(idx, cpuc->active_mask))
|
|
|
+ continue;
|
|
|
+ rdmsrl(x86_pmu.eventsel + idx, val);
|
|
|
+ if (!(val & ARCH_PERFMON_EVENTSEL0_ENABLE))
|
|
|
+ continue;
|
|
|
+ val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
|
|
|
+ wrmsrl(x86_pmu.eventsel + idx, val);
|
|
|
+ }
|
|
|
}
|
|
|
|
|
|
-static void intel_pmu_disable_all(void)
|
|
|
+void hw_perf_disable(void)
|
|
|
{
|
|
|
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
|
|
|
|
|
|
+ if (!x86_pmu_initialized())
|
|
|
+ return;
|
|
|
+
|
|
|
if (!cpuc->enabled)
|
|
|
return;
|
|
|
|
|
|
+ cpuc->n_added = 0;
|
|
|
cpuc->enabled = 0;
|
|
|
barrier();
|
|
|
|
|
|
- wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
|
|
|
-
|
|
|
- if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask))
|
|
|
- intel_pmu_disable_bts();
|
|
|
+ x86_pmu.disable_all();
|
|
|
}
|
|
|
|
|
|
-static void amd_pmu_disable_all(void)
|
|
|
+static void x86_pmu_enable_all(void)
|
|
|
{
|
|
|
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
|
|
|
int idx;
|
|
|
|
|
|
- if (!cpuc->enabled)
|
|
|
- return;
|
|
|
-
|
|
|
- cpuc->enabled = 0;
|
|
|
- /*
|
|
|
- * ensure we write the disable before we start disabling the
|
|
|
- * events proper, so that amd_pmu_enable_event() does the
|
|
|
- * right thing.
|
|
|
- */
|
|
|
- barrier();
|
|
|
-
|
|
|
for (idx = 0; idx < x86_pmu.num_events; idx++) {
|
|
|
+ struct perf_event *event = cpuc->events[idx];
|
|
|
u64 val;
|
|
|
|
|
|
if (!test_bit(idx, cpuc->active_mask))
|
|
|
continue;
|
|
|
- rdmsrl(MSR_K7_EVNTSEL0 + idx, val);
|
|
|
- if (!(val & ARCH_PERFMON_EVENTSEL0_ENABLE))
|
|
|
- continue;
|
|
|
- val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
|
|
|
- wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
|
|
|
+
|
|
|
+ val = event->hw.config;
|
|
|
+ val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
|
|
|
+ wrmsrl(x86_pmu.eventsel + idx, val);
|
|
|
}
|
|
|
}
|
|
|
|
|
|
-void hw_perf_disable(void)
|
|
|
+static const struct pmu pmu;
|
|
|
+
|
|
|
+static inline int is_x86_event(struct perf_event *event)
|
|
|
{
|
|
|
- if (!x86_pmu_initialized())
|
|
|
- return;
|
|
|
- return x86_pmu.disable_all();
|
|
|
+ return event->pmu == &pmu;
|
|
|
}
|
|
|
|
|
|
-static void p6_pmu_enable_all(void)
|
|
|
+static int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
|
|
|
{
|
|
|
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
|
|
|
- unsigned long val;
|
|
|
+ struct event_constraint *c, *constraints[X86_PMC_IDX_MAX];
|
|
|
+ unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
|
|
|
+ int i, j, w, wmax, num = 0;
|
|
|
+ struct hw_perf_event *hwc;
|
|
|
|
|
|
- if (cpuc->enabled)
|
|
|
- return;
|
|
|
+ bitmap_zero(used_mask, X86_PMC_IDX_MAX);
|
|
|
|
|
|
- cpuc->enabled = 1;
|
|
|
- barrier();
|
|
|
+ for (i = 0; i < n; i++) {
|
|
|
+ constraints[i] =
|
|
|
+ x86_pmu.get_event_constraints(cpuc, cpuc->event_list[i]);
|
|
|
+ }
|
|
|
|
|
|
- /* p6 only has one enable register */
|
|
|
- rdmsrl(MSR_P6_EVNTSEL0, val);
|
|
|
- val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
|
|
|
- wrmsrl(MSR_P6_EVNTSEL0, val);
|
|
|
-}
|
|
|
+ /*
|
|
|
+ * fastpath, try to reuse previous register
|
|
|
+ */
|
|
|
+ for (i = 0; i < n; i++) {
|
|
|
+ hwc = &cpuc->event_list[i]->hw;
|
|
|
+ c = constraints[i];
|
|
|
|
|
|
-static void intel_pmu_enable_all(void)
|
|
|
-{
|
|
|
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
|
|
|
+ /* never assigned */
|
|
|
+ if (hwc->idx == -1)
|
|
|
+ break;
|
|
|
|
|
|
- if (cpuc->enabled)
|
|
|
- return;
|
|
|
+ /* constraint still honored */
|
|
|
+ if (!test_bit(hwc->idx, c->idxmsk))
|
|
|
+ break;
|
|
|
|
|
|
- cpuc->enabled = 1;
|
|
|
- barrier();
|
|
|
+ /* not already used */
|
|
|
+ if (test_bit(hwc->idx, used_mask))
|
|
|
+ break;
|
|
|
+
|
|
|
+ set_bit(hwc->idx, used_mask);
|
|
|
+ if (assign)
|
|
|
+ assign[i] = hwc->idx;
|
|
|
+ }
|
|
|
+ if (i == n)
|
|
|
+ goto done;
|
|
|
+
|
|
|
+ /*
|
|
|
+ * begin slow path
|
|
|
+ */
|
|
|
+
|
|
|
+ bitmap_zero(used_mask, X86_PMC_IDX_MAX);
|
|
|
+
|
|
|
+ /*
|
|
|
+ * weight = number of possible counters
|
|
|
+ *
|
|
|
+ * 1 = most constrained, only works on one counter
|
|
|
+ * wmax = least constrained, works on any counter
|
|
|
+ *
|
|
|
+ * assign events to counters starting with most
|
|
|
+ * constrained events.
|
|
|
+ */
|
|
|
+ wmax = x86_pmu.num_events;
|
|
|
+
|
|
|
+ /*
|
|
|
+ * when fixed event counters are present,
|
|
|
+ * wmax is incremented by 1 to account
|
|
|
+ * for one more choice
|
|
|
+ */
|
|
|
+ if (x86_pmu.num_events_fixed)
|
|
|
+ wmax++;
|
|
|
+
|
|
|
+ for (w = 1, num = n; num && w <= wmax; w++) {
|
|
|
+ /* for each event */
|
|
|
+ for (i = 0; num && i < n; i++) {
|
|
|
+ c = constraints[i];
|
|
|
+ hwc = &cpuc->event_list[i]->hw;
|
|
|
+
|
|
|
+ if (c->weight != w)
|
|
|
+ continue;
|
|
|
|
|
|
- wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl);
|
|
|
+ for_each_bit(j, c->idxmsk, X86_PMC_IDX_MAX) {
|
|
|
+ if (!test_bit(j, used_mask))
|
|
|
+ break;
|
|
|
+ }
|
|
|
|
|
|
- if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
|
|
|
- struct perf_event *event =
|
|
|
- cpuc->events[X86_PMC_IDX_FIXED_BTS];
|
|
|
+ if (j == X86_PMC_IDX_MAX)
|
|
|
+ break;
|
|
|
|
|
|
- if (WARN_ON_ONCE(!event))
|
|
|
- return;
|
|
|
+ set_bit(j, used_mask);
|
|
|
|
|
|
- intel_pmu_enable_bts(event->hw.config);
|
|
|
+ if (assign)
|
|
|
+ assign[i] = j;
|
|
|
+ num--;
|
|
|
+ }
|
|
|
+ }
|
|
|
+done:
|
|
|
+ /*
|
|
|
+ * scheduling failed or is just a simulation,
|
|
|
+ * free resources if necessary
|
|
|
+ */
|
|
|
+ if (!assign || num) {
|
|
|
+ for (i = 0; i < n; i++) {
|
|
|
+ if (x86_pmu.put_event_constraints)
|
|
|
+ x86_pmu.put_event_constraints(cpuc, cpuc->event_list[i]);
|
|
|
+ }
|
|
|
}
|
|
|
+ return num ? -ENOSPC : 0;
|
|
|
}
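The slow path above hands out counters in order of increasing weight, so the most constrained events pick first, and wmax grows by one when fixed counters exist. Below is a stand-alone simulation of that greedy pass with four counters and three invented constraint masks; the fast path and fixed counters are left out, so it is an illustration rather than the kernel algorithm verbatim.

#include <stdio.h>

#define NUM_COUNTERS 4
#define NUM_EVENTS   3

int main(void)
{
    /* allowed-counter bitmask per event (invented): event 0 may only use
     * counter 0, event 1 may use counters 0-1, event 2 may use any counter */
    unsigned idxmsk[NUM_EVENTS] = { 0x1, 0x3, 0xf };
    int assign[NUM_EVENTS] = { -1, -1, -1 };
    unsigned used = 0;
    int w, i, j;

    for (w = 1; w <= NUM_COUNTERS; w++) {          /* weight order, as in the loop above */
        for (i = 0; i < NUM_EVENTS; i++) {
            if (assign[i] != -1 || __builtin_popcount(idxmsk[i]) != w)
                continue;
            for (j = 0; j < NUM_COUNTERS; j++) {   /* first free allowed counter */
                if ((idxmsk[i] >> j & 1) && !(used >> j & 1)) {
                    used |= 1u << j;
                    assign[i] = j;
                    break;
                }
            }
            if (assign[i] == -1)
                return 1;                          /* the kernel returns -ENOSPC here */
        }
    }
    for (i = 0; i < NUM_EVENTS; i++)
        printf("event %d -> counter %d\n", i, assign[i]);
    return 0;
}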
|
|
|
|
|
|
-static void amd_pmu_enable_all(void)
|
|
|
+/*
|
|
|
+ * dogrp: true if we must also collect the leader's sibling events (group)
|
|
|
+ * returns total number of events and error code
|
|
|
+ */
|
|
|
+static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader, bool dogrp)
|
|
|
{
|
|
|
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
|
|
|
- int idx;
|
|
|
+ struct perf_event *event;
|
|
|
+ int n, max_count;
|
|
|
|
|
|
- if (cpuc->enabled)
|
|
|
- return;
|
|
|
+ max_count = x86_pmu.num_events + x86_pmu.num_events_fixed;
|
|
|
|
|
|
- cpuc->enabled = 1;
|
|
|
- barrier();
|
|
|
+ /* current number of events already accepted */
|
|
|
+ n = cpuc->n_events;
|
|
|
|
|
|
- for (idx = 0; idx < x86_pmu.num_events; idx++) {
|
|
|
- struct perf_event *event = cpuc->events[idx];
|
|
|
- u64 val;
|
|
|
+ if (is_x86_event(leader)) {
|
|
|
+ if (n >= max_count)
|
|
|
+ return -ENOSPC;
|
|
|
+ cpuc->event_list[n] = leader;
|
|
|
+ n++;
|
|
|
+ }
|
|
|
+ if (!dogrp)
|
|
|
+ return n;
|
|
|
|
|
|
- if (!test_bit(idx, cpuc->active_mask))
|
|
|
+ list_for_each_entry(event, &leader->sibling_list, group_entry) {
|
|
|
+ if (!is_x86_event(event) ||
|
|
|
+ event->state <= PERF_EVENT_STATE_OFF)
|
|
|
continue;
|
|
|
|
|
|
- val = event->hw.config;
|
|
|
- val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
|
|
|
- wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
|
|
|
- }
|
|
|
-}
|
|
|
+ if (n >= max_count)
|
|
|
+ return -ENOSPC;
|
|
|
|
|
|
-void hw_perf_enable(void)
|
|
|
-{
|
|
|
- if (!x86_pmu_initialized())
|
|
|
- return;
|
|
|
- x86_pmu.enable_all();
|
|
|
+ cpuc->event_list[n] = event;
|
|
|
+ n++;
|
|
|
+ }
|
|
|
+ return n;
|
|
|
}
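collect_events() only counts and records candidates; nothing is programmed yet, and a group is refused as soon as the running total would exceed num_events + num_events_fixed. The arithmetic is trivial but worth seeing once; the counts below are invented.

#include <stdio.h>

int main(void)
{
    int num_events = 4, num_events_fixed = 3;  /* invented PMU geometry        */
    int max_count  = num_events + num_events_fixed;
    int n_events   = 5;                        /* events already accepted      */
    int group_size = 3;                        /* leader plus two x86 siblings */

    /* equivalent to the per-event "n >= max_count" test in collect_events() */
    if (n_events + group_size > max_count)
        printf("group refused: %d + %d > %d, i.e. -ENOSPC\n",
               n_events, group_size, max_count);
    else
        printf("group accepted, n becomes %d\n", n_events + group_size);
    return 0;
}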
|
|
|
|
|
|
-static inline u64 intel_pmu_get_status(void)
|
|
|
+static inline void x86_assign_hw_event(struct perf_event *event,
|
|
|
+ struct cpu_hw_events *cpuc, int i)
|
|
|
{
|
|
|
- u64 status;
|
|
|
+ struct hw_perf_event *hwc = &event->hw;
|
|
|
|
|
|
- rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
|
|
|
+ hwc->idx = cpuc->assign[i];
|
|
|
+ hwc->last_cpu = smp_processor_id();
|
|
|
+ hwc->last_tag = ++cpuc->tags[i];
|
|
|
|
|
|
- return status;
|
|
|
+ if (hwc->idx == X86_PMC_IDX_FIXED_BTS) {
|
|
|
+ hwc->config_base = 0;
|
|
|
+ hwc->event_base = 0;
|
|
|
+ } else if (hwc->idx >= X86_PMC_IDX_FIXED) {
|
|
|
+ hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
|
|
|
+ /*
|
|
|
+ * We set it so that event_base + idx in wrmsr/rdmsr maps to
|
|
|
+ * MSR_ARCH_PERFMON_FIXED_CTR0 ... CTR2:
|
|
|
+ */
|
|
|
+ hwc->event_base =
|
|
|
+ MSR_ARCH_PERFMON_FIXED_CTR0 - X86_PMC_IDX_FIXED;
|
|
|
+ } else {
|
|
|
+ hwc->config_base = x86_pmu.eventsel;
|
|
|
+ hwc->event_base = x86_pmu.perfctr;
|
|
|
+ }
|
|
|
}
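For fixed counters the event_base bias makes the usual event_base + idx MSR access land on MSR_ARCH_PERFMON_FIXED_CTR0..2 even though idx stays in the X86_PMC_IDX_FIXED range. The numeric values below (0x309 and 32) come from the x86 headers rather than from this hunk, so take the sketch as a worked check of the arithmetic.

#include <stdio.h>

#define MSR_ARCH_PERFMON_FIXED_CTR0 0x309  /* architectural MSR of fixed counter 0 */
#define X86_PMC_IDX_FIXED           32     /* index of the first fixed counter     */

int main(void)
{
    unsigned event_base = MSR_ARCH_PERFMON_FIXED_CTR0 - X86_PMC_IDX_FIXED;
    int idx;

    /* rdmsr/wrmsr in the driver use event_base + idx, so fixed-counter
     * indexes 32..34 map straight onto MSRs 0x309..0x30b */
    for (idx = X86_PMC_IDX_FIXED; idx < X86_PMC_IDX_FIXED + 3; idx++)
        printf("idx %d -> MSR %#x\n", idx, event_base + idx);
    return 0;
}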
|
|
|
|
|
|
-static inline void intel_pmu_ack_status(u64 ack)
|
|
|
+static inline int match_prev_assignment(struct hw_perf_event *hwc,
|
|
|
+ struct cpu_hw_events *cpuc,
|
|
|
+ int i)
|
|
|
{
|
|
|
- wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
|
|
|
+ return hwc->idx == cpuc->assign[i] &&
|
|
|
+ hwc->last_cpu == smp_processor_id() &&
|
|
|
+ hwc->last_tag == cpuc->tags[i];
|
|
|
}
|
|
|
|
|
|
-static inline void x86_pmu_enable_event(struct hw_perf_event *hwc, int idx)
|
|
|
-{
|
|
|
- (void)checking_wrmsrl(hwc->config_base + idx,
|
|
|
- hwc->config | ARCH_PERFMON_EVENTSEL0_ENABLE);
|
|
|
-}
|
|
|
+static void x86_pmu_stop(struct perf_event *event);
|
|
|
|
|
|
-static inline void x86_pmu_disable_event(struct hw_perf_event *hwc, int idx)
|
|
|
+void hw_perf_enable(void)
|
|
|
{
|
|
|
- (void)checking_wrmsrl(hwc->config_base + idx, hwc->config);
|
|
|
-}
|
|
|
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
|
|
|
+ struct perf_event *event;
|
|
|
+ struct hw_perf_event *hwc;
|
|
|
+ int i;
|
|
|
|
|
|
-static inline void
|
|
|
-intel_pmu_disable_fixed(struct hw_perf_event *hwc, int __idx)
|
|
|
-{
|
|
|
- int idx = __idx - X86_PMC_IDX_FIXED;
|
|
|
- u64 ctrl_val, mask;
|
|
|
+ if (!x86_pmu_initialized())
|
|
|
+ return;
|
|
|
|
|
|
- mask = 0xfULL << (idx * 4);
|
|
|
+ if (cpuc->enabled)
|
|
|
+ return;
|
|
|
|
|
|
- rdmsrl(hwc->config_base, ctrl_val);
|
|
|
- ctrl_val &= ~mask;
|
|
|
- (void)checking_wrmsrl(hwc->config_base, ctrl_val);
|
|
|
-}
|
|
|
+ if (cpuc->n_added) {
|
|
|
+ /*
|
|
|
+ * apply assignment obtained either from
|
|
|
+ * hw_perf_group_sched_in() or x86_pmu_enable()
|
|
|
+ *
|
|
|
+ * step1: save events moving to new counters
|
|
|
+ * step2: reprogram moved events into new counters
|
|
|
+ */
|
|
|
+ for (i = 0; i < cpuc->n_events; i++) {
|
|
|
|
|
|
-static inline void
|
|
|
-p6_pmu_disable_event(struct hw_perf_event *hwc, int idx)
|
|
|
-{
|
|
|
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
|
|
|
- u64 val = P6_NOP_EVENT;
|
|
|
+ event = cpuc->event_list[i];
|
|
|
+ hwc = &event->hw;
|
|
|
|
|
|
- if (cpuc->enabled)
|
|
|
- val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
|
|
|
+ /*
|
|
|
+ * we can avoid reprogramming counter if:
|
|
|
+ * - assigned same counter as last time
|
|
|
+ * - running on same CPU as last time
|
|
|
+ * - no other event has used the counter since
|
|
|
+ */
|
|
|
+ if (hwc->idx == -1 ||
|
|
|
+ match_prev_assignment(hwc, cpuc, i))
|
|
|
+ continue;
|
|
|
|
|
|
- (void)checking_wrmsrl(hwc->config_base + idx, val);
|
|
|
-}
|
|
|
+ x86_pmu_stop(event);
|
|
|
|
|
|
-static inline void
|
|
|
-intel_pmu_disable_event(struct hw_perf_event *hwc, int idx)
|
|
|
-{
|
|
|
- if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) {
|
|
|
- intel_pmu_disable_bts();
|
|
|
- return;
|
|
|
- }
|
|
|
+ hwc->idx = -1;
|
|
|
+ }
|
|
|
|
|
|
- if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
|
|
|
- intel_pmu_disable_fixed(hwc, idx);
|
|
|
- return;
|
|
|
+ for (i = 0; i < cpuc->n_events; i++) {
|
|
|
+
|
|
|
+ event = cpuc->event_list[i];
|
|
|
+ hwc = &event->hw;
|
|
|
+
|
|
|
+ if (hwc->idx == -1) {
|
|
|
+ x86_assign_hw_event(event, cpuc, i);
|
|
|
+ x86_perf_event_set_period(event, hwc, hwc->idx);
|
|
|
+ }
|
|
|
+ /*
|
|
|
+ * need to mark as active because x86_pmu_disable()
|
|
|
+ * clears active_mask and events[] yet it preserves
|
|
|
+ * idx
|
|
|
+ */
|
|
|
+ set_bit(hwc->idx, cpuc->active_mask);
|
|
|
+ cpuc->events[hwc->idx] = event;
|
|
|
+
|
|
|
+ x86_pmu.enable(hwc, hwc->idx);
|
|
|
+ perf_event_update_userpage(event);
|
|
|
+ }
|
|
|
+ cpuc->n_added = 0;
|
|
|
+ perf_events_lapic_init();
|
|
|
}
|
|
|
|
|
|
- x86_pmu_disable_event(hwc, idx);
|
|
|
+ cpuc->enabled = 1;
|
|
|
+ barrier();
|
|
|
+
|
|
|
+ x86_pmu.enable_all();
|
|
|
}
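The two passes above matter because a counter freed in step 1 may be the destination of a different event in step 2; doing both in one pass could program an event onto a counter that is still occupied. The stand-alone sketch below replays the simplest such case, two events swapping counters; the data is invented and only the ordering idea is taken from hw_perf_enable().

#include <stdio.h>

#define NEVENTS 2

int main(void)
{
    int cur[NEVENTS]    = { 0, 1 };  /* counter each event is on now (invented) */
    int assign[NEVENTS] = { 1, 0 };  /* counters the scheduler just picked      */
    int i;

    /* step 1: stop every event that has to move, freeing its old counter */
    for (i = 0; i < NEVENTS; i++) {
        if (cur[i] != assign[i]) {
            printf("stop  event %d on counter %d\n", i, cur[i]);
            cur[i] = -1;             /* mirrors hwc->idx = -1 above */
        }
    }

    /* step 2: every destination is now free, program the moved events */
    for (i = 0; i < NEVENTS; i++) {
        if (cur[i] == -1) {
            cur[i] = assign[i];
            printf("start event %d on counter %d\n", i, cur[i]);
        }
    }
    return 0;
}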
|
|
|
|
|
|
-static inline void
|
|
|
-amd_pmu_disable_event(struct hw_perf_event *hwc, int idx)
|
|
|
+static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc, int idx)
|
|
|
{
|
|
|
- x86_pmu_disable_event(hwc, idx);
|
|
|
+ (void)checking_wrmsrl(hwc->config_base + idx,
|
|
|
+ hwc->config | ARCH_PERFMON_EVENTSEL0_ENABLE);
|
|
|
+}
|
|
|
+
|
|
|
+static inline void x86_pmu_disable_event(struct hw_perf_event *hwc, int idx)
|
|
|
+{
|
|
|
+ (void)checking_wrmsrl(hwc->config_base + idx, hwc->config);
|
|
|
}
|
|
|
|
|
|
static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);
|
|
@@ -1326,220 +919,60 @@ x86_perf_event_set_period(struct perf_event *event,
|
|
|
return ret;
|
|
|
}
|
|
|
|
|
|
-static inline void
|
|
|
-intel_pmu_enable_fixed(struct hw_perf_event *hwc, int __idx)
|
|
|
-{
|
|
|
- int idx = __idx - X86_PMC_IDX_FIXED;
|
|
|
- u64 ctrl_val, bits, mask;
|
|
|
- int err;
|
|
|
-
|
|
|
- /*
|
|
|
- * Enable IRQ generation (0x8),
|
|
|
- * and enable ring-3 counting (0x2) and ring-0 counting (0x1)
|
|
|
- * if requested:
|
|
|
- */
|
|
|
- bits = 0x8ULL;
|
|
|
- if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
|
|
|
- bits |= 0x2;
|
|
|
- if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
|
|
|
- bits |= 0x1;
|
|
|
-
|
|
|
- /*
|
|
|
- * ANY bit is supported in v3 and up
|
|
|
- */
|
|
|
- if (x86_pmu.version > 2 && hwc->config & ARCH_PERFMON_EVENTSEL_ANY)
|
|
|
- bits |= 0x4;
|
|
|
-
|
|
|
- bits <<= (idx * 4);
|
|
|
- mask = 0xfULL << (idx * 4);
|
|
|
-
|
|
|
- rdmsrl(hwc->config_base, ctrl_val);
|
|
|
- ctrl_val &= ~mask;
|
|
|
- ctrl_val |= bits;
|
|
|
- err = checking_wrmsrl(hwc->config_base, ctrl_val);
|
|
|
-}
|
|
|
-
|
|
|
-static void p6_pmu_enable_event(struct hw_perf_event *hwc, int idx)
|
|
|
+static void x86_pmu_enable_event(struct hw_perf_event *hwc, int idx)
|
|
|
{
|
|
|
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
|
|
|
- u64 val;
|
|
|
-
|
|
|
- val = hwc->config;
|
|
|
if (cpuc->enabled)
|
|
|
- val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
|
|
|
-
|
|
|
- (void)checking_wrmsrl(hwc->config_base + idx, val);
|
|
|
+ __x86_pmu_enable_event(hwc, idx);
|
|
|
}
|
|
|
|
|
|
-
|
|
|
-static void intel_pmu_enable_event(struct hw_perf_event *hwc, int idx)
|
|
|
-{
|
|
|
- if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) {
|
|
|
- if (!__get_cpu_var(cpu_hw_events).enabled)
|
|
|
- return;
|
|
|
-
|
|
|
- intel_pmu_enable_bts(hwc->config);
|
|
|
- return;
|
|
|
- }
|
|
|
-
|
|
|
- if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
|
|
|
- intel_pmu_enable_fixed(hwc, idx);
|
|
|
- return;
|
|
|
- }
|
|
|
-
|
|
|
- x86_pmu_enable_event(hwc, idx);
|
|
|
-}
|
|
|
-
|
|
|
-static void amd_pmu_enable_event(struct hw_perf_event *hwc, int idx)
|
|
|
+/*
|
|
|
+ * activate a single event
|
|
|
+ *
|
|
|
+ * The event is added to the group of enabled events
|
|
|
+ * but only if it can be scheduled with existing events.
|
|
|
+ *
|
|
|
+ * Called with PMU disabled. If successful (return value 0), the caller is
|
|
|
+ * then guaranteed to call perf_enable() and hw_perf_enable()
|
|
|
+ */
|
|
|
+static int x86_pmu_enable(struct perf_event *event)
|
|
|
{
|
|
|
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
|
|
|
+ struct hw_perf_event *hwc;
|
|
|
+ int assign[X86_PMC_IDX_MAX];
|
|
|
+ int n, n0, ret;
|
|
|
|
|
|
- if (cpuc->enabled)
|
|
|
- x86_pmu_enable_event(hwc, idx);
|
|
|
-}
|
|
|
-
|
|
|
-static int fixed_mode_idx(struct hw_perf_event *hwc)
|
|
|
-{
|
|
|
- unsigned int hw_event;
|
|
|
-
|
|
|
- hw_event = hwc->config & ARCH_PERFMON_EVENT_MASK;
|
|
|
-
|
|
|
- if (unlikely((hw_event ==
|
|
|
- x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS)) &&
|
|
|
- (hwc->sample_period == 1)))
|
|
|
- return X86_PMC_IDX_FIXED_BTS;
|
|
|
+ hwc = &event->hw;
|
|
|
|
|
|
- if (!x86_pmu.num_events_fixed)
|
|
|
- return -1;
|
|
|
+ n0 = cpuc->n_events;
|
|
|
+ n = collect_events(cpuc, event, false);
|
|
|
+ if (n < 0)
|
|
|
+ return n;
|
|
|
|
|
|
+ ret = x86_schedule_events(cpuc, n, assign);
|
|
|
+ if (ret)
|
|
|
+ return ret;
|
|
|
/*
|
|
|
- * fixed counters do not take all possible filters
|
|
|
+ * copy new assignment, now we know it is possible
|
|
|
+ * will be used by hw_perf_enable()
|
|
|
*/
|
|
|
- if (hwc->config & ARCH_PERFMON_EVENT_FILTER_MASK)
|
|
|
- return -1;
|
|
|
-
|
|
|
- if (unlikely(hw_event == x86_pmu.event_map(PERF_COUNT_HW_INSTRUCTIONS)))
|
|
|
- return X86_PMC_IDX_FIXED_INSTRUCTIONS;
|
|
|
- if (unlikely(hw_event == x86_pmu.event_map(PERF_COUNT_HW_CPU_CYCLES)))
|
|
|
- return X86_PMC_IDX_FIXED_CPU_CYCLES;
|
|
|
- if (unlikely(hw_event == x86_pmu.event_map(PERF_COUNT_HW_BUS_CYCLES)))
|
|
|
- return X86_PMC_IDX_FIXED_BUS_CYCLES;
|
|
|
+ memcpy(cpuc->assign, assign, n*sizeof(int));
|
|
|
|
|
|
- return -1;
|
|
|
-}
|
|
|
-
|
|
|
-/*
|
|
|
- * generic counter allocator: get next free counter
|
|
|
- */
|
|
|
-static int
|
|
|
-gen_get_event_idx(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc)
|
|
|
-{
|
|
|
- int idx;
|
|
|
-
|
|
|
- idx = find_first_zero_bit(cpuc->used_mask, x86_pmu.num_events);
|
|
|
- return idx == x86_pmu.num_events ? -1 : idx;
|
|
|
-}
|
|
|
-
|
|
|
-/*
|
|
|
- * intel-specific counter allocator: check event constraints
|
|
|
- */
|
|
|
-static int
|
|
|
-intel_get_event_idx(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc)
|
|
|
-{
|
|
|
- const struct event_constraint *event_constraint;
|
|
|
- int i, code;
|
|
|
-
|
|
|
- if (!event_constraints)
|
|
|
- goto skip;
|
|
|
-
|
|
|
- code = hwc->config & CORE_EVNTSEL_EVENT_MASK;
|
|
|
-
|
|
|
- for_each_event_constraint(event_constraint, event_constraints) {
|
|
|
- if (code == event_constraint->code) {
|
|
|
- for_each_bit(i, event_constraint->idxmsk, X86_PMC_IDX_MAX) {
|
|
|
- if (!test_and_set_bit(i, cpuc->used_mask))
|
|
|
- return i;
|
|
|
- }
|
|
|
- return -1;
|
|
|
- }
|
|
|
- }
|
|
|
-skip:
|
|
|
- return gen_get_event_idx(cpuc, hwc);
|
|
|
-}
|
|
|
-
|
|
|
-static int
|
|
|
-x86_schedule_event(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc)
|
|
|
-{
|
|
|
- int idx;
|
|
|
-
|
|
|
- idx = fixed_mode_idx(hwc);
|
|
|
- if (idx == X86_PMC_IDX_FIXED_BTS) {
|
|
|
- /* BTS is already occupied. */
|
|
|
- if (test_and_set_bit(idx, cpuc->used_mask))
|
|
|
- return -EAGAIN;
|
|
|
-
|
|
|
- hwc->config_base = 0;
|
|
|
- hwc->event_base = 0;
|
|
|
- hwc->idx = idx;
|
|
|
- } else if (idx >= 0) {
|
|
|
- /*
|
|
|
- * Try to get the fixed event, if that is already taken
|
|
|
- * then try to get a generic event:
|
|
|
- */
|
|
|
- if (test_and_set_bit(idx, cpuc->used_mask))
|
|
|
- goto try_generic;
|
|
|
-
|
|
|
- hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
|
|
|
- /*
|
|
|
- * We set it so that event_base + idx in wrmsr/rdmsr maps to
|
|
|
- * MSR_ARCH_PERFMON_FIXED_CTR0 ... CTR2:
|
|
|
- */
|
|
|
- hwc->event_base =
|
|
|
- MSR_ARCH_PERFMON_FIXED_CTR0 - X86_PMC_IDX_FIXED;
|
|
|
- hwc->idx = idx;
|
|
|
- } else {
|
|
|
- idx = hwc->idx;
|
|
|
- /* Try to get the previous generic event again */
|
|
|
- if (idx == -1 || test_and_set_bit(idx, cpuc->used_mask)) {
|
|
|
-try_generic:
|
|
|
- idx = x86_pmu.get_event_idx(cpuc, hwc);
|
|
|
- if (idx == -1)
|
|
|
- return -EAGAIN;
|
|
|
-
|
|
|
- set_bit(idx, cpuc->used_mask);
|
|
|
- hwc->idx = idx;
|
|
|
- }
|
|
|
- hwc->config_base = x86_pmu.eventsel;
|
|
|
- hwc->event_base = x86_pmu.perfctr;
|
|
|
- }
|
|
|
+ cpuc->n_events = n;
|
|
|
+ cpuc->n_added = n - n0;
|
|
|
|
|
|
- return idx;
|
|
|
+ return 0;
|
|
|
}
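x86_pmu_enable() is transactional: it collects the candidate list, dry-runs the scheduler, and only then copies the assignment that hw_perf_enable() will apply. The stand-alone sketch below condenses that call order; the helper names echo the ones in this patch but their bodies are stubs, so it is pseudocode in C rather than kernel code.

#include <stdio.h>
#include <string.h>

#define NMAX 8

static int n_events;                /* events already accepted on this CPU */
static int assign[NMAX];            /* committed event -> counter mapping  */

static int collect_one_more(void)
{
    printf("collect_events()\n");
    return n_events + 1;            /* pretend the new event was appended */
}

static int dry_run_schedule(int n, int *a)
{
    int i;
    printf("x86_schedule_events(n=%d), nothing programmed yet\n", n);
    for (i = 0; i < n; i++)
        a[i] = i;                   /* pretend every event found a counter */
    return 0;
}

static int pmu_enable_sketch(void)
{
    int tmp[NMAX], n, ret;

    n = collect_one_more();                   /* 1. extend the candidate list      */
    ret = dry_run_schedule(n, tmp);           /* 2. check the whole set still fits */
    if (ret)
        return ret;                           /*    nothing was touched on failure */
    memcpy(assign, tmp, n * sizeof(int));     /* 3. commit for hw_perf_enable()    */
    n_events = n;
    printf("committed %d events\n", n);
    return 0;
}

int main(void)
{
    return pmu_enable_sketch();
}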
|
|
|
|
|
|
-/*
|
|
|
- * Find a PMC slot for the freshly enabled / scheduled in event:
|
|
|
- */
|
|
|
-static int x86_pmu_enable(struct perf_event *event)
|
|
|
+static int x86_pmu_start(struct perf_event *event)
|
|
|
{
|
|
|
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
|
|
|
struct hw_perf_event *hwc = &event->hw;
|
|
|
- int idx;
|
|
|
-
|
|
|
- idx = x86_schedule_event(cpuc, hwc);
|
|
|
- if (idx < 0)
|
|
|
- return idx;
|
|
|
-
|
|
|
- perf_events_lapic_init();
|
|
|
-
|
|
|
- x86_pmu.disable(hwc, idx);
|
|
|
-
|
|
|
- cpuc->events[idx] = event;
|
|
|
- set_bit(idx, cpuc->active_mask);
|
|
|
|
|
|
- x86_perf_event_set_period(event, hwc, idx);
|
|
|
- x86_pmu.enable(hwc, idx);
|
|
|
+ if (hwc->idx == -1)
|
|
|
+ return -EAGAIN;
|
|
|
|
|
|
- perf_event_update_userpage(event);
|
|
|
+ x86_perf_event_set_period(event, hwc, hwc->idx);
|
|
|
+ x86_pmu.enable(hwc, hwc->idx);
|
|
|
|
|
|
return 0;
|
|
|
}
|
|
@@ -1583,7 +1016,7 @@ void perf_event_print_debug(void)
|
|
|
pr_info("CPU#%d: overflow: %016llx\n", cpu, overflow);
|
|
|
pr_info("CPU#%d: fixed: %016llx\n", cpu, fixed);
|
|
|
}
|
|
|
- pr_info("CPU#%d: used: %016llx\n", cpu, *(u64 *)cpuc->used_mask);
|
|
|
+ pr_info("CPU#%d: active: %016llx\n", cpu, *(u64 *)cpuc->active_mask);
|
|
|
|
|
|
for (idx = 0; idx < x86_pmu.num_events; idx++) {
|
|
|
rdmsrl(x86_pmu.eventsel + idx, pmc_ctrl);
|
|
@@ -1607,67 +1040,7 @@ void perf_event_print_debug(void)
|
|
|
local_irq_restore(flags);
|
|
|
}
|
|
|
|
|
|
-static void intel_pmu_drain_bts_buffer(struct cpu_hw_events *cpuc)
|
|
|
-{
|
|
|
- struct debug_store *ds = cpuc->ds;
|
|
|
- struct bts_record {
|
|
|
- u64 from;
|
|
|
- u64 to;
|
|
|
- u64 flags;
|
|
|
- };
|
|
|
- struct perf_event *event = cpuc->events[X86_PMC_IDX_FIXED_BTS];
|
|
|
- struct bts_record *at, *top;
|
|
|
- struct perf_output_handle handle;
|
|
|
- struct perf_event_header header;
|
|
|
- struct perf_sample_data data;
|
|
|
- struct pt_regs regs;
|
|
|
-
|
|
|
- if (!event)
|
|
|
- return;
|
|
|
-
|
|
|
- if (!ds)
|
|
|
- return;
|
|
|
-
|
|
|
- at = (struct bts_record *)(unsigned long)ds->bts_buffer_base;
|
|
|
- top = (struct bts_record *)(unsigned long)ds->bts_index;
|
|
|
-
|
|
|
- if (top <= at)
|
|
|
- return;
|
|
|
-
|
|
|
- ds->bts_index = ds->bts_buffer_base;
|
|
|
-
|
|
|
-
|
|
|
- data.period = event->hw.last_period;
|
|
|
- data.addr = 0;
|
|
|
- data.raw = NULL;
|
|
|
- regs.ip = 0;
|
|
|
-
|
|
|
- /*
|
|
|
- * Prepare a generic sample, i.e. fill in the invariant fields.
|
|
|
- * We will overwrite the from and to address before we output
|
|
|
- * the sample.
|
|
|
- */
|
|
|
- perf_prepare_sample(&header, &data, event, ®s);
|
|
|
-
|
|
|
- if (perf_output_begin(&handle, event,
|
|
|
- header.size * (top - at), 1, 1))
|
|
|
- return;
|
|
|
-
|
|
|
- for (; at < top; at++) {
|
|
|
- data.ip = at->from;
|
|
|
- data.addr = at->to;
|
|
|
-
|
|
|
- perf_output_sample(&handle, &header, &data, event);
|
|
|
- }
|
|
|
-
|
|
|
- perf_output_end(&handle);
|
|
|
-
|
|
|
- /* There's new data available. */
|
|
|
- event->hw.interrupts++;
|
|
|
- event->pending_kill = POLL_IN;
|
|
|
-}
|
|
|
-
|
|
|
-static void x86_pmu_disable(struct perf_event *event)
|
|
|
+static void x86_pmu_stop(struct perf_event *event)
|
|
|
{
|
|
|
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
|
|
|
struct hw_perf_event *hwc = &event->hw;
|
|
@@ -1680,184 +1053,39 @@ static void x86_pmu_disable(struct perf_event *event)
|
|
|
clear_bit(idx, cpuc->active_mask);
|
|
|
x86_pmu.disable(hwc, idx);
|
|
|
|
|
|
- /*
|
|
|
- * Make sure the cleared pointer becomes visible before we
|
|
|
- * (potentially) free the event:
|
|
|
- */
|
|
|
- barrier();
|
|
|
-
|
|
|
/*
|
|
|
* Drain the remaining delta count out of a event
|
|
|
* that we are disabling:
|
|
|
*/
|
|
|
x86_perf_event_update(event, hwc, idx);
|
|
|
|
|
|
- /* Drain the remaining BTS records. */
|
|
|
- if (unlikely(idx == X86_PMC_IDX_FIXED_BTS))
|
|
|
- intel_pmu_drain_bts_buffer(cpuc);
|
|
|
-
|
|
|
cpuc->events[idx] = NULL;
|
|
|
- clear_bit(idx, cpuc->used_mask);
|
|
|
-
|
|
|
- perf_event_update_userpage(event);
|
|
|
-}
|
|
|
-
|
|
|
-/*
|
|
|
- * Save and restart an expired event. Called by NMI contexts,
|
|
|
- * so it has to be careful about preempting normal event ops:
|
|
|
- */
|
|
|
-static int intel_pmu_save_and_restart(struct perf_event *event)
|
|
|
-{
|
|
|
- struct hw_perf_event *hwc = &event->hw;
|
|
|
- int idx = hwc->idx;
|
|
|
- int ret;
|
|
|
-
|
|
|
- x86_perf_event_update(event, hwc, idx);
|
|
|
- ret = x86_perf_event_set_period(event, hwc, idx);
|
|
|
-
|
|
|
- if (event->state == PERF_EVENT_STATE_ACTIVE)
|
|
|
- intel_pmu_enable_event(hwc, idx);
|
|
|
-
|
|
|
- return ret;
|
|
|
-}
|
|
|
-
|
|
|
-static void intel_pmu_reset(void)
|
|
|
-{
|
|
|
- struct debug_store *ds = __get_cpu_var(cpu_hw_events).ds;
|
|
|
- unsigned long flags;
|
|
|
- int idx;
|
|
|
-
|
|
|
- if (!x86_pmu.num_events)
|
|
|
- return;
|
|
|
-
|
|
|
- local_irq_save(flags);
|
|
|
-
|
|
|
- printk("clearing PMU state on CPU#%d\n", smp_processor_id());
|
|
|
-
|
|
|
- for (idx = 0; idx < x86_pmu.num_events; idx++) {
|
|
|
- checking_wrmsrl(x86_pmu.eventsel + idx, 0ull);
|
|
|
- checking_wrmsrl(x86_pmu.perfctr + idx, 0ull);
|
|
|
- }
|
|
|
- for (idx = 0; idx < x86_pmu.num_events_fixed; idx++) {
|
|
|
- checking_wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);
|
|
|
- }
|
|
|
- if (ds)
|
|
|
- ds->bts_index = ds->bts_buffer_base;
|
|
|
-
|
|
|
- local_irq_restore(flags);
|
|
|
-}
|
|
|
-
|
|
|
-static int p6_pmu_handle_irq(struct pt_regs *regs)
|
|
|
-{
|
|
|
- struct perf_sample_data data;
|
|
|
- struct cpu_hw_events *cpuc;
|
|
|
- struct perf_event *event;
|
|
|
- struct hw_perf_event *hwc;
|
|
|
- int idx, handled = 0;
|
|
|
- u64 val;
|
|
|
-
|
|
|
- data.addr = 0;
|
|
|
- data.raw = NULL;
|
|
|
-
|
|
|
- cpuc = &__get_cpu_var(cpu_hw_events);
|
|
|
-
|
|
|
- for (idx = 0; idx < x86_pmu.num_events; idx++) {
|
|
|
- if (!test_bit(idx, cpuc->active_mask))
|
|
|
- continue;
|
|
|
-
|
|
|
- event = cpuc->events[idx];
|
|
|
- hwc = &event->hw;
|
|
|
-
|
|
|
- val = x86_perf_event_update(event, hwc, idx);
|
|
|
- if (val & (1ULL << (x86_pmu.event_bits - 1)))
|
|
|
- continue;
|
|
|
-
|
|
|
- /*
|
|
|
- * event overflow
|
|
|
- */
|
|
|
- handled = 1;
|
|
|
- data.period = event->hw.last_period;
|
|
|
-
|
|
|
- if (!x86_perf_event_set_period(event, hwc, idx))
|
|
|
- continue;
|
|
|
-
|
|
|
- if (perf_event_overflow(event, 1, &data, regs))
|
|
|
- p6_pmu_disable_event(hwc, idx);
|
|
|
- }
|
|
|
-
|
|
|
- if (handled)
|
|
|
- inc_irq_stat(apic_perf_irqs);
|
|
|
-
|
|
|
- return handled;
|
|
|
}
|
|
|
|
|
|
-/*
|
|
|
- * This handler is triggered by the local APIC, so the APIC IRQ handling
|
|
|
- * rules apply:
|
|
|
- */
|
|
|
-static int intel_pmu_handle_irq(struct pt_regs *regs)
|
|
|
+static void x86_pmu_disable(struct perf_event *event)
|
|
|
{
|
|
|
- struct perf_sample_data data;
|
|
|
- struct cpu_hw_events *cpuc;
|
|
|
- int bit, loops;
|
|
|
- u64 ack, status;
|
|
|
-
|
|
|
- data.addr = 0;
|
|
|
- data.raw = NULL;
|
|
|
-
|
|
|
- cpuc = &__get_cpu_var(cpu_hw_events);
|
|
|
-
|
|
|
- perf_disable();
|
|
|
- intel_pmu_drain_bts_buffer(cpuc);
|
|
|
- status = intel_pmu_get_status();
|
|
|
- if (!status) {
|
|
|
- perf_enable();
|
|
|
- return 0;
|
|
|
- }
|
|
|
-
|
|
|
- loops = 0;
|
|
|
-again:
|
|
|
- if (++loops > 100) {
|
|
|
- WARN_ONCE(1, "perfevents: irq loop stuck!\n");
|
|
|
- perf_event_print_debug();
|
|
|
- intel_pmu_reset();
|
|
|
- perf_enable();
|
|
|
- return 1;
|
|
|
- }
|
|
|
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
|
|
|
+ int i;
|
|
|
|
|
|
- inc_irq_stat(apic_perf_irqs);
|
|
|
- ack = status;
|
|
|
- for_each_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
|
|
|
- struct perf_event *event = cpuc->events[bit];
|
|
|
+ x86_pmu_stop(event);
|
|
|
|
|
|
- clear_bit(bit, (unsigned long *) &status);
|
|
|
- if (!test_bit(bit, cpuc->active_mask))
|
|
|
- continue;
|
|
|
+ for (i = 0; i < cpuc->n_events; i++) {
|
|
|
+ if (event == cpuc->event_list[i]) {
|
|
|
|
|
|
- if (!intel_pmu_save_and_restart(event))
|
|
|
- continue;
|
|
|
+ if (x86_pmu.put_event_constraints)
|
|
|
+ x86_pmu.put_event_constraints(cpuc, event);
|
|
|
|
|
|
- data.period = event->hw.last_period;
|
|
|
+ while (++i < cpuc->n_events)
|
|
|
+ cpuc->event_list[i-1] = cpuc->event_list[i];
|
|
|
|
|
|
- if (perf_event_overflow(event, 1, &data, regs))
|
|
|
- intel_pmu_disable_event(&event->hw, bit);
|
|
|
+ --cpuc->n_events;
|
|
|
+ break;
|
|
|
+ }
|
|
|
}
|
|
|
-
|
|
|
- intel_pmu_ack_status(ack);
|
|
|
-
|
|
|
- /*
|
|
|
- * Repeat if there is more work to be done:
|
|
|
- */
|
|
|
- status = intel_pmu_get_status();
|
|
|
- if (status)
|
|
|
- goto again;
|
|
|
-
|
|
|
- perf_enable();
|
|
|
-
|
|
|
- return 1;
|
|
|
+ perf_event_update_userpage(event);
|
|
|
}
|
|
|
|
|
|
-static int amd_pmu_handle_irq(struct pt_regs *regs)
|
|
|
+static int x86_pmu_handle_irq(struct pt_regs *regs)
|
|
|
{
|
|
|
struct perf_sample_data data;
|
|
|
struct cpu_hw_events *cpuc;
|
|
@@ -1892,7 +1120,7 @@ static int amd_pmu_handle_irq(struct pt_regs *regs)
|
|
|
continue;
|
|
|
|
|
|
if (perf_event_overflow(event, 1, &data, regs))
|
|
|
- amd_pmu_disable_event(hwc, idx);
|
|
|
+ x86_pmu.disable(hwc, idx);
|
|
|
}
|
|
|
|
|
|
if (handled)
|
|
@@ -1975,194 +1203,137 @@ static __read_mostly struct notifier_block perf_event_nmi_notifier = {
|
|
|
.priority = 1
|
|
|
};
|
|
|
|
|
|
-static __initconst struct x86_pmu p6_pmu = {
|
|
|
- .name = "p6",
|
|
|
- .handle_irq = p6_pmu_handle_irq,
|
|
|
- .disable_all = p6_pmu_disable_all,
|
|
|
- .enable_all = p6_pmu_enable_all,
|
|
|
- .enable = p6_pmu_enable_event,
|
|
|
- .disable = p6_pmu_disable_event,
|
|
|
- .eventsel = MSR_P6_EVNTSEL0,
|
|
|
- .perfctr = MSR_P6_PERFCTR0,
|
|
|
- .event_map = p6_pmu_event_map,
|
|
|
- .raw_event = p6_pmu_raw_event,
|
|
|
- .max_events = ARRAY_SIZE(p6_perfmon_event_map),
|
|
|
- .apic = 1,
|
|
|
- .max_period = (1ULL << 31) - 1,
|
|
|
- .version = 0,
|
|
|
- .num_events = 2,
|
|
|
- /*
|
|
|
- * Events have 40 bits implemented. However they are designed such
|
|
|
- * that bits [32-39] are sign extensions of bit 31. As such the
|
|
|
- * effective width of a event for P6-like PMU is 32 bits only.
|
|
|
- *
|
|
|
- * See IA-32 Intel Architecture Software developer manual Vol 3B
|
|
|
- */
|
|
|
- .event_bits = 32,
|
|
|
- .event_mask = (1ULL << 32) - 1,
|
|
|
- .get_event_idx = intel_get_event_idx,
|
|
|
-};
|
|
|
+static struct event_constraint unconstrained;
|
|
|
+static struct event_constraint emptyconstraint;
|
|
|
|
|
|
-static __initconst struct x86_pmu intel_pmu = {
|
|
|
- .name = "Intel",
|
|
|
- .handle_irq = intel_pmu_handle_irq,
|
|
|
- .disable_all = intel_pmu_disable_all,
|
|
|
- .enable_all = intel_pmu_enable_all,
|
|
|
- .enable = intel_pmu_enable_event,
|
|
|
- .disable = intel_pmu_disable_event,
|
|
|
- .eventsel = MSR_ARCH_PERFMON_EVENTSEL0,
|
|
|
- .perfctr = MSR_ARCH_PERFMON_PERFCTR0,
|
|
|
- .event_map = intel_pmu_event_map,
|
|
|
- .raw_event = intel_pmu_raw_event,
|
|
|
- .max_events = ARRAY_SIZE(intel_perfmon_event_map),
|
|
|
- .apic = 1,
|
|
|
- /*
|
|
|
- * Intel PMCs cannot be accessed sanely above 32 bit width,
|
|
|
- * so we install an artificial 1<<31 period regardless of
|
|
|
- * the generic event period:
|
|
|
- */
|
|
|
- .max_period = (1ULL << 31) - 1,
|
|
|
- .enable_bts = intel_pmu_enable_bts,
|
|
|
- .disable_bts = intel_pmu_disable_bts,
|
|
|
- .get_event_idx = intel_get_event_idx,
|
|
|
-};
|
|
|
+static struct event_constraint *
|
|
|
+x86_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
|
|
|
+{
|
|
|
+ struct event_constraint *c;
|
|
|
|
|
|
-static __initconst struct x86_pmu amd_pmu = {
|
|
|
- .name = "AMD",
|
|
|
- .handle_irq = amd_pmu_handle_irq,
|
|
|
- .disable_all = amd_pmu_disable_all,
|
|
|
- .enable_all = amd_pmu_enable_all,
|
|
|
- .enable = amd_pmu_enable_event,
|
|
|
- .disable = amd_pmu_disable_event,
|
|
|
- .eventsel = MSR_K7_EVNTSEL0,
|
|
|
- .perfctr = MSR_K7_PERFCTR0,
|
|
|
- .event_map = amd_pmu_event_map,
|
|
|
- .raw_event = amd_pmu_raw_event,
|
|
|
- .max_events = ARRAY_SIZE(amd_perfmon_event_map),
|
|
|
- .num_events = 4,
|
|
|
- .event_bits = 48,
|
|
|
- .event_mask = (1ULL << 48) - 1,
|
|
|
- .apic = 1,
|
|
|
- /* use highest bit to detect overflow */
|
|
|
- .max_period = (1ULL << 47) - 1,
|
|
|
- .get_event_idx = gen_get_event_idx,
|
|
|
-};
|
|
|
+ if (x86_pmu.event_constraints) {
|
|
|
+ for_each_event_constraint(c, x86_pmu.event_constraints) {
|
|
|
+ if ((event->hw.config & c->cmask) == c->code)
|
|
|
+ return c;
|
|
|
+ }
|
|
|
+ }
|
|
|
+
|
|
|
+ return &unconstrained;
|
|
|
+}
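The default lookup above returns the first table entry whose code matches the event config under its cmask and otherwise falls back to the shared unconstrained object (its initialisation is outside this hunk, presumably allowing every generic counter). The stand-alone sketch below shows why the mask matters: with an invented cmask of 0xff only the event-select byte is compared, so a unit-mask difference still matches while a different event code does not.

#include <stdio.h>

struct constraint { unsigned code, cmask; };

int main(void)
{
    /* invented table: constrain event-select 0x12, compare only bits 0-7 */
    struct constraint table[] = { { 0x12, 0xff }, { 0, 0 } };
    unsigned configs[] = { 0x0112, 0x0113 };   /* umask 0x01, events 0x12 and 0x13 */
    struct constraint *c;
    int i;

    for (i = 0; i < 2; i++) {
        for (c = table; c->cmask; c++)
            if ((configs[i] & c->cmask) == c->code)
                break;
        if (c->cmask)
            printf("config %#x hits the constraint for code %#x\n",
                   configs[i], c->code);
        else
            printf("config %#x falls back to unconstrained\n", configs[i]);
    }
    return 0;
}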
|
|
|
|
|
|
-static __init int p6_pmu_init(void)
|
|
|
+static int x86_event_sched_in(struct perf_event *event,
+                              struct perf_cpu_context *cpuctx)
 {
-        switch (boot_cpu_data.x86_model) {
-        case 1:
-        case 3:  /* Pentium Pro */
-        case 5:
-        case 6:  /* Pentium II */
-        case 7:
-        case 8:
-        case 11: /* Pentium III */
-                event_constraints = intel_p6_event_constraints;
-                break;
-        case 9:
-        case 13:
-                /* Pentium M */
-                event_constraints = intel_p6_event_constraints;
-                break;
-        default:
-                pr_cont("unsupported p6 CPU model %d ",
-                        boot_cpu_data.x86_model);
-                return -ENODEV;
-        }
+        int ret = 0;

-        x86_pmu = p6_pmu;
+        event->state = PERF_EVENT_STATE_ACTIVE;
+        event->oncpu = smp_processor_id();
+        event->tstamp_running += event->ctx->time - event->tstamp_stopped;

-        return 0;
+        if (!is_x86_event(event))
+                ret = event->pmu->enable(event);
+
+        if (!ret && !is_software_event(event))
+                cpuctx->active_oncpu++;
+
+        if (!ret && event->attr.exclusive)
+                cpuctx->exclusive = 1;
+
+        return ret;
 }

-static __init int intel_pmu_init(void)
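+/*
+ * Undo x86_event_sched_in(): mark the event inactive again and drop
+ * the cpu-context accounting (used when a later group member fails
+ * to schedule in).
+ */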
+static void x86_event_sched_out(struct perf_event *event,
+                                struct perf_cpu_context *cpuctx)
 {
-        union cpuid10_edx edx;
-        union cpuid10_eax eax;
-        unsigned int unused;
-        unsigned int ebx;
-        int version;
-
-        if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
-                /* check for P6 processor family */
-                if (boot_cpu_data.x86 == 6) {
-                        return p6_pmu_init();
-                } else {
-                        return -ENODEV;
-                }
-        }
+        event->state = PERF_EVENT_STATE_INACTIVE;
+        event->oncpu = -1;

-        /*
-         * Check whether the Architectural PerfMon supports
-         * Branch Misses Retired hw_event or not.
-         */
-        cpuid(10, &eax.full, &ebx, &unused, &edx.full);
-        if (eax.split.mask_length <= ARCH_PERFMON_BRANCH_MISSES_RETIRED)
-                return -ENODEV;
+        if (!is_x86_event(event))
+                event->pmu->disable(event);

-        version = eax.split.version_id;
-        if (version < 2)
-                return -ENODEV;
+        event->tstamp_running -= event->ctx->time - event->tstamp_stopped;

-        x86_pmu = intel_pmu;
-        x86_pmu.version = version;
-        x86_pmu.num_events = eax.split.num_events;
-        x86_pmu.event_bits = eax.split.bit_width;
-        x86_pmu.event_mask = (1ULL << eax.split.bit_width) - 1;
+        if (!is_software_event(event))
+                cpuctx->active_oncpu--;

-        /*
-         * Quirk: v2 perfmon does not report fixed-purpose events, so
-         * assume at least 3 events:
-         */
-        x86_pmu.num_events_fixed = max((int)edx.split.num_events_fixed, 3);
+        if (event->attr.exclusive || !cpuctx->active_oncpu)
+                cpuctx->exclusive = 0;
+}

+/*
+ * Called to enable a whole group of events.
+ * Returns 1 if the group was enabled, or -EAGAIN if it could not be.
+ * Assumes the caller has disabled interrupts and has
+ * frozen the PMU with hw_perf_save_disable.
+ *
+ * Called with the PMU disabled.  If successful (return value 1), the
+ * caller is then guaranteed to call perf_enable() and hw_perf_enable().
+ */
+int hw_perf_group_sched_in(struct perf_event *leader,
+               struct perf_cpu_context *cpuctx,
+               struct perf_event_context *ctx)
+{
+        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+        struct perf_event *sub;
+        int assign[X86_PMC_IDX_MAX];
+        int n0, n1, ret;
+
+        /* n0 = total number of events */
+        n0 = collect_events(cpuc, leader, true);
+        if (n0 < 0)
+                return n0;
+
+        ret = x86_schedule_events(cpuc, n0, assign);
+        if (ret)
+                return ret;
+
+        ret = x86_event_sched_in(leader, cpuctx);
+        if (ret)
+                return ret;
+
+        n1 = 1;
+        list_for_each_entry(sub, &leader->sibling_list, group_entry) {
+                if (sub->state > PERF_EVENT_STATE_OFF) {
+                        ret = x86_event_sched_in(sub, cpuctx);
+                        if (ret)
+                                goto undo;
+                        ++n1;
+                }
+        }
        /*
-         * Install the hw-cache-events table:
+         * copy the new assignment now that we know it is possible;
+         * it will be used by hw_perf_enable()
         */
-        switch (boot_cpu_data.x86_model) {
-        case 15: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */
-        case 22: /* single-core 65 nm celeron/core2solo "Merom-L"/"Conroe-L" */
-        case 23: /* current 45 nm celeron/core2/xeon "Penryn"/"Wolfdale" */
-        case 29: /* six-core 45 nm xeon "Dunnington" */
-                memcpy(hw_cache_event_ids, core2_hw_cache_event_ids,
-                       sizeof(hw_cache_event_ids));
-
-                pr_cont("Core2 events, ");
-                event_constraints = intel_core_event_constraints;
-                break;
-        default:
-        case 26:
-                memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
-                       sizeof(hw_cache_event_ids));
+        memcpy(cpuc->assign, assign, n0*sizeof(int));

-                event_constraints = intel_nehalem_event_constraints;
-                pr_cont("Nehalem/Corei7 events, ");
-                break;
-        case 28:
-                memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
-                       sizeof(hw_cache_event_ids));
+        cpuc->n_events = n0;
+        cpuc->n_added = n1;
+        ctx->nr_active += n1;

-                pr_cont("Atom events, ");
-                break;
+        /*
+         * 1 means successful and events are active
+         * This is not quite true because we defer
+         * actual activation until hw_perf_enable() but
+         * this way we ensure the caller won't try to enable
+         * individual events
+         */
+        return 1;
+undo:
+        x86_event_sched_out(leader, cpuctx);
+        n0 = 1;
+        list_for_each_entry(sub, &leader->sibling_list, group_entry) {
+                if (sub->state == PERF_EVENT_STATE_ACTIVE) {
+                        x86_event_sched_out(sub, cpuctx);
+                        if (++n0 == n1)
+                                break;
+                }
         }
-        return 0;
+        return ret;
 }

-static __init int amd_pmu_init(void)
-{
-        /* Performance-monitoring supported from K7 and later: */
-        if (boot_cpu_data.x86 < 6)
-                return -ENODEV;
-
-        x86_pmu = amd_pmu;
-
-        /* Events are common for all AMDs */
-        memcpy(hw_cache_event_ids, amd_hw_cache_event_ids,
-               sizeof(hw_cache_event_ids));
-
-        return 0;
-}
+#include "perf_event_amd.c"
+#include "perf_event_p6.c"
+#include "perf_event_intel.c"

 static void __init pmu_check_apic(void)
 {
@@ -2220,6 +1391,10 @@ void __init init_hw_perf_events(void)
         perf_events_lapic_init();
         register_die_notifier(&perf_event_nmi_notifier);

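+        /*
+         * Default constraint for events without a vendor-specific one:
+         * cmask 0 matches any event, the index mask spans every generic
+         * counter, and the weight equals the number of generic counters.
+         */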
+        unconstrained = (struct event_constraint)
+                __EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_events) - 1,
+                                   0, x86_pmu.num_events);
+
         pr_info("... version: %d\n", x86_pmu.version);
         pr_info("... bit width: %d\n", x86_pmu.event_bits);
         pr_info("... generic registers: %d\n", x86_pmu.num_events);
@@ -2237,50 +1412,79 @@ static inline void x86_pmu_read(struct perf_event *event)
 static const struct pmu pmu = {
         .enable         = x86_pmu_enable,
         .disable        = x86_pmu_disable,
+        .start          = x86_pmu_start,
+        .stop           = x86_pmu_stop,
         .read           = x86_pmu_read,
         .unthrottle     = x86_pmu_unthrottle,
 };

-static int
-validate_event(struct cpu_hw_events *cpuc, struct perf_event *event)
-{
-        struct hw_perf_event fake_event = event->hw;
-
-        if (event->pmu && event->pmu != &pmu)
-                return 0;
-
-        return x86_schedule_event(cpuc, &fake_event) >= 0;
-}
-
+/*
+ * validate a single event group
+ *
+ * validation includes:
+ *        - check events are compatible with each other
+ *        - events do not compete for the same counter
+ *        - number of events <= number of counters
+ *
+ * validation ensures the group can be loaded onto the
+ * PMU if it was the only group available.
+ */
 static int validate_group(struct perf_event *event)
 {
-        struct perf_event *sibling, *leader = event->group_leader;
-        struct cpu_hw_events fake_pmu;
+        struct perf_event *leader = event->group_leader;
+        struct cpu_hw_events *fake_cpuc;
+        int ret, n;

-        memset(&fake_pmu, 0, sizeof(fake_pmu));
+        ret = -ENOMEM;
+        fake_cpuc = kmalloc(sizeof(*fake_cpuc), GFP_KERNEL | __GFP_ZERO);
+        if (!fake_cpuc)
+                goto out;
+
+        /*
+         * the event is not yet connected with its
+         * siblings therefore we must first collect
+         * existing siblings, then add the new event
+         * before we can simulate the scheduling
+         */
+        ret = -ENOSPC;
+        n = collect_events(fake_cpuc, leader, true);
+        if (n < 0)
+                goto out_free;

-        if (!validate_event(&fake_pmu, leader))
-                return -ENOSPC;
+        fake_cpuc->n_events = n;
+        n = collect_events(fake_cpuc, event, false);
+        if (n < 0)
+                goto out_free;

-        list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
-                if (!validate_event(&fake_pmu, sibling))
-                        return -ENOSPC;
-        }
+        fake_cpuc->n_events = n;

-        if (!validate_event(&fake_pmu, event))
-                return -ENOSPC;
+        ret = x86_schedule_events(fake_cpuc, n, NULL);

-        return 0;
+out_free:
+        kfree(fake_cpuc);
+out:
+        return ret;
 }

 const struct pmu *hw_perf_event_init(struct perf_event *event)
 {
+        const struct pmu *tmp;
         int err;

         err = __hw_perf_event_init(event);
         if (!err) {
+                /*
+                 * we temporarily connect event to its pmu
+                 * such that validate_group() can classify
+                 * it as an x86 event using is_x86_event()
+                 */
+                tmp = event->pmu;
+                event->pmu = &pmu;
+
                 if (event->group_leader != event)
                         err = validate_group(event);
+
+                event->pmu = tmp;
         }
         if (err) {
                 if (event->destroy)
@@ -2304,7 +1508,6 @@ void callchain_store(struct perf_callchain_entry *entry, u64 ip)

 static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_irq_entry);
 static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_nmi_entry);
-static DEFINE_PER_CPU(int, in_ignored_frame);


 static void
@@ -2320,10 +1523,6 @@ static void backtrace_warning(void *data, char *msg)

 static int backtrace_stack(void *data, char *name)
 {
-        per_cpu(in_ignored_frame, smp_processor_id()) =
-                x86_is_stack_id(NMI_STACK, name) ||
-                x86_is_stack_id(DEBUG_STACK, name);
-
         return 0;
 }
@@ -2331,9 +1530,6 @@ static void backtrace_address(void *data, unsigned long addr, int reliable)
 {
         struct perf_callchain_entry *entry = data;

-        if (per_cpu(in_ignored_frame, smp_processor_id()))
-                return;
-
         if (reliable)
                 callchain_store(entry, addr);
 }
@@ -2440,9 +1636,6 @@ perf_do_callchain(struct pt_regs *regs, struct perf_callchain_entry *entry)

         is_user = user_mode(regs);

-        if (!current || current->pid == 0)
-                return;
-
         if (is_user && current->state != TASK_RUNNING)
                 return;
@@ -2472,4 +1665,25 @@ struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
 void hw_perf_event_setup_online(int cpu)
 {
         init_debug_store_on_cpu(cpu);
+
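+        /*
+         * Vendor-specific per-CPU setup; only AMD needs it so far, to
+         * set up the shared north-bridge constraint bookkeeping.
+         */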
+        switch (boot_cpu_data.x86_vendor) {
+        case X86_VENDOR_AMD:
+                amd_pmu_cpu_online(cpu);
+                break;
+        default:
+                return;
+        }
+}
+
+void hw_perf_event_setup_offline(int cpu)
+{
+        init_debug_store_on_cpu(cpu);
+
+        switch (boot_cpu_data.x86_vendor) {
+        case X86_VENDOR_AMD:
+                amd_pmu_cpu_offline(cpu);
+                break;
+        default:
+                return;
+        }
 }