@@ -108,10 +108,9 @@ static u64 precise_store_data(u64 status)
 
 	return val;
 }
 
-static u64 precise_store_data_hsw(struct perf_event *event, u64 status)
+static u64 precise_datala_hsw(struct perf_event *event, u64 status)
 {
 	union perf_mem_data_src dse;
-	u64 cfg = event->hw.config & INTEL_ARCH_EVENT_MASK;
 
 	dse.val = PERF_MEM_NA;
@@ -128,15 +127,12 @@ static u64 precise_store_data_hsw(struct perf_event *event, u64 status)
 	 * MEM_UOPS_RETIRED.SPLIT_STORES
 	 * MEM_UOPS_RETIRED.ALL_STORES
 	 */
-	if (cfg != 0x12d0 && cfg != 0x22d0 && cfg != 0x42d0 && cfg != 0x82d0)
-		return dse.val;
-
-	if (status & 1)
-		dse.mem_lvl = PERF_MEM_LVL_L1 | PERF_MEM_LVL_HIT;
-	else
-		dse.mem_lvl = PERF_MEM_LVL_L1 | PERF_MEM_LVL_MISS;
-
-	/* Nothing else supported. Sorry. */
+	if (event->hw.flags & PERF_X86_EVENT_PEBS_ST_HSW) {
+		if (status & 1)
+			dse.mem_lvl = PERF_MEM_LVL_L1 | PERF_MEM_LVL_HIT;
+		else
+			dse.mem_lvl = PERF_MEM_LVL_L1 | PERF_MEM_LVL_MISS;
+	}
 	return dse.val;
 }
 
@@ -825,6 +821,10 @@ static inline u64 intel_hsw_transaction(struct pebs_record_hsw *pebs)
 static void __intel_pmu_pebs_event(struct perf_event *event,
 				   struct pt_regs *iregs, void *__pebs)
 {
+#define PERF_X86_EVENT_PEBS_HSW_PREC \
+		(PERF_X86_EVENT_PEBS_ST_HSW | \
+		 PERF_X86_EVENT_PEBS_LD_HSW | \
+		 PERF_X86_EVENT_PEBS_NA_HSW)
 	/*
 	 * We cast to the biggest pebs_record but are careful not to
 	 * unconditionally access the 'extra' entries.
@@ -834,47 +834,40 @@ static void __intel_pmu_pebs_event(struct perf_event *event,
 	struct perf_sample_data data;
 	struct pt_regs regs;
 	u64 sample_type;
-	int fll, fst;
+	int fll, fst, dsrc;
+	int fl = event->hw.flags;
 
 	if (!intel_pmu_save_and_restart(event))
 		return;
 
-	fll = event->hw.flags & PERF_X86_EVENT_PEBS_LDLAT;
-	fst = event->hw.flags & (PERF_X86_EVENT_PEBS_ST |
-				 PERF_X86_EVENT_PEBS_ST_HSW |
-				 PERF_X86_EVENT_PEBS_LD_HSW |
-				 PERF_X86_EVENT_PEBS_NA_HSW);
+	sample_type = event->attr.sample_type;
+	dsrc = sample_type & PERF_SAMPLE_DATA_SRC;
+
+	fll = fl & PERF_X86_EVENT_PEBS_LDLAT;
+	fst = fl & (PERF_X86_EVENT_PEBS_ST | PERF_X86_EVENT_PEBS_HSW_PREC);
 
 	perf_sample_data_init(&data, 0, event->hw.last_period);
 
 	data.period = event->hw.last_period;
-	sample_type = event->attr.sample_type;
 
 	/*
-	 * if PEBS-LL or PreciseStore
+	 * Use latency for weight (only avail with PEBS-LL)
 	 */
-	if (fll || fst) {
-		/*
-		 * Use latency for weight (only avail with PEBS-LL)
-		 */
-		if (fll && (sample_type & PERF_SAMPLE_WEIGHT))
-			data.weight = pebs->lat;
-
-		/*
-		 * data.data_src encodes the data source
-		 */
-		if (sample_type & PERF_SAMPLE_DATA_SRC) {
-			if (fll)
-				data.data_src.val = load_latency_data(pebs->dse);
-			else if (event->hw.flags &
-				 (PERF_X86_EVENT_PEBS_ST_HSW|
-				  PERF_X86_EVENT_PEBS_LD_HSW|
-				  PERF_X86_EVENT_PEBS_NA_HSW))
-				data.data_src.val =
-					precise_store_data_hsw(event, pebs->dse);
-			else
-				data.data_src.val = precise_store_data(pebs->dse);
-		}
+	if (fll && (sample_type & PERF_SAMPLE_WEIGHT))
+		data.weight = pebs->lat;
+
+	/*
+	 * data.data_src encodes the data source
+	 */
+	if (dsrc) {
+		u64 val = PERF_MEM_NA;
+		if (fll)
+			val = load_latency_data(pebs->dse);
+		else if (fst && (fl & PERF_X86_EVENT_PEBS_HSW_PREC))
+			val = precise_datala_hsw(event, pebs->dse);
+		else if (fst)
+			val = precise_store_data(pebs->dse);
+		data.data_src.val = val;
 	}
 
 	/*
@@ -901,16 +894,16 @@ static void __intel_pmu_pebs_event(struct perf_event *event,
 	else
 		regs.flags &= ~PERF_EFLAGS_EXACT;
 
-	if ((event->attr.sample_type & PERF_SAMPLE_ADDR) &&
+	if ((sample_type & PERF_SAMPLE_ADDR) &&
 	    x86_pmu.intel_cap.pebs_format >= 1)
 		data.addr = pebs->dla;
 
 	if (x86_pmu.intel_cap.pebs_format >= 2) {
 		/* Only set the TSX weight when no memory weight. */
-		if ((event->attr.sample_type & PERF_SAMPLE_WEIGHT) && !fll)
+		if ((sample_type & PERF_SAMPLE_WEIGHT) && !fll)
 			data.weight = intel_hsw_weight(pebs);
 
-		if (event->attr.sample_type & PERF_SAMPLE_TRANSACTION)
+		if (sample_type & PERF_SAMPLE_TRANSACTION)
			data.txn = intel_hsw_transaction(pebs);
 	}
 
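Note (not part of the patch): a minimal, self-contained userspace sketch of
the data-source encoding that precise_datala_hsw() performs and that a
consumer of PERF_SAMPLE_DATA_SRC would decode. The names SKETCH_MEM_NA and
encode_datala() are hypothetical; SKETCH_MEM_NA re-derives the
kernel-internal PERF_MEM_NA value from the uapi PERF_MEM_S() helper.

#include <stdio.h>
#include <linux/perf_event.h>

/* Mirrors the kernel-internal PERF_MEM_NA: every field "not available". */
#define SKETCH_MEM_NA	(PERF_MEM_S(OP, NA)    | \
			 PERF_MEM_S(LVL, NA)   | \
			 PERF_MEM_S(SNOOP, NA) | \
			 PERF_MEM_S(LOCK, NA)  | \
			 PERF_MEM_S(TLB, NA))

/*
 * Same shape as the patched helper: for HSW precise store events, bit 0
 * of the PEBS dse status selects L1 hit vs. L1 miss; all other fields
 * stay "not available".
 */
static __u64 encode_datala(__u64 status)
{
	union perf_mem_data_src dse;

	dse.val = SKETCH_MEM_NA;
	if (status & 1)
		dse.mem_lvl = PERF_MEM_LVL_L1 | PERF_MEM_LVL_HIT;
	else
		dse.mem_lvl = PERF_MEM_LVL_L1 | PERF_MEM_LVL_MISS;
	return dse.val;
}

int main(void)
{
	union perf_mem_data_src d = { .val = encode_datala(1) };

	printf("L1 %s\n", (d.mem_lvl & PERF_MEM_LVL_HIT) ? "hit" : "miss");
	return 0;
}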