@@ -108,15 +108,31 @@ static u64 precise_store_data(u64 status)
 	return val;
 }
 
-static u64 precise_store_data_hsw(u64 status)
+static u64 precise_store_data_hsw(struct perf_event *event, u64 status)
 {
 	union perf_mem_data_src dse;
+	u64 cfg = event->hw.config & INTEL_ARCH_EVENT_MASK;
 
 	dse.val = 0;
 	dse.mem_op = PERF_MEM_OP_STORE;
 	dse.mem_lvl = PERF_MEM_LVL_NA;
+
+	/*
+	 * L1 info only valid for following events:
+	 *
+	 * MEM_UOPS_RETIRED.STLB_MISS_STORES
+	 * MEM_UOPS_RETIRED.LOCK_STORES
+	 * MEM_UOPS_RETIRED.SPLIT_STORES
+	 * MEM_UOPS_RETIRED.ALL_STORES
+	 */
+	if (cfg != 0x12d0 && cfg != 0x22d0 && cfg != 0x42d0 && cfg != 0x82d0)
+		return dse.mem_lvl;
+
 	if (status & 1)
-		dse.mem_lvl = PERF_MEM_LVL_L1;
+		dse.mem_lvl = PERF_MEM_LVL_L1 | PERF_MEM_LVL_HIT;
+	else
+		dse.mem_lvl = PERF_MEM_LVL_L1 | PERF_MEM_LVL_MISS;
+
 	/* Nothing else supported. Sorry. */
 	return dse.val;
 }
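The cfg test added above filters on the architectural event encoding: INTEL_ARCH_EVENT_MASK keeps only the event-select byte (bits 7:0) and the umask byte (bits 15:8) of the event config, so 0x12d0 means event 0xd0 with umask 0x12, and likewise for the other three store umasks. A minimal standalone sketch of that decoding (userspace C, with the four whitelisted values hard-coded here purely for illustration):

#include <stdio.h>

/*
 * Decode the four cfg values whitelisted above:
 * cfg = (umask << 8) | event_select, per INTEL_ARCH_EVENT_MASK.
 * All four resolve to event 0xd0 (MEM_UOPS_RETIRED) with a
 * store-flavored umask.
 */
int main(void)
{
	static const struct { unsigned int cfg; const char *name; } evs[] = {
		{ 0x12d0, "MEM_UOPS_RETIRED.STLB_MISS_STORES" },
		{ 0x22d0, "MEM_UOPS_RETIRED.LOCK_STORES" },
		{ 0x42d0, "MEM_UOPS_RETIRED.SPLIT_STORES" },
		{ 0x82d0, "MEM_UOPS_RETIRED.ALL_STORES" },
	};

	for (unsigned int i = 0; i < sizeof(evs) / sizeof(evs[0]); i++)
		printf("%-36s event=0x%02x umask=0x%02x\n", evs[i].name,
		       evs[i].cfg & 0xff, (evs[i].cfg >> 8) & 0xff);
	return 0;
}

For any other event, the function now bails out early with PERF_MEM_LVL_NA instead of misreporting an L1 level based on a status bit that is only defined for these four events.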
@@ -887,7 +903,7 @@ static void __intel_pmu_pebs_event(struct perf_event *event,
 			data.data_src.val = load_latency_data(pebs->dse);
 		else if (event->hw.flags & PERF_X86_EVENT_PEBS_ST_HSW)
 			data.data_src.val =
-				precise_store_data_hsw(pebs->dse);
+				precise_store_data_hsw(event, pebs->dse);
 		else
 			data.data_src.val = precise_store_data(pebs->dse);
 	}
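For reference, the value returned by precise_store_data_hsw() reaches userspace via PERF_SAMPLE_DATA_SRC. A sketch of how a consumer might decode the resulting level bits, using only the uapi definitions from <linux/perf_event.h> (the helper name decode_store_src is made up for illustration):

#include <linux/perf_event.h>
#include <stdio.h>

/* Sketch: interpret the mem_op/mem_lvl fields produced above. */
static void decode_store_src(__u64 data_src)
{
	union perf_mem_data_src dse = { .val = data_src };

	if (!(dse.mem_op & PERF_MEM_OP_STORE))
		return;

	if (dse.mem_lvl & PERF_MEM_LVL_HIT)
		printf("store: L1 hit\n");
	else if (dse.mem_lvl & PERF_MEM_LVL_MISS)
		printf("store: L1 miss\n");
	else
		printf("store: level not available\n"); /* PERF_MEM_LVL_NA */
}

These are the same bits that perf mem report aggregates into its memory-level columns, which is why the HIT/MISS distinction added by this patch matters to existing tooling.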