@@ -903,10 +903,16 @@ static inline void pebs_update_threshold(struct cpu_hw_events *cpuc)
 {
 	struct debug_store *ds = cpuc->ds;
 	u64 threshold;
+	int reserved;
+
+	if (x86_pmu.flags & PMU_FL_PEBS_ALL)
+		reserved = x86_pmu.max_pebs_events + x86_pmu.num_counters_fixed;
+	else
+		reserved = x86_pmu.max_pebs_events;
 
 	if (cpuc->n_pebs == cpuc->n_large_pebs) {
 		threshold = ds->pebs_absolute_maximum -
-			x86_pmu.max_pebs_events * x86_pmu.pebs_record_size;
+			reserved * x86_pmu.pebs_record_size;
 	} else {
 		threshold = ds->pebs_buffer_base + x86_pmu.pebs_record_size;
 	}
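With PMU_FL_PEBS_ALL set, fixed counters can also log PEBS records, so the large-PEBS threshold has to leave headroom for one record per counter that might fire, not just per general-purpose counter. A minimal userspace sketch of that arithmetic follows; the struct, its field values, and the numeric value of PMU_FL_PEBS_ALL are illustrative stand-ins, not the kernel's definitions.

#include <stdint.h>
#include <stdio.h>

#define PMU_FL_PEBS_ALL 0x1	/* illustrative flag value, not the kernel's */

/* Illustrative stand-in for the x86_pmu fields the patch reads. */
struct pmu_caps {
	unsigned int flags;
	int max_pebs_events;	/* general-purpose PEBS-capable counters */
	int num_counters_fixed;	/* fixed counters */
	int pebs_record_size;	/* bytes per PEBS record */
};

/* One record of headroom per counter that may log a PEBS record. */
static uint64_t large_pebs_threshold(const struct pmu_caps *p,
				     uint64_t pebs_absolute_maximum)
{
	int reserved = p->max_pebs_events;

	if (p->flags & PMU_FL_PEBS_ALL)
		reserved += p->num_counters_fixed;

	return pebs_absolute_maximum - (uint64_t)reserved * p->pebs_record_size;
}

int main(void)
{
	struct pmu_caps p = { PMU_FL_PEBS_ALL, 4, 3, 192 };

	/* (4 + 3) * 192 = 1344 bytes reserved below the absolute maximum. */
	printf("headroom = %llu bytes\n",
	       (unsigned long long)(65536 - large_pebs_threshold(&p, 65536)));
	return 0;
}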
@@ -970,7 +976,11 @@ void intel_pmu_pebs_enable(struct perf_event *event)
 	 * This must be done in pmu::start(), because PERF_EVENT_IOC_PERIOD.
 	 */
 	if (hwc->flags & PERF_X86_EVENT_AUTO_RELOAD) {
-		ds->pebs_event_reset[hwc->idx] =
+		unsigned int idx = hwc->idx;
+
+		if (idx >= INTEL_PMC_IDX_FIXED)
+			idx = MAX_PEBS_EVENTS + (idx - INTEL_PMC_IDX_FIXED);
+		ds->pebs_event_reset[idx] =
 			(u64)(-hwc->sample_period) & x86_pmu.cntval_mask;
 	} else {
 		ds->pebs_event_reset[hwc->idx] = 0;
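The auto-reload reset values live in a flat DS array: the general-purpose counter slots come first, then the fixed-counter slots, while a fixed counter's global index starts at INTEL_PMC_IDX_FIXED. A hedged sketch of that translation, using the values these macros had in the kernel headers at the time (8 and 32):

#include <assert.h>

/* Values from the kernel headers contemporary with this patch;
 * written out as plain constants for illustration. */
#define MAX_PEBS_EVENTS		8
#define INTEL_PMC_IDX_FIXED	32

/* Map a global counter index onto its pebs_event_reset[] slot:
 * GP counters map 1:1, fixed counters follow the 8 GP slots. */
static unsigned int pebs_reset_slot(unsigned int idx)
{
	if (idx >= INTEL_PMC_IDX_FIXED)
		idx = MAX_PEBS_EVENTS + (idx - INTEL_PMC_IDX_FIXED);
	return idx;
}

int main(void)
{
	assert(pebs_reset_slot(2) == 2);			/* GP counter 2 */
	assert(pebs_reset_slot(INTEL_PMC_IDX_FIXED) == 8);	/* fixed counter 0 */
	assert(pebs_reset_slot(INTEL_PMC_IDX_FIXED + 2) == 10);	/* fixed counter 2 */
	return 0;
}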
@@ -1488,9 +1498,10 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
 	struct debug_store *ds = cpuc->ds;
 	struct perf_event *event;
 	void *base, *at, *top;
-	short counts[MAX_PEBS_EVENTS] = {};
-	short error[MAX_PEBS_EVENTS] = {};
-	int bit, i;
+	short counts[INTEL_PMC_IDX_FIXED + MAX_FIXED_PEBS_EVENTS] = {};
+	short error[INTEL_PMC_IDX_FIXED + MAX_FIXED_PEBS_EVENTS] = {};
+	int bit, i, size;
+	u64 mask;
 
 	if (!x86_pmu.pebs_active)
 		return;
@@ -1500,6 +1511,13 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
 
 	ds->pebs_index = ds->pebs_buffer_base;
 
+	mask = (1ULL << x86_pmu.max_pebs_events) - 1;
+	size = x86_pmu.max_pebs_events;
+	if (x86_pmu.flags & PMU_FL_PEBS_ALL) {
+		mask |= ((1ULL << x86_pmu.num_counters_fixed) - 1) << INTEL_PMC_IDX_FIXED;
+		size = INTEL_PMC_IDX_FIXED + x86_pmu.num_counters_fixed;
+	}
+
 	if (unlikely(base >= top)) {
 		/*
 		 * The drain_pebs() could be called twice in a short period
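In cpuc->pebs_enabled (and in the hardware status word) the general-purpose counters occupy the low bits while the fixed counters sit at bit INTEL_PMC_IDX_FIXED and up, so the patch widens both the status mask and the loop bound instead of packing the bits together. A small sketch of the mask construction, again with illustrative inputs:

#include <stdint.h>
#include <stdio.h>

#define INTEL_PMC_IDX_FIXED	32	/* kernel header value, for illustration */

/* Builds the same status mask the patch computes: low bits for the
 * general-purpose PEBS counters and, when all counters support PEBS,
 * a second bit field starting at bit 32 for the fixed counters. */
static uint64_t pebs_status_mask(int max_pebs_events,
				 int num_counters_fixed, int pebs_all)
{
	uint64_t mask = (1ULL << max_pebs_events) - 1;

	if (pebs_all)
		mask |= ((1ULL << num_counters_fixed) - 1) << INTEL_PMC_IDX_FIXED;
	return mask;
}

int main(void)
{
	/* 4 GP counters + 3 fixed counters -> 0x70000000f. */
	printf("mask = %#llx\n",
	       (unsigned long long)pebs_status_mask(4, 3, 1));
	return 0;
}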
@@ -1509,7 +1527,7 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
 		 * update the event->count for this case.
 		 */
 		for_each_set_bit(bit, (unsigned long *)&cpuc->pebs_enabled,
-				 x86_pmu.max_pebs_events) {
+				 size) {
 			event = cpuc->events[bit];
 			if (event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD)
 				intel_pmu_save_and_restart_reload(event, 0);
@@ -1522,12 +1540,12 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
 		u64 pebs_status;
 
 		pebs_status = p->status & cpuc->pebs_enabled;
-		pebs_status &= (1ULL << x86_pmu.max_pebs_events) - 1;
+		pebs_status &= mask;
 
 		/* PEBS v3 has more accurate status bits */
 		if (x86_pmu.intel_cap.pebs_format >= 3) {
 			for_each_set_bit(bit, (unsigned long *)&pebs_status,
-					 x86_pmu.max_pebs_events)
+					 size)
 				counts[bit]++;
 
 			continue;
@@ -1575,7 +1593,7 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
 			counts[bit]++;
 	}
 
-	for (bit = 0; bit < x86_pmu.max_pebs_events; bit++) {
+	for (bit = 0; bit < size; bit++) {
 		if ((counts[bit] == 0) && (error[bit] == 0))
 			continue;
 
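The remaining hunks only swap x86_pmu.max_pebs_events for the widened mask/size pair, which is why counts[] and error[] grow to INTEL_PMC_IDX_FIXED + MAX_FIXED_PEBS_EVENTS entries (35 with the header values of 32 and 3): a fixed counter's status bit indexes those arrays directly, leaving an unused gap between the GP and fixed ranges. A toy userspace walk over such a sparse mask, standing in for the kernel's for_each_set_bit():

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t pebs_status = (1ULL << 1) | (1ULL << 33);	/* GP1 + fixed counter 1 */
	short counts[35] = { 0 };	/* INTEL_PMC_IDX_FIXED + 3 fixed slots */

	/* Bit numbers map 1:1 onto the enlarged counts[] array. */
	for (int bit = 0; bit < 35; bit++)
		if (pebs_status & (1ULL << bit))
			counts[bit]++;

	printf("GP1=%d fixed1=%d\n", counts[1], counts[33]);
	return 0;
}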