@@ -897,6 +897,26 @@ void perf_evsel__compute_deltas(struct perf_evsel *evsel, int cpu,
 	count->run = count->run - tmp.run;
 }
 
+void perf_counts_values__scale(struct perf_counts_values *count,
+			       bool scale, s8 *pscaled)
+{
+	s8 scaled = 0;
+
+	if (scale) {
+		if (count->run == 0) {
+			scaled = -1;
+			count->val = 0;
+		} else if (count->run < count->ena) {
+			scaled = 1;
+			count->val = (u64)((double) count->val * count->ena / count->run + 0.5);
+		}
+	} else
+		count->ena = count->run = 0;
+
+	if (pscaled)
+		*pscaled = scaled;
+}
+
 int __perf_evsel__read_on_cpu(struct perf_evsel *evsel,
 			      int cpu, int thread, bool scale)
 {
@@ -913,15 +933,7 @@ int __perf_evsel__read_on_cpu(struct perf_evsel *evsel,
 		return -errno;
 
 	perf_evsel__compute_deltas(evsel, cpu, &count);
-
-	if (scale) {
-		if (count.run == 0)
-			count.val = 0;
-		else if (count.run < count.ena)
-			count.val = (u64)((double)count.val * count.ena / count.run + 0.5);
-	} else
-		count.ena = count.run = 0;
-
+	perf_counts_values__scale(&count, scale, NULL);
 	evsel->counts->cpu[cpu] = count;
 	return 0;
 }
@@ -956,22 +968,7 @@ int __perf_evsel__read(struct perf_evsel *evsel,
 	}
 
 	perf_evsel__compute_deltas(evsel, -1, aggr);
-
-	evsel->counts->scaled = 0;
-	if (scale) {
-		if (aggr->run == 0) {
-			evsel->counts->scaled = -1;
-			aggr->val = 0;
-			return 0;
-		}
-
-		if (aggr->run < aggr->ena) {
-			evsel->counts->scaled = 1;
-			aggr->val = (u64)((double)aggr->val * aggr->ena / aggr->run + 0.5);
-		}
-	} else
-		aggr->ena = aggr->run = 0;
-
+	perf_counts_values__scale(aggr, scale, &evsel->counts->scaled);
 	return 0;
 }
 
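
For readers who want to experiment with the extracted scaling logic outside the perf tree, here is a minimal standalone sketch. The struct and the u64/s8 typedefs below are simplified stand-ins for perf's internal perf_counts_values and the kernel typedefs (assumptions for illustration, not the actual headers); the arithmetic mirrors the hunk above: when the event was multiplexed (run < ena), the raw value is extrapolated by ena/run and rounded to the nearest integer.

/*
 * Standalone sketch of the scaling helper introduced above.
 * Types here are illustrative stand-ins, not perf's real headers.
 */
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>
#include <inttypes.h>

typedef uint64_t u64;
typedef int8_t s8;

struct counts_values {
	u64 val;	/* raw counter value */
	u64 ena;	/* time the event was enabled */
	u64 run;	/* time the event was actually running */
};

/* Extrapolate a multiplexed counter: val * ena / run, rounded to nearest. */
static void counts_values__scale(struct counts_values *count,
				 bool scale, s8 *pscaled)
{
	s8 scaled = 0;

	if (scale) {
		if (count->run == 0) {
			scaled = -1;		/* event never ran */
			count->val = 0;
		} else if (count->run < count->ena) {
			scaled = 1;		/* event was multiplexed */
			count->val = (u64)((double)count->val *
					   count->ena / count->run + 0.5);
		}
	} else
		count->ena = count->run = 0;

	if (pscaled)
		*pscaled = scaled;
}

int main(void)
{
	/* Example: event ran for half of the time it was enabled. */
	struct counts_values c = { .val = 1000, .ena = 200, .run = 100 };
	s8 scaled;

	counts_values__scale(&c, true, &scaled);
	printf("val=%" PRIu64 " scaled=%d\n", c.val, scaled);	/* val=2000 scaled=1 */
	return 0;
}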