@@ -28,6 +28,11 @@
 static DEFINE_PER_CPU(struct arm_pmu *, cpu_armpmu);
 static DEFINE_PER_CPU(int, cpu_irq);
 
+static inline u64 arm_pmu_max_period(void)
+{
+	return (1ULL << 32) - 1;
+}
+
 static int
 armpmu_map_cache_event(const unsigned (*cache_map)
 				      [PERF_COUNT_HW_CACHE_MAX]
@@ -114,8 +119,10 @@ int armpmu_event_set_period(struct perf_event *event)
 	struct hw_perf_event *hwc = &event->hw;
 	s64 left = local64_read(&hwc->period_left);
 	s64 period = hwc->sample_period;
+	u64 max_period;
 	int ret = 0;
 
+	max_period = arm_pmu_max_period();
 	if (unlikely(left <= -period)) {
 		left = period;
 		local64_set(&hwc->period_left, left);
@@ -136,8 +143,8 @@ int armpmu_event_set_period(struct perf_event *event)
 	 * effect we are reducing max_period to account for
 	 * interrupt latency (and we are being very conservative).
 	 */
-	if (left > (armpmu->max_period >> 1))
-		left = armpmu->max_period >> 1;
+	if (left > (max_period >> 1))
+		left = (max_period >> 1);
 
 	local64_set(&hwc->prev_count, (u64)-left);
 
@@ -153,6 +160,7 @@ u64 armpmu_event_update(struct perf_event *event)
 	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
 	struct hw_perf_event *hwc = &event->hw;
 	u64 delta, prev_raw_count, new_raw_count;
+	u64 max_period = arm_pmu_max_period();
 
 again:
 	prev_raw_count = local64_read(&hwc->prev_count);
@@ -162,7 +170,7 @@ again:
 			    new_raw_count) != prev_raw_count)
 		goto again;
 
-	delta = (new_raw_count - prev_raw_count) & armpmu->max_period;
+	delta = (new_raw_count - prev_raw_count) & max_period;
 
 	local64_add(delta, &event->count);
 	local64_sub(delta, &hwc->period_left);
@@ -402,7 +410,7 @@ __hw_perf_event_init(struct perf_event *event)
 	 * is far less likely to overtake the previous one unless
 	 * you have some serious IRQ latency issues.
 	 */
-	hwc->sample_period = arm_pmu_max_period() >> 1;
+	hwc->sample_period = arm_pmu_max_period() >> 1;
 	hwc->last_period = hwc->sample_period;
 	local64_set(&hwc->period_left, hwc->sample_period);
 }
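
For context, the `& max_period` mask in `armpmu_event_update()` is what keeps the delta correct when a 32-bit counter wraps between two reads. Below is a minimal standalone sketch (userspace C, not kernel code) of that arithmetic; the raw counter values are hypothetical and chosen only to demonstrate a wraparound.

```c
#include <stdio.h>
#include <stdint.h>

/* Mirrors the helper added by the patch: counters are treated as 32 bits
 * wide, so the largest raw value is 2^32 - 1. */
static inline uint64_t arm_pmu_max_period(void)
{
	return (1ULL << 32) - 1;
}

int main(void)
{
	uint64_t max_period = arm_pmu_max_period();

	/* Hypothetical raw reads: the counter wrapped past 2^32 between
	 * the two samples. */
	uint64_t prev_raw_count = 0xfffffff0ULL;	/* just before the wrap */
	uint64_t new_raw_count  = 0x00000010ULL;	/* just after the wrap */

	/* Same arithmetic as armpmu_event_update(): the subtraction is done
	 * in 64 bits and the mask discards the borrow, leaving the number of
	 * events that actually occurred (0x20 here). */
	uint64_t delta = (new_raw_count - prev_raw_count) & max_period;

	printf("delta = 0x%llx\n", (unsigned long long)delta);
	return 0;
}
```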