@@ -3527,7 +3527,7 @@ static void perf_event_for_each(struct perf_event *event,
 static int perf_event_period(struct perf_event *event, u64 __user *arg)
 {
 	struct perf_event_context *ctx = event->ctx;
-	int ret = 0;
+	int ret = 0, active;
 	u64 value;
 
 	if (!is_sampling_event(event))
@@ -3551,6 +3551,20 @@ static int perf_event_period(struct perf_event *event, u64 __user *arg)
 		event->attr.sample_period = value;
 		event->hw.sample_period = value;
 	}
+
+	active = (event->state == PERF_EVENT_STATE_ACTIVE);
+	if (active) {
+		perf_pmu_disable(ctx->pmu);
+		event->pmu->stop(event, PERF_EF_UPDATE);
+	}
+
+	local64_set(&event->hw.period_left, 0);
+
+	if (active) {
+		event->pmu->start(event, PERF_EF_RELOAD);
+		perf_pmu_enable(ctx->pmu);
+	}
+
 unlock:
 	raw_spin_unlock_irq(&ctx->lock);
 
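For context, the added hunk follows the usual stop/modify/start idiom for PMU events: if the event is ACTIVE it is stopped with PERF_EF_UPDATE (which folds the outstanding hardware count into the event), the software state is changed while the counter is quiet, and the event is restarted with PERF_EF_RELOAD so the PMU reprograms the counter from the updated state. Zeroing hw.period_left forces the reload to pick up the new sample_period immediately rather than letting the stale leftover period run out first. Below is a minimal sketch of that idiom factored into a standalone helper; the helper name and the modify callback are hypothetical, for illustration only, and are not part of this patch, which open-codes the pattern.

/*
 * Illustrative sketch only: a generic "stop, modify, restart" helper
 * in the style of the hunk above.  perf_event_modify_stopped() and
 * the modify callback are hypothetical names, not kernel APIs.
 */
static void perf_event_modify_stopped(struct perf_event *event,
				      void (*modify)(struct perf_event *))
{
	int active = (event->state == PERF_EVENT_STATE_ACTIVE);

	if (active) {
		/* Keep the PMU from rescheduling while the event is stopped. */
		perf_pmu_disable(event->ctx->pmu);
		/* PERF_EF_UPDATE folds the pending hardware count into the event. */
		event->pmu->stop(event, PERF_EF_UPDATE);
	}

	/* Mutate software state while the counter is quiescent,
	 * e.g. local64_set(&event->hw.period_left, 0) as in the patch. */
	modify(event);

	if (active) {
		/* PERF_EF_RELOAD reprograms the counter from the new state. */
		event->pmu->start(event, PERF_EF_RELOAD);
		perf_pmu_enable(event->ctx->pmu);
	}
}

As in the patch itself, this sketch assumes the caller holds ctx->lock with interrupts disabled (raw_spin_lock_irq in perf_event_period()), so the event cannot be scheduled in or out underneath the update.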