@@ -1868,8 +1868,6 @@ event_sched_in(struct perf_event *event,
 
 	perf_pmu_disable(event->pmu);
 
-	event->tstamp_running += tstamp - event->tstamp_stopped;
-
 	perf_set_shadow_time(event, ctx, tstamp);
 
 	perf_log_itrace_start(event);
@@ -1881,6 +1879,8 @@ event_sched_in(struct perf_event *event,
 		goto out;
 	}
 
+	event->tstamp_running += tstamp - event->tstamp_stopped;
+
 	if (!is_software_event(event))
		cpuctx->active_oncpu++;
	if (!ctx->nr_active++)
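
(Aside, not part of the patch: the tstamp_running delta accounted above is what user space later reads back as time_running when a counter is opened with PERF_FORMAT_TOTAL_TIME_ENABLED and PERF_FORMAT_TOTAL_TIME_RUNNING. A minimal user-space sketch of that read path follows; the event choice and the thin error handling are purely illustrative.)

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <string.h>
#include <stdint.h>
#include <stdio.h>

struct read_buf { uint64_t value, time_enabled, time_running; };

int main(void)
{
	struct perf_event_attr attr;
	struct read_buf rb;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_HARDWARE;
	attr.size = sizeof(attr);
	attr.config = PERF_COUNT_HW_INSTRUCTIONS;
	attr.disabled = 1;
	attr.read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
			   PERF_FORMAT_TOTAL_TIME_RUNNING;

	/* count this task on any CPU */
	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0)
		return 1;

	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	/* ... workload under measurement ... */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	if (read(fd, &rb, sizeof(rb)) == sizeof(rb))
		printf("count=%llu enabled=%llu running=%llu\n",
		       (unsigned long long)rb.value,
		       (unsigned long long)rb.time_enabled,
		       (unsigned long long)rb.time_running);
	close(fd);
	return 0;
}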
@@ -3958,28 +3958,21 @@ static void perf_event_for_each(struct perf_event *event,
 		perf_event_for_each_child(sibling, func);
 }
 
-static int perf_event_period(struct perf_event *event, u64 __user *arg)
-{
-	struct perf_event_context *ctx = event->ctx;
-	int ret = 0, active;
+struct period_event {
+	struct perf_event *event;
 	u64 value;
+};
 
-	if (!is_sampling_event(event))
-		return -EINVAL;
-
-	if (copy_from_user(&value, arg, sizeof(value)))
-		return -EFAULT;
-
-	if (!value)
-		return -EINVAL;
+static int __perf_event_period(void *info)
+{
+	struct period_event *pe = info;
+	struct perf_event *event = pe->event;
+	struct perf_event_context *ctx = event->ctx;
+	u64 value = pe->value;
+	bool active;
 
-	raw_spin_lock_irq(&ctx->lock);
+	raw_spin_lock(&ctx->lock);
 	if (event->attr.freq) {
-		if (value > sysctl_perf_event_sample_rate) {
-			ret = -EINVAL;
-			goto unlock;
-		}
-
 		event->attr.sample_freq = value;
 	} else {
 		event->attr.sample_period = value;
@@ -3998,11 +3991,53 @@ static int perf_event_period(struct perf_event *event, u64 __user *arg)
 		event->pmu->start(event, PERF_EF_RELOAD);
 		perf_pmu_enable(ctx->pmu);
 	}
+	raw_spin_unlock(&ctx->lock);
 
-unlock:
+	return 0;
+}
+
+static int perf_event_period(struct perf_event *event, u64 __user *arg)
+{
+	struct period_event pe = { .event = event, };
+	struct perf_event_context *ctx = event->ctx;
+	struct task_struct *task;
+	u64 value;
+
+	if (!is_sampling_event(event))
+		return -EINVAL;
+
+	if (copy_from_user(&value, arg, sizeof(value)))
+		return -EFAULT;
+
+	if (!value)
+		return -EINVAL;
+
+	if (event->attr.freq && value > sysctl_perf_event_sample_rate)
+		return -EINVAL;
+
+	task = ctx->task;
+	pe.value = value;
+
+	if (!task) {
+		cpu_function_call(event->cpu, __perf_event_period, &pe);
+		return 0;
+	}
+
+retry:
+	if (!task_function_call(task, __perf_event_period, &pe))
+		return 0;
+
+	raw_spin_lock_irq(&ctx->lock);
+	if (ctx->is_active) {
+		raw_spin_unlock_irq(&ctx->lock);
+		task = ctx->task;
+		goto retry;
+	}
+
+	__perf_event_period(&pe);
 	raw_spin_unlock_irq(&ctx->lock);
 
-	return ret;
+	return 0;
 }
 
 static const struct file_operations perf_fops;
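
(Aside, not part of the patch: perf_event_period() is the kernel side of the PERF_EVENT_IOC_PERIOD ioctl; after this change the update is pushed to the CPU where the event is scheduled via cpu_function_call()/task_function_call() rather than being applied directly under ctx->lock from the caller. A rough user-space sketch of that ioctl follows; the event type and the period values are arbitrary.)

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <string.h>
#include <stdint.h>

int main(void)
{
	struct perf_event_attr attr;
	uint64_t new_period = 250000;	/* arbitrary value for the sketch */
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_HARDWARE;
	attr.size = sizeof(attr);
	attr.config = PERF_COUNT_HW_CPU_CYCLES;
	attr.sample_period = 100000;
	attr.sample_type = PERF_SAMPLE_IP;
	attr.disabled = 1;

	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0)
		return 1;

	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	/* ... later, retune the sampling period on the live event; the
	 * kernel applies it via __perf_event_period() on the CPU where
	 * the event is currently scheduled in. */
	ioctl(fd, PERF_EVENT_IOC_PERIOD, &new_period);

	close(fd);
	return 0;
}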
@@ -4740,12 +4775,20 @@ static const struct file_operations perf_fops = {
  * to user-space before waking everybody up.
  */
 
+static inline struct fasync_struct **perf_event_fasync(struct perf_event *event)
+{
+	/* only the parent has fasync state */
+	if (event->parent)
+		event = event->parent;
+	return &event->fasync;
+}
+
 void perf_event_wakeup(struct perf_event *event)
 {
 	ring_buffer_wakeup(event);
 
 	if (event->pending_kill) {
-		kill_fasync(&event->fasync, SIGIO, event->pending_kill);
+		kill_fasync(perf_event_fasync(event), SIGIO, event->pending_kill);
 		event->pending_kill = 0;
 	}
 }
@@ -6124,7 +6167,7 @@ static int __perf_event_overflow(struct perf_event *event,
 	else
 		perf_event_output(event, data, regs);
 
-	if (event->fasync && event->pending_kill) {
+	if (*perf_event_fasync(event) && event->pending_kill) {
 		event->pending_wakeup = 1;
 		irq_work_queue(&event->pending);
 	}
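
(Aside, not part of the patch: the fasync state tested in __perf_event_overflow() is what turns a counter overflow into a SIGIO once the perf fd has been switched to signal-driven I/O; since only the parent event of an inherited hierarchy carries that state, perf_event_fasync() walks to event->parent. A rough user-space sketch of the signal setup follows; the event, period, and bare SIGIO handler are illustrative only.)

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <sys/ioctl.h>
#include <signal.h>
#include <fcntl.h>
#include <unistd.h>
#include <string.h>
#include <stdint.h>

static volatile sig_atomic_t overflows;

static void on_sigio(int sig)
{
	(void)sig;
	overflows++;	/* overflow notification arrived */
}

int main(void)
{
	struct perf_event_attr attr;
	struct sigaction sa;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_HARDWARE;
	attr.size = sizeof(attr);
	attr.config = PERF_COUNT_HW_CPU_CYCLES;
	attr.sample_period = 1000000;
	attr.inherit = 1;		/* children inherit the event */
	attr.disabled = 1;

	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0)
		return 1;

	memset(&sa, 0, sizeof(sa));
	sa.sa_handler = on_sigio;
	sigaction(SIGIO, &sa, NULL);

	/* signal-driven I/O: SIGIO is delivered through the fd's fasync state */
	fcntl(fd, F_SETOWN, getpid());
	fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | O_ASYNC);

	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	/* ... workload; overflows in forked children also raise SIGIO ... */

	close(fd);
	return 0;
}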