@@ -3192,7 +3192,8 @@ static void free_event_rcu(struct rcu_head *head)
 }
 
 static void ring_buffer_put(struct ring_buffer *rb);
-static void ring_buffer_detach(struct perf_event *event, struct ring_buffer *rb);
+static void ring_buffer_attach(struct perf_event *event,
+			       struct ring_buffer *rb);
 
 static void unaccount_event_cpu(struct perf_event *event, int cpu)
 {
@@ -3252,8 +3253,6 @@ static void free_event(struct perf_event *event)
 	unaccount_event(event);
 
 	if (event->rb) {
-		struct ring_buffer *rb;
-
 		/*
 		 * Can happen when we close an event with re-directed output.
 		 *
@@ -3261,12 +3260,7 @@ static void free_event(struct perf_event *event)
 		 * over us; possibly making our ring_buffer_put() the last.
 		 */
 		mutex_lock(&event->mmap_mutex);
-		rb = event->rb;
-		if (rb) {
-			rcu_assign_pointer(event->rb, NULL);
-			ring_buffer_detach(event, rb);
-			ring_buffer_put(rb); /* could be last */
-		}
+		ring_buffer_attach(event, NULL);
 		mutex_unlock(&event->mmap_mutex);
 	}
 
@@ -3850,28 +3844,47 @@ unlock:
 static void ring_buffer_attach(struct perf_event *event,
 			       struct ring_buffer *rb)
 {
+	struct ring_buffer *old_rb = NULL;
 	unsigned long flags;
 
-	if (!list_empty(&event->rb_entry))
-		return;
+	if (event->rb) {
+		/*
+		 * Should be impossible, we set this when removing
+		 * event->rb_entry and wait/clear when adding event->rb_entry.
+		 */
+		WARN_ON_ONCE(event->rcu_pending);
 
-	spin_lock_irqsave(&rb->event_lock, flags);
-	if (list_empty(&event->rb_entry))
-		list_add(&event->rb_entry, &rb->event_list);
-	spin_unlock_irqrestore(&rb->event_lock, flags);
-}
+		old_rb = event->rb;
+		event->rcu_batches = get_state_synchronize_rcu();
+		event->rcu_pending = 1;
 
-static void ring_buffer_detach(struct perf_event *event, struct ring_buffer *rb)
-{
-	unsigned long flags;
+		spin_lock_irqsave(&old_rb->event_lock, flags);
+		list_del_rcu(&event->rb_entry);
+		spin_unlock_irqrestore(&old_rb->event_lock, flags);
+	}
 
-	if (list_empty(&event->rb_entry))
-		return;
+	if (event->rcu_pending && rb) {
+		cond_synchronize_rcu(event->rcu_batches);
+		event->rcu_pending = 0;
+	}
 
-	spin_lock_irqsave(&rb->event_lock, flags);
-	list_del_init(&event->rb_entry);
-	wake_up_all(&event->waitq);
-	spin_unlock_irqrestore(&rb->event_lock, flags);
+	if (rb) {
+		spin_lock_irqsave(&rb->event_lock, flags);
+		list_add_rcu(&event->rb_entry, &rb->event_list);
+		spin_unlock_irqrestore(&rb->event_lock, flags);
+	}
+
+	rcu_assign_pointer(event->rb, rb);
+
+	if (old_rb) {
+		ring_buffer_put(old_rb);
+		/*
+		 * Since we detached before setting the new rb, so that we
+		 * could attach the new rb, we could have missed a wakeup.
+		 * Provide it now.
+		 */
+		wake_up_all(&event->waitq);
+	}
 }
 
 static void ring_buffer_wakeup(struct perf_event *event)
@@ -3940,7 +3953,7 @@ static void perf_mmap_close(struct vm_area_struct *vma)
 {
 	struct perf_event *event = vma->vm_file->private_data;
 
-	struct ring_buffer *rb = event->rb;
+	struct ring_buffer *rb = ring_buffer_get(event);
 	struct user_struct *mmap_user = rb->mmap_user;
 	int mmap_locked = rb->mmap_locked;
 	unsigned long size = perf_data_size(rb);
@@ -3948,18 +3961,14 @@ static void perf_mmap_close(struct vm_area_struct *vma)
 	atomic_dec(&rb->mmap_count);
 
 	if (!atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex))
-		return;
+		goto out_put;
 
-	/* Detach current event from the buffer. */
-	rcu_assign_pointer(event->rb, NULL);
-	ring_buffer_detach(event, rb);
+	ring_buffer_attach(event, NULL);
 	mutex_unlock(&event->mmap_mutex);
 
 	/* If there's still other mmap()s of this buffer, we're done. */
-	if (atomic_read(&rb->mmap_count)) {
-		ring_buffer_put(rb); /* can't be last */
-		return;
-	}
+	if (atomic_read(&rb->mmap_count))
+		goto out_put;
 
 	/*
 	 * No other mmap()s, detach from all other events that might redirect
@@ -3989,11 +3998,9 @@ again:
 		 * still restart the iteration to make sure we're not now
 		 * iterating the wrong list.
 		 */
-		if (event->rb == rb) {
-			rcu_assign_pointer(event->rb, NULL);
-			ring_buffer_detach(event, rb);
-			ring_buffer_put(rb); /* can't be last, we still have one */
-		}
+		if (event->rb == rb)
+			ring_buffer_attach(event, NULL);
+
 		mutex_unlock(&event->mmap_mutex);
 		put_event(event);
 
@@ -4018,6 +4025,7 @@ again:
 	vma->vm_mm->pinned_vm -= mmap_locked;
 	free_uid(mmap_user);
 
+out_put:
 	ring_buffer_put(rb); /* could be last */
 }
 
@@ -4135,7 +4143,6 @@ again:
 	vma->vm_mm->pinned_vm += extra;
 
 	ring_buffer_attach(event, rb);
-	rcu_assign_pointer(event->rb, rb);
 
 	perf_event_init_userpage(event);
 	perf_event_update_userpage(event);
@@ -6934,7 +6941,7 @@ err_size:
 static int
 perf_event_set_output(struct perf_event *event, struct perf_event *output_event)
 {
-	struct ring_buffer *rb = NULL, *old_rb = NULL;
+	struct ring_buffer *rb = NULL;
 	int ret = -EINVAL;
 
 	if (!output_event)
@@ -6962,8 +6969,6 @@ set:
 	if (atomic_read(&event->mmap_count))
 		goto unlock;
 
-	old_rb = event->rb;
-
 	if (output_event) {
 		/* get the rb we want to redirect to */
 		rb = ring_buffer_get(output_event);
@@ -6971,23 +6976,7 @@ set:
 			goto unlock;
 	}
 
-	if (old_rb)
-		ring_buffer_detach(event, old_rb);
-
-	if (rb)
-		ring_buffer_attach(event, rb);
-
-	rcu_assign_pointer(event->rb, rb);
-
-	if (old_rb) {
-		ring_buffer_put(old_rb);
-		/*
-		 * Since we detached before setting the new rb, so that we
-		 * could attach the new rb, we could have missed a wakeup.
-		 * Provide it now.
-		 */
-		wake_up_all(&event->waitq);
-	}
+	ring_buffer_attach(event, rb);
 
 	ret = 0;
 unlock:
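
Note (not part of the patch): the rcu_batches/rcu_pending pair introduced in ring_buffer_attach() relies on the RCU APIs get_state_synchronize_rcu() and cond_synchronize_rcu(), so the grace-period wait on re-attach is skipped whenever a full grace period has already elapsed since the old rb_entry was unlinked. A minimal stand-alone sketch of that pattern follows; the names demo_node, demo_unlink and demo_relink are illustrative only and do not exist in the perf code.

#include <linux/rcupdate.h>
#include <linux/list.h>
#include <linux/spinlock.h>

/* Illustrative only -- not perf code. */
struct demo_node {
	struct list_head	entry;
	unsigned long		rcu_batches;	/* cookie from get_state_synchronize_rcu() */
	int			rcu_pending;	/* unlinked, grace period possibly still running */
};

static DEFINE_SPINLOCK(demo_lock);

static void demo_unlink(struct demo_node *n)
{
	/* Snapshot the grace-period state at the moment of unlinking. */
	n->rcu_batches = get_state_synchronize_rcu();
	n->rcu_pending = 1;

	spin_lock(&demo_lock);
	list_del_rcu(&n->entry);
	spin_unlock(&demo_lock);
}

static void demo_relink(struct demo_node *n, struct list_head *head)
{
	if (n->rcu_pending) {
		/*
		 * Waits only if a grace period has not already completed
		 * since demo_unlink(); otherwise returns immediately.
		 */
		cond_synchronize_rcu(n->rcu_batches);
		n->rcu_pending = 0;
	}

	spin_lock(&demo_lock);
	list_add_rcu(&n->entry, head);
	spin_unlock(&demo_lock);
}

Compared with an unconditional synchronize_rcu() on every detach, this keeps the common attach path cheap while still guaranteeing that no RCU reader can still see the entry on the old list when it is added to the new one.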