@@ -221,6 +221,8 @@ void perf_output_end(struct perf_output_handle *handle)
 	rcu_read_unlock();
 }
 
+static void rb_irq_work(struct irq_work *work);
+
 static void
 ring_buffer_init(struct ring_buffer *rb, long watermark, int flags)
 {
@@ -241,6 +243,16 @@ ring_buffer_init(struct ring_buffer *rb, long watermark, int flags)
 
 	INIT_LIST_HEAD(&rb->event_list);
 	spin_lock_init(&rb->event_lock);
+	init_irq_work(&rb->irq_work, rb_irq_work);
+}
+
+static void ring_buffer_put_async(struct ring_buffer *rb)
+{
+	if (!atomic_dec_and_test(&rb->refcount))
+		return;
+
+	rb->rcu_head.next = (void *)rb;
+	irq_work_queue(&rb->irq_work);
 }
 
 /*
@@ -319,7 +331,7 @@ err_put:
 	rb_free_aux(rb);
 
 err:
-	ring_buffer_put(rb);
+	ring_buffer_put_async(rb);
 	handle->event = NULL;
 
 	return NULL;
@@ -370,7 +382,7 @@ void perf_aux_output_end(struct perf_output_handle *handle, unsigned long size,
 
 	local_set(&rb->aux_nest, 0);
 	rb_free_aux(rb);
-	ring_buffer_put(rb);
+	ring_buffer_put_async(rb);
 }
 
 /*
@@ -557,7 +569,18 @@ static void __rb_free_aux(struct ring_buffer *rb)
 void rb_free_aux(struct ring_buffer *rb)
 {
 	if (atomic_dec_and_test(&rb->aux_refcount))
+		irq_work_queue(&rb->irq_work);
+}
+
+static void rb_irq_work(struct irq_work *work)
+{
+	struct ring_buffer *rb = container_of(work, struct ring_buffer, irq_work);
+
+	if (!atomic_read(&rb->aux_refcount))
+		__rb_free_aux(rb);
+
+	if (rb->rcu_head.next == (void *)rb)
+		call_rcu(&rb->rcu_head, rb_free_rcu);
 }
 
 #ifndef CONFIG_PERF_USE_VMALLOC
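
For readers unfamiliar with the pattern: the put paths above can be reached
from contexts (such as NMI) where freeing the buffer directly is unsafe, so
the final teardown is deferred to an irq_work callback, and from there to
RCU. The self-pointer stored in rb->rcu_head.next doubles as a flag:
rcu_head.next is otherwise unused until call_rcu(), so rb_irq_work() can
test it to learn that the last buffer reference was dropped. Below is a
minimal, illustrative sketch of the same shape -- not part of the patch; the
"widget" names are hypothetical, while irq_work, RCU and the atomic APIs are
the real kernel interfaces used above.

/*
 * Illustrative sketch only. "widget" and its helpers are made-up names;
 * the deferred-free structure mirrors ring_buffer_put_async()/rb_irq_work().
 */
#include <linux/atomic.h>
#include <linux/irq_work.h>
#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct widget {
	atomic_t	refcount;
	struct rcu_head	rcu_head;
	struct irq_work	irq_work;
};

static void widget_free_rcu(struct rcu_head *head)
{
	kfree(container_of(head, struct widget, rcu_head));
}

static void widget_irq_work(struct irq_work *work)
{
	struct widget *w = container_of(work, struct widget, irq_work);

	/*
	 * rcu_head.next is unused until call_rcu(), so the self-pointer
	 * written by widget_put_async() marks "last reference dropped".
	 */
	if (w->rcu_head.next == (void *)w)
		call_rcu(&w->rcu_head, widget_free_rcu);
}

static void widget_init(struct widget *w)
{
	atomic_set(&w->refcount, 1);
	init_irq_work(&w->irq_work, widget_irq_work);
}

/* Safe from NMI: defer the free to irq_work, then to RCU. */
static void widget_put_async(struct widget *w)
{
	if (!atomic_dec_and_test(&w->refcount))
		return;

	w->rcu_head.next = (void *)w;
	irq_work_queue(&w->irq_work);
}

The irq_work hop exists because call_rcu() itself is not NMI-safe, while
irq_work_queue() is; the queued callback then runs in hardirq context
shortly afterwards, where calling call_rcu() is fine.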