@@ -438,6 +438,7 @@ struct ring_buffer_per_cpu {
 	raw_spinlock_t			reader_lock;	/* serialize readers */
 	arch_spinlock_t			lock;
 	struct lock_class_key		lock_key;
+	struct buffer_data_page		*free_page;
 	unsigned long			nr_pages;
 	unsigned int			current_context;
 	struct list_head		*pages;
@@ -4377,9 +4378,25 @@ EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
  */
 void *ring_buffer_alloc_read_page(struct ring_buffer *buffer, int cpu)
 {
-	struct buffer_data_page *bpage;
+	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
+	struct buffer_data_page *bpage = NULL;
+	unsigned long flags;
 	struct page *page;
 
+	local_irq_save(flags);
+	arch_spin_lock(&cpu_buffer->lock);
+
+	if (cpu_buffer->free_page) {
+		bpage = cpu_buffer->free_page;
+		cpu_buffer->free_page = NULL;
+	}
+
+	arch_spin_unlock(&cpu_buffer->lock);
+	local_irq_restore(flags);
+
+	if (bpage)
+		goto out;
+
 	page = alloc_pages_node(cpu_to_node(cpu),
 				GFP_KERNEL | __GFP_NORETRY, 0);
 	if (!page)
@@ -4387,6 +4404,7 @@ void *ring_buffer_alloc_read_page(struct ring_buffer *buffer, int cpu)
 
 	bpage = page_address(page);
 
+ out:
 	rb_init_page(bpage);
 
 	return bpage;
@@ -4396,13 +4414,29 @@ EXPORT_SYMBOL_GPL(ring_buffer_alloc_read_page);
 /**
  * ring_buffer_free_read_page - free an allocated read page
  * @buffer: the buffer the page was allocate for
+ * @cpu: the cpu buffer the page came from
  * @data: the page to free
  *
  * Free a page allocated from ring_buffer_alloc_read_page.
  */
-void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data)
+void ring_buffer_free_read_page(struct ring_buffer *buffer, int cpu, void *data)
 {
-	free_page((unsigned long)data);
+	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
+	struct buffer_data_page *bpage = data;
+	unsigned long flags;
+
+	local_irq_save(flags);
+	arch_spin_lock(&cpu_buffer->lock);
+
+	/* Cache the page for reuse if the slot is empty; otherwise really free it. */
+	if (!cpu_buffer->free_page) {
+		cpu_buffer->free_page = bpage;
+		bpage = NULL;
+	}
+
+	arch_spin_unlock(&cpu_buffer->lock);
+	local_irq_restore(flags);
+
+	free_page((unsigned long)bpage);
 }
 EXPORT_SYMBOL_GPL(ring_buffer_free_read_page);
 