|
|
@@ -243,30 +243,74 @@ ring_buffer_init(struct ring_buffer *rb, long watermark, int flags)
|
|
|
spin_lock_init(&rb->event_lock);
|
|
|
}
|
|
|
|
|
|
+/*
+ * GFP flags for AUX-area data pages: zero the pages, and — since the
+ * allocator below falls back to smaller orders on failure — suppress
+ * allocation-failure warnings and costly reclaim retries.
+ */
+#define PERF_AUX_GFP (GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY)
|
|
|
+
|
|
|
+/*
+ * Allocate a (possibly high-order) page for the AUX area on @node.
+ *
+ * Try @order first, retrying with progressively smaller orders until an
+ * order-0 allocation fails too (returns NULL in that case).  A successful
+ * high-order allocation is split into order-0 pages so the rest of the
+ * ring-buffer code can treat the AUX area as an array of individual
+ * pages; the first sub-page is flagged PagePrivate with ->private set to
+ * the original order, so the caller can tell how many page slots this
+ * one allocation covers (and drivers can see the contiguous extent).
+ */
+static struct page *rb_alloc_aux_page(int node, int order)
|
|
|
+{
|
|
|
+ struct page *page;
|
|
|
+
|
|
|
+ /*
+ * NOTE(review): MAX_ORDER is historically an *exclusive* bound on
+ * valid orders, so the first attempt at order == MAX_ORDER is
+ * presumably expected to fail and fall through to the retry loop —
+ * confirm against the mm definition for this kernel version.
+ */
+ if (order > MAX_ORDER)
|
|
|
+ order = MAX_ORDER;
|
|
|
+
|
|
|
+ /* Fall back one order at a time; order-- also exits the loop at 0. */
+ do {
|
|
|
+ page = alloc_pages_node(node, PERF_AUX_GFP, order);
|
|
|
+ } while (!page && order--);
|
|
|
+
|
|
|
+ /* Only a high-order (order > 0) result needs splitting/marking. */
+ if (page && order) {
|
|
|
+ /*
|
|
|
+ * Communicate the allocation size to the driver
|
|
|
+ */
|
|
|
+ split_page(page, order);
|
|
|
+ SetPagePrivate(page);
|
|
|
+ set_page_private(page, order);
|
|
|
+ }
|
|
|
+
|
|
|
+ return page;
|
|
|
+}
|
|
|
+
|
|
|
+/*
+ * Free the AUX page backing rb->aux_pages[idx].
+ *
+ * High-order allocations were split into order-0 pages at allocation
+ * time (see rb_alloc_aux_page()), so every slot holds an independently
+ * freeable page; the caller simply iterates over all indices.  Clear
+ * the PagePrivate order marker before returning the page.
+ */
+static void rb_free_aux_page(struct ring_buffer *rb, int idx)
|
|
|
+{
|
|
|
+ struct page *page = virt_to_page(rb->aux_pages[idx]);
|
|
|
+
|
|
|
+ ClearPagePrivate(page);
|
|
|
+ /*
+ * NOTE(review): ->mapping is presumably set when the AUX area is
+ * mmapped; it must be cleared before the page goes back to the
+ * allocator — confirm against the perf mmap path.
+ */
+ page->mapping = NULL;
|
|
|
+ __free_page(page);
|
|
|
+}
|
|
|
+
|
|
|
int rb_alloc_aux(struct ring_buffer *rb, struct perf_event *event,
|
|
|
pgoff_t pgoff, int nr_pages, int flags)
|
|
|
{
|
|
|
bool overwrite = !(flags & RING_BUFFER_WRITABLE);
|
|
|
int node = (event->cpu == -1) ? -1 : cpu_to_node(event->cpu);
|
|
|
- int ret = -ENOMEM;
|
|
|
+ int ret = -ENOMEM, max_order = 0;
|
|
|
|
|
|
if (!has_aux(event))
|
|
|
return -ENOTSUPP;
|
|
|
|
|
|
+ if (event->pmu->capabilities & PERF_PMU_CAP_AUX_NO_SG)
|
|
|
+ /*
|
|
|
+ * We need to start with the max_order that fits in nr_pages,
|
|
|
+ * not the other way around, hence ilog2() and not get_order.
|
|
|
+ */
|
|
|
+ max_order = ilog2(nr_pages);
|
|
|
+
|
|
|
rb->aux_pages = kzalloc_node(nr_pages * sizeof(void *), GFP_KERNEL, node);
|
|
|
if (!rb->aux_pages)
|
|
|
return -ENOMEM;
|
|
|
|
|
|
rb->free_aux = event->pmu->free_aux;
|
|
|
- for (rb->aux_nr_pages = 0; rb->aux_nr_pages < nr_pages;
|
|
|
- rb->aux_nr_pages++) {
|
|
|
+ for (rb->aux_nr_pages = 0; rb->aux_nr_pages < nr_pages;) {
|
|
|
struct page *page;
|
|
|
+ int last, order;
|
|
|
|
|
|
- page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
|
|
|
+ order = min(max_order, ilog2(nr_pages - rb->aux_nr_pages));
|
|
|
+ page = rb_alloc_aux_page(node, order);
|
|
|
if (!page)
|
|
|
goto out;
|
|
|
|
|
|
- rb->aux_pages[rb->aux_nr_pages] = page_address(page);
|
|
|
+ for (last = rb->aux_nr_pages + (1 << page_private(page));
|
|
|
+ last > rb->aux_nr_pages; rb->aux_nr_pages++)
|
|
|
+ rb->aux_pages[rb->aux_nr_pages] = page_address(page++);
|
|
|
}
|
|
|
|
|
|
rb->aux_priv = event->pmu->setup_aux(event->cpu, rb->aux_pages, nr_pages,
|
|
|
@@ -304,7 +348,7 @@ static void __rb_free_aux(struct ring_buffer *rb)
|
|
|
}
|
|
|
|
|
|
for (pg = 0; pg < rb->aux_nr_pages; pg++)
|
|
|
- free_page((unsigned long)rb->aux_pages[pg]);
|
|
|
+ rb_free_aux_page(rb, pg);
|
|
|
|
|
|
kfree(rb->aux_pages);
|
|
|
rb->aux_nr_pages = 0;
|