@@ -287,13 +287,26 @@ int rb_alloc_aux(struct ring_buffer *rb, struct perf_event *event,
 	if (!has_aux(event))
 		return -ENOTSUPP;
 
-	if (event->pmu->capabilities & PERF_PMU_CAP_AUX_NO_SG)
+	if (event->pmu->capabilities & PERF_PMU_CAP_AUX_NO_SG) {
 		/*
 		 * We need to start with the max_order that fits in nr_pages,
 		 * not the other way around, hence ilog2() and not get_order.
 		 */
 		max_order = ilog2(nr_pages);
 
+		/*
+		 * PMU requests more than one contiguous chunks of memory
+		 * for SW double buffering
+		 */
+		if ((event->pmu->capabilities & PERF_PMU_CAP_AUX_SW_DOUBLEBUF) &&
+		    !overwrite) {
+			if (!max_order)
+				return -EINVAL;
+
+			max_order--;
+		}
+	}
+
 	rb->aux_pages = kzalloc_node(nr_pages * sizeof(void *), GFP_KERNEL, node);
 	if (!rb->aux_pages)
 		return -ENOMEM;
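
For context on the two comments in the hunk: ilog2() gives the largest power-of-two chunk that still fits inside nr_pages (a floor), whereas a get_order()-style ceiling could overshoot the buffer; and decrementing max_order when PERF_PMU_CAP_AUX_SW_DOUBLEBUF is set caps each chunk at half the buffer, so the PMU always has at least two chunks to alternate between. Below is a minimal userspace sketch of that arithmetic, not kernel code: my_ilog2() and order_covering() are hypothetical stand-ins for the kernel's ilog2() and get_order().

#include <stdio.h>

/* floor(log2(n)): what the kernel's ilog2() computes for a runtime value */
static unsigned int my_ilog2(unsigned long n)
{
	unsigned int order = 0;

	while (n >>= 1)
		order++;
	return order;
}

/* smallest order whose page count covers n_pages: roughly what
 * get_order(n_pages << PAGE_SHIFT) would return, i.e. a ceiling */
static unsigned int order_covering(unsigned long n_pages)
{
	unsigned int order = 0;

	while ((1UL << order) < n_pages)
		order++;
	return order;
}

int main(void)
{
	unsigned long nr_pages = 48;	/* example AUX buffer size in pages */
	unsigned int max_order = my_ilog2(nr_pages);		/* 5: a 32-page chunk fits */
	unsigned int ceiling   = order_covering(nr_pages);	/* 6: a 64-page chunk would not */

	printf("ilog2-style max_order = %u (%lu pages fit in %lu)\n",
	       max_order, 1UL << max_order, nr_pages);
	printf("get_order-style order = %u (%lu pages overshoot)\n",
	       ceiling, 1UL << ceiling);

	/*
	 * SW double buffering: cap chunks at half the buffer so at least
	 * two of them exist; a single-page buffer cannot be split.
	 */
	if (!max_order)
		return 1;
	max_order--;
	printf("after halving for double buffering: max_order = %u (%lu pages per chunk)\n",
	       max_order, 1UL << max_order);

	return 0;
}

With nr_pages = 48 this prints max_order 5 (32 pages) versus a get_order-style 6 (64 pages, larger than the buffer), and a post-decrement max_order of 4 (16 pages), leaving at least two 16-page chunks for the PMU to ping-pong between.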