@@ -640,6 +640,7 @@ static notrace void trace_event_raw_event_synth(void *__data,
 	struct trace_event_file *trace_file = __data;
 	struct synth_trace_event *entry;
 	struct trace_event_buffer fbuffer;
+	struct ring_buffer *buffer;
 	struct synth_event *event;
 	unsigned int i, n_u64;
 	int fields_size = 0;
@@ -651,10 +652,17 @@ static notrace void trace_event_raw_event_synth(void *__data,
 
 	fields_size = event->n_u64 * sizeof(u64);
 
+	/*
+	 * Avoid ring buffer recursion detection, as this event
+	 * is being performed within another event.
+	 */
+	buffer = trace_file->tr->trace_buffer.buffer;
+	ring_buffer_nest_start(buffer);
+
 	entry = trace_event_buffer_reserve(&fbuffer, trace_file,
 					   sizeof(*entry) + fields_size);
 	if (!entry)
-		return;
+		goto out;
 
 	for (i = 0, n_u64 = 0; i < event->n_fields; i++) {
 		if (event->fields[i]->is_string) {
@@ -670,6 +678,8 @@ static notrace void trace_event_raw_event_synth(void *__data,
 	}
 
 	trace_event_buffer_commit(&fbuffer);
+out:
+	ring_buffer_nest_end(buffer);
 }
 
 static void free_synth_event_print_fmt(struct trace_event_call *call)
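
Note: the pattern this patch introduces is generally applicable. Any code
that writes a trace event from within another event's write path must
bracket the inner reserve/commit with ring_buffer_nest_start() and
ring_buffer_nest_end(), otherwise the ring buffer's recursion protection
treats the inner write as an unexpected recursion and drops it. Below is a
minimal sketch of that pattern; emit_nested_event() and its parameters are
illustrative only and not part of the patch, which obtains the buffer the
same way, via trace_file->tr->trace_buffer.buffer:

#include <linux/ring_buffer.h>
#include <linux/trace_events.h>

/*
 * Illustrative helper (not from the patch): write one event while the
 * caller is already inside another event's write path.
 */
static void emit_nested_event(struct trace_event_file *trace_file,
			      struct ring_buffer *buffer,
			      unsigned long len)
{
	struct trace_event_buffer fbuffer;
	void *entry;

	/* Tell the recursion checks one extra nesting level is expected. */
	ring_buffer_nest_start(buffer);

	entry = trace_event_buffer_reserve(&fbuffer, trace_file, len);
	if (entry) {
		/* ... fill in the reserved entry here ... */
		trace_event_buffer_commit(&fbuffer);
	}

	/* Every nest_start must be paired with a nest_end on all paths. */
	ring_buffer_nest_end(buffer);
}

This mirrors the control flow of the patch: nest_start before the reserve,
and the goto out ensures nest_end runs even when the reserve fails.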