
perf: Optimize perf_output_begin() -- lost_event case

Avoid touching the lost_event and sample_data cachelines twice. It's
not like we end up doing less work, but it might help to keep all
accesses to these cachelines in one place.

Due to code shuffle, this loses 4 bytes on x86_64-defconfig.

Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
Cc: Michael Ellerman <michael@ellerman.id.au>
Cc: Michael Neuling <mikey@neuling.org>
Cc: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
Cc: james.hogan@imgtec.com
Cc: Vince Weaver <vince@deater.net>
Cc: Victor Kaplansky <VICTORK@il.ibm.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Anton Blanchard <anton@samba.org>
Link: http://lkml.kernel.org/n/tip-zfxnc58qxj0eawdoj31hhupv@git.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Peter Zijlstra, 11 years ago
commit d20a973f46

+8 -5	kernel/events/ring_buffer.c

@@ -106,7 +106,6 @@ int perf_output_begin(struct perf_output_handle *handle,
 	struct ring_buffer *rb;
 	unsigned long tail, offset, head;
 	int have_lost;
-	struct perf_sample_data sample_data;
 	struct {
 		struct perf_event_header header;
 		u64			 id;
@@ -132,10 +131,9 @@ int perf_output_begin(struct perf_output_handle *handle,

 	have_lost = local_read(&rb->lost);
 	if (unlikely(have_lost)) {
-		lost_event.header.size = sizeof(lost_event);
-		perf_event_header__init_id(&lost_event.header, &sample_data,
-					   event);
-		size += lost_event.header.size;
+		size += sizeof(lost_event);
+		if (event->attr.sample_id_all)
+			size += event->id_header_size;
 	}

 	perf_output_get_handle(handle);
@@ -169,11 +167,16 @@ int perf_output_begin(struct perf_output_handle *handle,
 	handle->size = (PAGE_SIZE << page_order(rb)) - handle->size;

 	if (unlikely(have_lost)) {
+		struct perf_sample_data sample_data;
+
+		lost_event.header.size = sizeof(lost_event);
 		lost_event.header.type = PERF_RECORD_LOST;
 		lost_event.header.misc = 0;
 		lost_event.id          = event->id;
 		lost_event.lost        = local_xchg(&rb->lost, 0);

+		perf_event_header__init_id(&lost_event.header,
+					   &sample_data, event);
 		perf_output_put(handle, lost_event);
 		perf_event__output_id_sample(event, handle, &sample_data);
 	}
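
The commit message describes the pattern the diff implements: the space needed for a pending PERF_RECORD_LOST record is computed up front from sizeof() plus the precomputed id-header size, while the record itself is only filled in and written out later, in a single block, once the output handle has been obtained. Below is a minimal user-space C sketch of that ordering, under assumptions spelled out in the comments; the names (struct lost_record, struct fake_event, reserve_space, emit_record, output_begin) and the fixed-size buffer are invented for illustration and are not the kernel's ring-buffer API.

/*
 * Simplified user-space sketch of the pattern in the diff above -- NOT
 * the kernel implementation.  The buffer, record layout and helper names
 * are made up for illustration; only the ordering mirrors the commit:
 * the reservation size is computed from sizeof() plus a precomputed
 * id-header size, and the lost record is built and written out in one
 * block afterwards, so its memory is only touched in one place.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct lost_record {            /* stand-in for a PERF_RECORD_LOST record */
	uint32_t type, size;
	uint64_t id, lost;
};

struct fake_event {             /* stand-in for the perf_event fields used */
	int      sample_id_all;     /* like attr.sample_id_all               */
	size_t   id_header_size;    /* like the cached event->id_header_size */
	uint64_t id;
	uint64_t lost;              /* counter analogous to rb->lost         */
};

static unsigned char buf[256];
static size_t buf_head;

static int reserve_space(size_t size)   /* analogous to sizing the handle */
{
	if (buf_head + size > sizeof(buf))
		return -1;
	return 0;
}

static void emit_record(const void *rec, size_t size)  /* like perf_output_put */
{
	memcpy(buf + buf_head, rec, size);
	buf_head += size;
}

int output_begin(struct fake_event *event, size_t payload_size)
{
	size_t size = payload_size;
	uint64_t have_lost = event->lost;

	/* Size accounting up front: no lost_record fields are written yet. */
	if (have_lost) {
		size += sizeof(struct lost_record);
		if (event->sample_id_all)
			size += event->id_header_size;
	}

	if (reserve_space(size))
		return -1;

	/* All stores to the record happen here, in one place. */
	if (have_lost) {
		struct lost_record rec = {
			.type = 2,      /* PERF_RECORD_LOST */
			.size = sizeof(rec),
			.id   = event->id,
			.lost = event->lost,
		};
		event->lost = 0;
		emit_record(&rec, sizeof(rec));
		/* an id sample header would follow when sample_id_all is set */
	}
	return 0;
}

int main(void)
{
	struct fake_event ev = { .sample_id_all = 0, .id = 1, .lost = 3 };

	if (output_begin(&ev, 16) == 0)
		printf("reserved space, wrote %zu bytes of lost record\n", buf_head);
	return 0;
}

The point of the code shuffle is visible in output_begin(): the first branch only does arithmetic on sizes, and every store to the lost record happens in the second branch, keeping all accesses to that cacheline together, just as the commit message explains.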