
Merge tag 'perf-core-for-mingo-4.14-20170816' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux into perf/core

Pull perf core improvements and fixes:

New features:

- Support exporting Intel PT data to sqlite3 with python perf scripts; this
  is in addition to the postgresql support that was already there (Adrian Hunter)
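
  A quick usage sketch, following the example in the export-to-sqlite.py header
  added below (the 'pt_example' database name and the libexec path are
  illustrative and depend on the local install):

	$ perf record -e intel_pt//u ls
	$ perf script -s ~/libexec/perf-core/scripts/python/export-to-sqlite.py pt_example branches calls
	$ sqlite3 pt_example
	sqlite> .header on
	sqlite> .mode column
	sqlite> select * from samples_view where id < 10;
	sqlite> .quit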

Infrastructure changes:

- Handle perf tool builds with fewer features in perf shell tests, such
  as those with NO_LIBDWARF=1 or even without 'perf probe' (Arnaldo Carvalho de Melo);
  a shell sketch of both infrastructure points follows this list

- Replace '|&' with '2>&1 |' to work with more shells in the just-introduced
  perf test shell harness (Kim Phillips)
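
  A minimal shell sketch of both infrastructure points above, assuming the
  tests/shell/lib/probe.sh helper added in this series:

	# '|&' is a bashism; POSIX shells such as dash reject it, so the
	# tests spell the stderr redirection out explicitly:
	perf probe -l 2>&1 | grep -q probe:vfs_getname

	# Builds without 'perf probe' skip the test via the shared helper:
	. $(dirname $0)/lib/probe.sh
	skip_if_no_perf_probe || exit 2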

Architecture related fixes:

- Fix endianness problem when loading parameters in the BPF prologue
  generated by perf, noticed using 'perf test BPF' in s390x systems (Wang Nan, Thomas Richter)

Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Ingo Molnar, 8 years ago
parent
commit
9881223c6c

+ 3 - 3
tools/perf/Documentation/intel-pt.txt

@@ -104,9 +104,9 @@ system, asynchronous, interrupt, transaction abort, trace begin, trace end, and
 in transaction, respectively.
 
 While it is possible to create scripts to analyze the data, an alternative
-approach is available to export the data to a postgresql database.  Refer to
-script export-to-postgresql.py for more details, and to script
-call-graph-from-postgresql.py for an example of using the database.
+approach is available to export the data to a sqlite or postgresql database.
+Refer to script export-to-sqlite.py or export-to-postgresql.py for more details,
+and to script call-graph-from-sql.py for an example of using the database.
 
 There is also script intel-pt-events.py which provides an example of how to
 unpack the raw data for power events and PTWRITE.

+ 8 - 0
tools/perf/scripts/python/bin/export-to-sqlite-record

@@ -0,0 +1,8 @@
+#!/bin/bash
+
+#
+# export perf data to a sqlite3 database. Can cover
+# perf ip samples (excluding the tracepoints). No special
+# record requirements, just record what you want to export.
+#
+perf record $@

+ 29 - 0
tools/perf/scripts/python/bin/export-to-sqlite-report

@@ -0,0 +1,29 @@
+#!/bin/bash
+# description: export perf data to a sqlite3 database
+# args: [database name] [columns] [calls]
+n_args=0
+for i in "$@"
+do
+    if expr match "$i" "-" > /dev/null ; then
+	break
+    fi
+    n_args=$(( $n_args + 1 ))
+done
+if [ "$n_args" -gt 3 ] ; then
+    echo "usage: export-to-sqlite-report [database name] [columns] [calls]"
+    exit
+fi
+if [ "$n_args" -gt 2 ] ; then
+    dbname=$1
+    columns=$2
+    calls=$3
+    shift 3
+elif [ "$n_args" -gt 1 ] ; then
+    dbname=$1
+    columns=$2
+    shift 2
+elif [ "$n_args" -gt 0 ] ; then
+    dbname=$1
+    shift
+fi
+perf script $@ -s "$PERF_EXEC_PATH"/scripts/python/export-to-sqlite.py $dbname $columns $calls

+ 41 - 29
tools/perf/scripts/python/call-graph-from-postgresql.py → tools/perf/scripts/python/call-graph-from-sql.py

@@ -1,6 +1,6 @@
 #!/usr/bin/python2
-# call-graph-from-postgresql.py: create call-graph from postgresql database
-# Copyright (c) 2014, Intel Corporation.
+# call-graph-from-sql.py: create call-graph from sql database
+# Copyright (c) 2014-2017, Intel Corporation.
 #
 # This program is free software; you can redistribute it and/or modify it
 # under the terms and conditions of the GNU General Public License,
@@ -11,18 +11,19 @@
 # FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 # more details.
 
-# To use this script you will need to have exported data using the
-# export-to-postgresql.py script.  Refer to that script for details.
+# To use this script you will need to have exported data using either the
+# export-to-sqlite.py or the export-to-postgresql.py script.  Refer to those
+# scripts for details.
 #
-# Following on from the example in the export-to-postgresql.py script, a
+# Following on from the example in the export scripts, a
 # call-graph can be displayed for the pt_example database like this:
 #
-#	python tools/perf/scripts/python/call-graph-from-postgresql.py pt_example
+#	python tools/perf/scripts/python/call-graph-from-sql.py pt_example
 #
-# Note this script supports connecting to remote databases by setting hostname,
-# port, username, password, and dbname e.g.
+# Note that for PostgreSQL, this script supports connecting to remote databases
+# by setting hostname, port, username, password, and dbname e.g.
 #
-#	python tools/perf/scripts/python/call-graph-from-postgresql.py "hostname=myhost username=myuser password=mypassword dbname=pt_example"
+#	python tools/perf/scripts/python/call-graph-from-sql.py "hostname=myhost username=myuser password=mypassword dbname=pt_example"
 #
 # The result is a GUI window with a tree representing a context-sensitive
 # call-graph.  Expanding a couple of levels of the tree and adjusting column
@@ -160,7 +161,7 @@ class TreeItem():
 				  '( SELECT short_name FROM dsos WHERE id = ( SELECT dso_id FROM symbols WHERE id = ( SELECT symbol_id FROM call_paths WHERE id = call_path_id ) ) ), '
 				  '( SELECT ip FROM call_paths where id = call_path_id ) '
 				  'FROM calls WHERE parent_call_path_id = ' + str(self.call_path_id) + ' AND comm_id = ' + str(self.comm_id) + ' AND thread_id = ' + str(self.thread_id) +
-				  'ORDER BY call_path_id')
+				  ' ORDER BY call_path_id')
 		if not ret:
 			raise Exception("Query failed: " + query.lastError().text())
 		last_call_path_id = 0
@@ -291,29 +292,40 @@ class MainWindow(QMainWindow):
 
 if __name__ == '__main__':
 	if (len(sys.argv) < 2):
-		print >> sys.stderr, "Usage is: call-graph-from-postgresql.py <database name>"
+		print >> sys.stderr, "Usage is: call-graph-from-sql.py <database name>"
 		raise Exception("Too few arguments")
 
 	dbname = sys.argv[1]
 
-	db = QSqlDatabase.addDatabase('QPSQL')
-
-	opts = dbname.split()
-	for opt in opts:
-		if '=' in opt:
-			opt = opt.split('=')
-			if opt[0] == 'hostname':
-				db.setHostName(opt[1])
-			elif opt[0] == 'port':
-				db.setPort(int(opt[1]))
-			elif opt[0] == 'username':
-				db.setUserName(opt[1])
-			elif opt[0] == 'password':
-				db.setPassword(opt[1])
-			elif opt[0] == 'dbname':
-				dbname = opt[1]
-		else:
-			dbname = opt
+	is_sqlite3 = False
+	try:
+		f = open(dbname)
+		if f.read(15) == "SQLite format 3":
+			is_sqlite3 = True
+		f.close()
+	except:
+		pass
+
+	if is_sqlite3:
+		db = QSqlDatabase.addDatabase('QSQLITE')
+	else:
+		db = QSqlDatabase.addDatabase('QPSQL')
+		opts = dbname.split()
+		for opt in opts:
+			if '=' in opt:
+				opt = opt.split('=')
+				if opt[0] == 'hostname':
+					db.setHostName(opt[1])
+				elif opt[0] == 'port':
+					db.setPort(int(opt[1]))
+				elif opt[0] == 'username':
+					db.setUserName(opt[1])
+				elif opt[0] == 'password':
+					db.setPassword(opt[1])
+				elif opt[0] == 'dbname':
+					dbname = opt[1]
+			else:
+				dbname = opt
 
 	db.setDatabaseName(dbname)
 	if not db.open():

+ 3 - 2
tools/perf/scripts/python/export-to-postgresql.py

@@ -59,7 +59,7 @@ import datetime
 #	pt_example=# \q
 #
 # An example of using the database is provided by the script
-# call-graph-from-postgresql.py.  Refer to that script for details.
+# call-graph-from-sql.py.  Refer to that script for details.
 #
 # Tables:
 #
@@ -340,7 +340,8 @@ if branches:
 		'to_sym_offset	bigint,'
 		'to_ip		bigint,'
 		'branch_type	integer,'
-		'in_tx		boolean)')
+		'in_tx		boolean,'
+		'call_path_id	bigint)')
 else:
 	do_query(query, 'CREATE TABLE samples ('
 		'id		bigint		NOT NULL,'

+ 451 - 0
tools/perf/scripts/python/export-to-sqlite.py

@@ -0,0 +1,451 @@
+# export-to-sqlite.py: export perf data to a sqlite3 database
+# Copyright (c) 2017, Intel Corporation.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+#
+# This program is distributed in the hope it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+# more details.
+
+import os
+import sys
+import struct
+import datetime
+
+# To use this script you will need to have installed package python-pyside which
+# provides LGPL-licensed Python bindings for Qt.  You will also need the package
+# libqt4-sql-sqlite for Qt sqlite3 support.
+#
+# An example of using this script with Intel PT:
+#
+#	$ perf record -e intel_pt//u ls
+#	$ perf script -s ~/libexec/perf-core/scripts/python/export-to-sqlite.py pt_example branches calls
+#	2017-07-31 14:26:07.326913 Creating database...
+#	2017-07-31 14:26:07.538097 Writing records...
+#	2017-07-31 14:26:09.889292 Adding indexes
+#	2017-07-31 14:26:09.958746 Done
+#
+# To browse the database, sqlite3 can be used e.g.
+#
+#	$ sqlite3 pt_example
+#	sqlite> .header on
+#	sqlite> select * from samples_view where id < 10;
+#	sqlite> .mode column
+#	sqlite> select * from samples_view where id < 10;
+#	sqlite> .tables
+#	sqlite> .schema samples_view
+#	sqlite> .quit
+#
+# An example of using the database is provided by the script
+# call-graph-from-sql.py.  Refer to that script for details.
+#
+# The database structure is practically the same as created by the script
+# export-to-postgresql.py. Refer to that script for details.  A notable
+# difference is the 'transaction' column of the 'samples' table which is
+# renamed 'transaction_' in sqlite because 'transaction' is a reserved word.
+
+from PySide.QtSql import *
+
+sys.path.append(os.environ['PERF_EXEC_PATH'] + \
+	'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
+
+# These perf imports are not used at present
+#from perf_trace_context import *
+#from Core import *
+
+perf_db_export_mode = True
+perf_db_export_calls = False
+perf_db_export_callchains = False
+
+def usage():
+	print >> sys.stderr, "Usage is: export-to-sqlite.py <database name> [<columns>] [<calls>] [<callchains>]"
+	print >> sys.stderr, "where:	columns		'all' or 'branches'"
+	print >> sys.stderr, "		calls		'calls' => create calls and call_paths table"
+	print >> sys.stderr, "		callchains	'callchains' => create call_paths table"
+	raise Exception("Too few arguments")
+
+if (len(sys.argv) < 2):
+	usage()
+
+dbname = sys.argv[1]
+
+if (len(sys.argv) >= 3):
+	columns = sys.argv[2]
+else:
+	columns = "all"
+
+if columns not in ("all", "branches"):
+	usage()
+
+branches = (columns == "branches")
+
+for i in range(3,len(sys.argv)):
+	if (sys.argv[i] == "calls"):
+		perf_db_export_calls = True
+	elif (sys.argv[i] == "callchains"):
+		perf_db_export_callchains = True
+	else:
+		usage()
+
+def do_query(q, s):
+	if (q.exec_(s)):
+		return
+	raise Exception("Query failed: " + q.lastError().text())
+
+def do_query_(q):
+	if (q.exec_()):
+		return
+	raise Exception("Query failed: " + q.lastError().text())
+
+print datetime.datetime.today(), "Creating database..."
+
+db_exists = False
+try:
+	f = open(dbname)
+	f.close()
+	db_exists = True
+except:
+	pass
+
+if db_exists:
+	raise Exception(dbname + " already exists")
+
+db = QSqlDatabase.addDatabase('QSQLITE')
+db.setDatabaseName(dbname)
+db.open()
+
+query = QSqlQuery(db)
+
+do_query(query, 'PRAGMA journal_mode = OFF')
+do_query(query, 'BEGIN TRANSACTION')
+
+do_query(query, 'CREATE TABLE selected_events ('
+		'id		integer		NOT NULL	PRIMARY KEY,'
+		'name		varchar(80))')
+do_query(query, 'CREATE TABLE machines ('
+		'id		integer		NOT NULL	PRIMARY KEY,'
+		'pid		integer,'
+		'root_dir 	varchar(4096))')
+do_query(query, 'CREATE TABLE threads ('
+		'id		integer		NOT NULL	PRIMARY KEY,'
+		'machine_id	bigint,'
+		'process_id	bigint,'
+		'pid		integer,'
+		'tid		integer)')
+do_query(query, 'CREATE TABLE comms ('
+		'id		integer		NOT NULL	PRIMARY KEY,'
+		'comm		varchar(16))')
+do_query(query, 'CREATE TABLE comm_threads ('
+		'id		integer		NOT NULL	PRIMARY KEY,'
+		'comm_id	bigint,'
+		'thread_id	bigint)')
+do_query(query, 'CREATE TABLE dsos ('
+		'id		integer		NOT NULL	PRIMARY KEY,'
+		'machine_id	bigint,'
+		'short_name	varchar(256),'
+		'long_name	varchar(4096),'
+		'build_id	varchar(64))')
+do_query(query, 'CREATE TABLE symbols ('
+		'id		integer		NOT NULL	PRIMARY KEY,'
+		'dso_id		bigint,'
+		'sym_start	bigint,'
+		'sym_end	bigint,'
+		'binding	integer,'
+		'name		varchar(2048))')
+do_query(query, 'CREATE TABLE branch_types ('
+		'id		integer		NOT NULL	PRIMARY KEY,'
+		'name		varchar(80))')
+
+if branches:
+	do_query(query, 'CREATE TABLE samples ('
+		'id		integer		NOT NULL	PRIMARY KEY,'
+		'evsel_id	bigint,'
+		'machine_id	bigint,'
+		'thread_id	bigint,'
+		'comm_id	bigint,'
+		'dso_id		bigint,'
+		'symbol_id	bigint,'
+		'sym_offset	bigint,'
+		'ip		bigint,'
+		'time		bigint,'
+		'cpu		integer,'
+		'to_dso_id	bigint,'
+		'to_symbol_id	bigint,'
+		'to_sym_offset	bigint,'
+		'to_ip		bigint,'
+		'branch_type	integer,'
+		'in_tx		boolean,'
+		'call_path_id	bigint)')
+else:
+	do_query(query, 'CREATE TABLE samples ('
+		'id		integer		NOT NULL	PRIMARY KEY,'
+		'evsel_id	bigint,'
+		'machine_id	bigint,'
+		'thread_id	bigint,'
+		'comm_id	bigint,'
+		'dso_id		bigint,'
+		'symbol_id	bigint,'
+		'sym_offset	bigint,'
+		'ip		bigint,'
+		'time		bigint,'
+		'cpu		integer,'
+		'to_dso_id	bigint,'
+		'to_symbol_id	bigint,'
+		'to_sym_offset	bigint,'
+		'to_ip		bigint,'
+		'period		bigint,'
+		'weight		bigint,'
+		'transaction_	bigint,'
+		'data_src	bigint,'
+		'branch_type	integer,'
+		'in_tx		boolean,'
+		'call_path_id	bigint)')
+
+if perf_db_export_calls or perf_db_export_callchains:
+	do_query(query, 'CREATE TABLE call_paths ('
+		'id		integer		NOT NULL	PRIMARY KEY,'
+		'parent_id	bigint,'
+		'symbol_id	bigint,'
+		'ip		bigint)')
+if perf_db_export_calls:
+	do_query(query, 'CREATE TABLE calls ('
+		'id		integer		NOT NULL	PRIMARY KEY,'
+		'thread_id	bigint,'
+		'comm_id	bigint,'
+		'call_path_id	bigint,'
+		'call_time	bigint,'
+		'return_time	bigint,'
+		'branch_count	bigint,'
+		'call_id	bigint,'
+		'return_id	bigint,'
+		'parent_call_path_id	bigint,'
+		'flags		integer)')
+
+# printf was added to sqlite in version 3.8.3
+sqlite_has_printf = False
+try:
+	do_query(query, 'SELECT printf("") FROM machines')
+	sqlite_has_printf = True
+except:
+	pass
+
+def emit_to_hex(x):
+	if sqlite_has_printf:
+		return 'printf("%x", ' + x + ')'
+	else:
+		return x
+
+do_query(query, 'CREATE VIEW machines_view AS '
+	'SELECT '
+		'id,'
+		'pid,'
+		'root_dir,'
+		'CASE WHEN id=0 THEN \'unknown\' WHEN pid=-1 THEN \'host\' ELSE \'guest\' END AS host_or_guest'
+	' FROM machines')
+
+do_query(query, 'CREATE VIEW dsos_view AS '
+	'SELECT '
+		'id,'
+		'machine_id,'
+		'(SELECT host_or_guest FROM machines_view WHERE id = machine_id) AS host_or_guest,'
+		'short_name,'
+		'long_name,'
+		'build_id'
+	' FROM dsos')
+
+do_query(query, 'CREATE VIEW symbols_view AS '
+	'SELECT '
+		'id,'
+		'name,'
+		'(SELECT short_name FROM dsos WHERE id=dso_id) AS dso,'
+		'dso_id,'
+		'sym_start,'
+		'sym_end,'
+		'CASE WHEN binding=0 THEN \'local\' WHEN binding=1 THEN \'global\' ELSE \'weak\' END AS binding'
+	' FROM symbols')
+
+do_query(query, 'CREATE VIEW threads_view AS '
+	'SELECT '
+		'id,'
+		'machine_id,'
+		'(SELECT host_or_guest FROM machines_view WHERE id = machine_id) AS host_or_guest,'
+		'process_id,'
+		'pid,'
+		'tid'
+	' FROM threads')
+
+do_query(query, 'CREATE VIEW comm_threads_view AS '
+	'SELECT '
+		'comm_id,'
+		'(SELECT comm FROM comms WHERE id = comm_id) AS command,'
+		'thread_id,'
+		'(SELECT pid FROM threads WHERE id = thread_id) AS pid,'
+		'(SELECT tid FROM threads WHERE id = thread_id) AS tid'
+	' FROM comm_threads')
+
+if perf_db_export_calls or perf_db_export_callchains:
+	do_query(query, 'CREATE VIEW call_paths_view AS '
+		'SELECT '
+			'c.id,'
+			+ emit_to_hex('c.ip') + ' AS ip,'
+			'c.symbol_id,'
+			'(SELECT name FROM symbols WHERE id = c.symbol_id) AS symbol,'
+			'(SELECT dso_id FROM symbols WHERE id = c.symbol_id) AS dso_id,'
+			'(SELECT dso FROM symbols_view  WHERE id = c.symbol_id) AS dso_short_name,'
+			'c.parent_id,'
+			+ emit_to_hex('p.ip') + ' AS parent_ip,'
+			'p.symbol_id AS parent_symbol_id,'
+			'(SELECT name FROM symbols WHERE id = p.symbol_id) AS parent_symbol,'
+			'(SELECT dso_id FROM symbols WHERE id = p.symbol_id) AS parent_dso_id,'
+			'(SELECT dso FROM symbols_view  WHERE id = p.symbol_id) AS parent_dso_short_name'
+		' FROM call_paths c INNER JOIN call_paths p ON p.id = c.parent_id')
+if perf_db_export_calls:
+	do_query(query, 'CREATE VIEW calls_view AS '
+		'SELECT '
+			'calls.id,'
+			'thread_id,'
+			'(SELECT pid FROM threads WHERE id = thread_id) AS pid,'
+			'(SELECT tid FROM threads WHERE id = thread_id) AS tid,'
+			'(SELECT comm FROM comms WHERE id = comm_id) AS command,'
+			'call_path_id,'
+			+ emit_to_hex('ip') + ' AS ip,'
+			'symbol_id,'
+			'(SELECT name FROM symbols WHERE id = symbol_id) AS symbol,'
+			'call_time,'
+			'return_time,'
+			'return_time - call_time AS elapsed_time,'
+			'branch_count,'
+			'call_id,'
+			'return_id,'
+			'CASE WHEN flags=1 THEN \'no call\' WHEN flags=2 THEN \'no return\' WHEN flags=3 THEN \'no call/return\' ELSE \'\' END AS flags,'
+			'parent_call_path_id'
+		' FROM calls INNER JOIN call_paths ON call_paths.id = call_path_id')
+
+do_query(query, 'CREATE VIEW samples_view AS '
+	'SELECT '
+		'id,'
+		'time,'
+		'cpu,'
+		'(SELECT pid FROM threads WHERE id = thread_id) AS pid,'
+		'(SELECT tid FROM threads WHERE id = thread_id) AS tid,'
+		'(SELECT comm FROM comms WHERE id = comm_id) AS command,'
+		'(SELECT name FROM selected_events WHERE id = evsel_id) AS event,'
+		+ emit_to_hex('ip') + ' AS ip_hex,'
+		'(SELECT name FROM symbols WHERE id = symbol_id) AS symbol,'
+		'sym_offset,'
+		'(SELECT short_name FROM dsos WHERE id = dso_id) AS dso_short_name,'
+		+ emit_to_hex('to_ip') + ' AS to_ip_hex,'
+		'(SELECT name FROM symbols WHERE id = to_symbol_id) AS to_symbol,'
+		'to_sym_offset,'
+		'(SELECT short_name FROM dsos WHERE id = to_dso_id) AS to_dso_short_name,'
+		'(SELECT name FROM branch_types WHERE id = branch_type) AS branch_type_name,'
+		'in_tx'
+	' FROM samples')
+
+do_query(query, 'END TRANSACTION')
+
+evsel_query = QSqlQuery(db)
+evsel_query.prepare("INSERT INTO selected_events VALUES (?, ?)")
+machine_query = QSqlQuery(db)
+machine_query.prepare("INSERT INTO machines VALUES (?, ?, ?)")
+thread_query = QSqlQuery(db)
+thread_query.prepare("INSERT INTO threads VALUES (?, ?, ?, ?, ?)")
+comm_query = QSqlQuery(db)
+comm_query.prepare("INSERT INTO comms VALUES (?, ?)")
+comm_thread_query = QSqlQuery(db)
+comm_thread_query.prepare("INSERT INTO comm_threads VALUES (?, ?, ?)")
+dso_query = QSqlQuery(db)
+dso_query.prepare("INSERT INTO dsos VALUES (?, ?, ?, ?, ?)")
+symbol_query = QSqlQuery(db)
+symbol_query.prepare("INSERT INTO symbols VALUES (?, ?, ?, ?, ?, ?)")
+branch_type_query = QSqlQuery(db)
+branch_type_query.prepare("INSERT INTO branch_types VALUES (?, ?)")
+sample_query = QSqlQuery(db)
+if branches:
+	sample_query.prepare("INSERT INTO samples VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)")
+else:
+	sample_query.prepare("INSERT INTO samples VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)")
+if perf_db_export_calls or perf_db_export_callchains:
+	call_path_query = QSqlQuery(db)
+	call_path_query.prepare("INSERT INTO call_paths VALUES (?, ?, ?, ?)")
+if perf_db_export_calls:
+	call_query = QSqlQuery(db)
+	call_query.prepare("INSERT INTO calls VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)")
+
+def trace_begin():
+	print datetime.datetime.today(), "Writing records..."
+	do_query(query, 'BEGIN TRANSACTION')
+	# id == 0 means unknown.  It is easier to create records for them than replace the zeroes with NULLs
+	evsel_table(0, "unknown")
+	machine_table(0, 0, "unknown")
+	thread_table(0, 0, 0, -1, -1)
+	comm_table(0, "unknown")
+	dso_table(0, 0, "unknown", "unknown", "")
+	symbol_table(0, 0, 0, 0, 0, "unknown")
+	sample_table(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
+	if perf_db_export_calls or perf_db_export_callchains:
+		call_path_table(0, 0, 0, 0)
+
+unhandled_count = 0
+
+def trace_end():
+	do_query(query, 'END TRANSACTION')
+
+	print datetime.datetime.today(), "Adding indexes"
+	if perf_db_export_calls:
+		do_query(query, 'CREATE INDEX pcpid_idx ON calls (parent_call_path_id)')
+
+	if (unhandled_count):
+		print datetime.datetime.today(), "Warning: ", unhandled_count, " unhandled events"
+	print datetime.datetime.today(), "Done"
+
+def trace_unhandled(event_name, context, event_fields_dict):
+	global unhandled_count
+	unhandled_count += 1
+
+def sched__sched_switch(*x):
+	pass
+
+def bind_exec(q, n, x):
+	for xx in x[0:n]:
+		q.addBindValue(str(xx))
+	do_query_(q)
+
+def evsel_table(*x):
+	bind_exec(evsel_query, 2, x)
+
+def machine_table(*x):
+	bind_exec(machine_query, 3, x)
+
+def thread_table(*x):
+	bind_exec(thread_query, 5, x)
+
+def comm_table(*x):
+	bind_exec(comm_query, 2, x)
+
+def comm_thread_table(*x):
+	bind_exec(comm_thread_query, 3, x)
+
+def dso_table(*x):
+	bind_exec(dso_query, 5, x)
+
+def symbol_table(*x):
+	bind_exec(symbol_query, 6, x)
+
+def branch_type_table(*x):
+	bind_exec(branch_type_query, 2, x)
+
+def sample_table(*x):
+	if branches:
+		bind_exec(sample_query, 18, x)
+	else:
+		bind_exec(sample_query, 22, x)
+
+def call_path_table(*x):
+	bind_exec(call_path_query, 4, x)
+
+def call_return_table(*x):
+	bind_exec(call_query, 11, x)

+ 3 - 1
tools/perf/tests/bpf-script-test-prologue.c

@@ -26,9 +26,11 @@ static void (*bpf_trace_printk)(const char *fmt, int fmt_size, ...) =
 	(void *) 6;
 
 SEC("func=null_lseek file->f_mode offset orig")
-int bpf_func__null_lseek(void *ctx, int err, unsigned long f_mode,
+int bpf_func__null_lseek(void *ctx, int err, unsigned long _f_mode,
 			 unsigned long offset, unsigned long orig)
 {
+	fmode_t f_mode = (fmode_t)_f_mode;
+
 	if (err)
 		return 0;
 	if (f_mode & FMODE_WRITE)

+ 6 - 0
tools/perf/tests/shell/lib/probe.sh

@@ -0,0 +1,6 @@
+# Arnaldo Carvalho de Melo <acme@kernel.org>, 2017
+
+skip_if_no_perf_probe() {
+	perf probe 2>&1 | grep -q 'is not a perf-command' && return 2
+	return 0
+}

+ 4 - 9
tools/perf/tests/shell/lib/probe_vfs_getname.sh

@@ -1,6 +1,6 @@
 # Arnaldo Carvalho de Melo <acme@kernel.org>, 2017
 
-perf probe -l | grep -q probe:vfs_getname
+perf probe -l 2>&1 | grep -q probe:vfs_getname
 had_vfs_getname=$?
 
 cleanup_probe_vfs_getname() {
@@ -12,17 +12,12 @@ cleanup_probe_vfs_getname() {
 add_probe_vfs_getname() {
 	local verbose=$1
 	if [ $had_vfs_getname -eq 1 ] ; then
-               line=$(perf probe -L getname_flags | egrep 'result.*=.*filename;' | sed -r 's/[[:space:]]+([[:digit:]]+)[[:space:]]+result->uptr.*/\1/')
-               perf probe $verbose "vfs_getname=getname_flags:${line} pathname=result->name:string"
+		line=$(perf probe -L getname_flags 2>&1 | egrep 'result.*=.*filename;' | sed -r 's/[[:space:]]+([[:digit:]]+)[[:space:]]+result->uptr.*/\1/')
+		perf probe $verbose "vfs_getname=getname_flags:${line} pathname=result->name:string"
 	fi
 }
 
 skip_if_no_debuginfo() {
-	add_probe_vfs_getname -v 2>&1 | grep -q "^Failed to find the path for kernel" && return 2
-	return 1
-}
-
-skip_if_no_debuginfo() {
-	add_probe_vfs_getname -v 2>&1 | grep -q "^Failed to find the path for kernel" && return 2
+	add_probe_vfs_getname -v 2>&1 | egrep -q "^(Failed to find the path for kernel|Debuginfo-analysis is not supported)" && return 2
 	return 1
 }

+ 4 - 0
tools/perf/tests/shell/probe_vfs_getname.sh

@@ -2,6 +2,10 @@
 #
 # Arnaldo Carvalho de Melo <acme@kernel.org>, 2017
 
+. $(dirname $0)/lib/probe.sh
+
+skip_if_no_perf_probe || exit 2
+
 . $(dirname $0)/lib/probe_vfs_getname.sh
 
 add_probe_vfs_getname || skip_if_no_debuginfo

+ 4 - 0
tools/perf/tests/shell/record+script_probe_vfs_getname.sh

@@ -7,6 +7,10 @@
 
 # Arnaldo Carvalho de Melo <acme@kernel.org>, 2017
 
+. $(dirname $0)/lib/probe.sh
+
+skip_if_no_perf_probe || exit 2
+
 . $(dirname $0)/lib/probe_vfs_getname.sh
 
 perfdata=$(mktemp /tmp/__perf_test.perf.data.XXXXX)

+ 4 - 1
tools/perf/tests/shell/trace+probe_libc_inet_pton.sh

@@ -8,6 +8,8 @@
 
 # Arnaldo Carvalho de Melo <acme@kernel.org>, 2017
 
+. $(dirname $0)/lib/probe.sh
+
 trace_libc_inet_pton_backtrace() {
 	idx=0
 	expected[0]="PING.*bytes"
@@ -20,7 +22,7 @@ trace_libc_inet_pton_backtrace() {
 	expected[7]="getaddrinfo[[:space:]]\(/usr/lib.*/libc-[0-9]+\.[0-9]+\.so\)$"
 	expected[8]=".*\(.*/bin/ping.*\)$"
 
-	perf trace --no-syscalls -e probe_libc:inet_pton/max-stack=3/ ping -6 -c 1 ::1 |& grep -v ^$ | while read line ; do
+	perf trace --no-syscalls -e probe_libc:inet_pton/max-stack=3/ ping -6 -c 1 ::1 2>&1 | grep -v ^$ | while read line ; do
 		echo $line
 		echo "$line" | egrep -q "${expected[$idx]}"
 		if [ $? -ne 0 ] ; then
@@ -32,6 +34,7 @@ trace_libc_inet_pton_backtrace() {
 	done
 }
 
+skip_if_no_perf_probe && \
 perf probe -q /lib64/libc-*.so inet_pton && \
 trace_libc_inet_pton_backtrace
 err=$?

+ 4 - 0
tools/perf/tests/shell/trace+probe_vfs_getname.sh

@@ -8,6 +8,10 @@
 
 # Arnaldo Carvalho de Melo <acme@kernel.org>, 2017
 
+. $(dirname $0)/lib/probe.sh
+
+skip_if_no_perf_probe || exit 2
+
 . $(dirname $0)/lib/probe_vfs_getname.sh
 
 file=$(mktemp /tmp/temporary_file.XXXXX)

+ 47 - 2
tools/perf/util/bpf-prologue.c

@@ -58,6 +58,46 @@ check_pos(struct bpf_insn_pos *pos)
 	return 0;
 }
 
+/*
+ * Convert type string (u8/u16/u32/u64/s8/s16/s32/s64 ..., see
+ * Documentation/trace/kprobetrace.txt) to size field of BPF_LDX_MEM
+ * instruction (BPF_{B,H,W,DW}).
+ */
+static int
+argtype_to_ldx_size(const char *type)
+{
+	int arg_size = type ? atoi(&type[1]) : 64;
+
+	switch (arg_size) {
+	case 8:
+		return BPF_B;
+	case 16:
+		return BPF_H;
+	case 32:
+		return BPF_W;
+	case 64:
+	default:
+		return BPF_DW;
+	}
+}
+
+static const char *
+insn_sz_to_str(int insn_sz)
+{
+	switch (insn_sz) {
+	case BPF_B:
+		return "BPF_B";
+	case BPF_H:
+		return "BPF_H";
+	case BPF_W:
+		return "BPF_W";
+	case BPF_DW:
+		return "BPF_DW";
+	default:
+		return "UNKNOWN";
+	}
+}
+
 /* Give it a shorter name */
 #define ins(i, p) append_insn((i), (p))
 
@@ -258,9 +298,14 @@ gen_prologue_slowpath(struct bpf_insn_pos *pos,
 	}
 
 	/* Final pass: read to registers */
-	for (i = 0; i < nargs; i++)
-		ins(BPF_LDX_MEM(BPF_DW, BPF_PROLOGUE_START_ARG_REG + i,
+	for (i = 0; i < nargs; i++) {
+		int insn_sz = (args[i].ref) ? argtype_to_ldx_size(args[i].type) : BPF_DW;
+
+		pr_debug("prologue: load arg %d, insn_sz is %s\n",
+			 i, insn_sz_to_str(insn_sz));
+		ins(BPF_LDX_MEM(insn_sz, BPF_PROLOGUE_START_ARG_REG + i,
 				BPF_REG_FP, -BPF_REG_SIZE * (i + 1)), pos);
+	}
 
 	ins(BPF_JMP_IMM(BPF_JA, BPF_REG_0, 0, JMP_TO_SUCCESS_CODE), pos);