
Merge branch 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull perf fixes from Ingo Molnar:
 "Two kprobes fixes and a handful of tooling fixes"

* 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  perf tools: Make sparc64 arch point to sparc
  perf symbols: Define EM_AARCH64 for older OSes
  perf top: Fix SIGBUS on sparc64
  perf tools: Fix probing for PERF_FLAG_FD_CLOEXEC flag
  perf tools: Fix pthread_attr_setaffinity_np build error
  perf tools: Define _GNU_SOURCE on pthread_attr_setaffinity_np feature check
  perf bench: Fix order of arguments to memcpy_alloc_mem
  kprobes/x86: Check for invalid ftrace location in __recover_probed_insn()
  kprobes/x86: Use 5-byte NOP when the code might be modified by ftrace
Linus Torvalds, 10 years ago
commit d7b48fec35

+ 40 - 14
arch/x86/kernel/kprobes/core.c

@@ -223,27 +223,48 @@ static unsigned long
 __recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr)
 {
 	struct kprobe *kp;
+	unsigned long faddr;
 
 	kp = get_kprobe((void *)addr);
-	/* There is no probe, return original address */
-	if (!kp)
+	faddr = ftrace_location(addr);
+	/*
+	 * Addresses inside the ftrace location are refused by
+	 * arch_check_ftrace_location(). Something went terribly wrong
+	 * if such an address is checked here.
+	 */
+	if (WARN_ON(faddr && faddr != addr))
+		return 0UL;
+	/*
+	 * Use the current code if it is not modified by Kprobe
+	 * and it cannot be modified by ftrace.
+	 */
+	if (!kp && !faddr)
 		return addr;
 
 	/*
-	 *  Basically, kp->ainsn.insn has an original instruction.
-	 *  However, RIP-relative instruction can not do single-stepping
-	 *  at different place, __copy_instruction() tweaks the displacement of
-	 *  that instruction. In that case, we can't recover the instruction
-	 *  from the kp->ainsn.insn.
+	 * Basically, kp->ainsn.insn has an original instruction.
+	 * However, a RIP-relative instruction cannot be single-stepped
+	 * at a different place, so __copy_instruction() tweaks the
+	 * displacement of that instruction. In that case, we can't recover
+	 * the instruction from kp->ainsn.insn.
 	 *
-	 *  On the other hand, kp->opcode has a copy of the first byte of
-	 *  the probed instruction, which is overwritten by int3. And
-	 *  the instruction at kp->addr is not modified by kprobes except
-	 *  for the first byte, we can recover the original instruction
-	 *  from it and kp->opcode.
+	 * On the other hand, in case of a normal Kprobe, kp->opcode has a
+	 * copy of the first byte of the probed instruction, which is
+	 * overwritten by int3. And since the instruction at kp->addr is not
+	 * modified by kprobes except for the first byte, we can recover the
+	 * original instruction from it and kp->opcode.
+	 *
+	 * In case of Kprobes using ftrace, we do not have a copy of
+	 * the original instruction. In fact, the ftrace location might
+	 * be modified at any time and could even be in an inconsistent state.
+	 * Fortunately, we know that the original code is the ideal 5-byte
+	 * long NOP.
 	 */
-	memcpy(buf, kp->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
-	buf[0] = kp->opcode;
+	memcpy(buf, (void *)addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
+	if (faddr)
+		memcpy(buf, ideal_nops[NOP_ATOMIC5], 5);
+	else
+		buf[0] = kp->opcode;
 	return (unsigned long)buf;
 }
 
@@ -251,6 +272,7 @@ __recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr)
  * Recover the probed instruction at addr for further analysis.
  * Caller must lock kprobes by kprobe_mutex, or disable preemption
  * for preventing to release referencing kprobes.
+ * Returns zero if the instruction cannot be recovered.
  */
 unsigned long recover_probed_instruction(kprobe_opcode_t *buf, unsigned long addr)
 {
@@ -285,6 +307,8 @@ static int can_probe(unsigned long paddr)
 		 * normally used, we just go through if there is no kprobe.
 		 */
 		__addr = recover_probed_instruction(buf, addr);
+		if (!__addr)
+			return 0;
 		kernel_insn_init(&insn, (void *)__addr, MAX_INSN_SIZE);
 		insn_get_length(&insn);
 
@@ -333,6 +357,8 @@ int __copy_instruction(u8 *dest, u8 *src)
 	unsigned long recovered_insn =
 		recover_probed_instruction(buf, (unsigned long)src);
 
+	if (!recovered_insn)
+		return 0;
 	kernel_insn_init(&insn, (void *)recovered_insn, MAX_INSN_SIZE);
 	insn_get_length(&insn);
 	/* Another subsystem puts a breakpoint, failed to recover */
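
The recovery function now has a three-way outcome: bail out (return 0) when a stray address lands inside an ftrace site, hand back the live code when neither kprobes nor ftrace touched it, or reconstruct the original bytes. Below is a minimal user-space sketch of that decision table; the function and parameter names are made up for illustration, and only the three-way logic and the NOP byte pattern mirror the patch (the kernel picks its actual ideal NOP variant at boot via ideal_nops[NOP_ATOMIC5]):

    #include <stdint.h>
    #include <string.h>

    #define MAX_INSN 16

    /* A common "ideal" atomic 5-byte NOP on modern x86 (0f 1f 44 00 00). */
    static const uint8_t nop5[5] = { 0x0f, 0x1f, 0x44, 0x00, 0x00 };

    /* Sketch of __recover_probed_insn()'s decision table. Returns NULL
     * for the "should never happen" case, the live text when nothing
     * patched it, or a reconstructed copy in buf. */
    const uint8_t *recover(const uint8_t *text, int mid_ftrace_site,
                           int is_ftrace_site, int has_kprobe,
                           uint8_t saved_first_byte, uint8_t *buf)
    {
        if (mid_ftrace_site)
            return NULL;                /* invalid address: warn and bail */
        if (!has_kprobe && !is_ftrace_site)
            return text;                /* unmodified code, usable as-is */

        memcpy(buf, text, MAX_INSN);    /* may hold an int3 or ftrace call */
        if (is_ftrace_site)
            memcpy(buf, nop5, 5);       /* original code was the 5-byte NOP */
        else
            buf[0] = saved_first_byte;  /* undo the kprobe's int3 */
        return buf;
    }

The NULL return is what forces the new zero-checks in can_probe(), __copy_instruction() and can_optimize() below: callers must refuse to probe rather than decode garbage.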

+ 2 - 0
arch/x86/kernel/kprobes/opt.c

@@ -259,6 +259,8 @@ static int can_optimize(unsigned long paddr)
 			 */
 			return 0;
 		recovered_insn = recover_probed_instruction(buf, addr);
+		if (!recovered_insn)
+			return 0;
 		kernel_insn_init(&insn, (void *)recovered_insn, MAX_INSN_SIZE);
 		insn_get_length(&insn);
 		/* Another subsystem puts a breakpoint */

+ 2 - 2
tools/perf/bench/mem-memcpy.c

@@ -289,7 +289,7 @@ static u64 do_memcpy_cycle(const struct routine *r, size_t len, bool prefault)
 	memcpy_t fn = r->fn.memcpy;
 	int i;
 
-	memcpy_alloc_mem(&src, &dst, len);
+	memcpy_alloc_mem(&dst, &src, len);
 
 	if (prefault)
 		fn(dst, src, len);
@@ -312,7 +312,7 @@ static double do_memcpy_gettimeofday(const struct routine *r, size_t len,
 	void *src = NULL, *dst = NULL;
 	int i;
 
-	memcpy_alloc_mem(&src, &dst, len);
+	memcpy_alloc_mem(&dst, &src, len);
 
 	if (prefault)
 		fn(dst, src, len);
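
The helper's parameter order is (dst, src, ...), mirroring memcpy() itself, so the old calls handed each buffer the other's setup. A compilable sketch of the bug class follows; the helper body here is hypothetical (the real memcpy_alloc_mem() lives in tools/perf/bench and may differ in detail, e.g. in exactly which buffer it prefaults):

    #include <stdlib.h>
    #include <string.h>

    /* Hypothetical stand-in for perf's helper: (dst, src) order, like
     * memcpy(), with asymmetric setup of the two buffers. */
    static void memcpy_alloc_mem(void **dst, void **src, size_t len)
    {
        *dst = malloc(len);
        *src = malloc(len);
        memset(*src, 0, len);   /* prefault only the source pages */
    }

    int main(void)
    {
        void *src = NULL, *dst = NULL;
        size_t len = 1 << 20;

        /* Buggy:  memcpy_alloc_mem(&src, &dst, len);
         * would prefault the destination instead of the source. */
        memcpy_alloc_mem(&dst, &src, len);  /* fixed argument order */

        memcpy(dst, src, len);
        free(src);
        free(dst);
        return 0;
    }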

+ 4 - 0
tools/perf/config/Makefile.arch

@@ -21,6 +21,10 @@ ifeq ($(RAW_ARCH),x86_64)
   endif
 endif
 
+ifeq ($(RAW_ARCH),sparc64)
+  ARCH ?= sparc
+endif
+
 ARCH ?= $(RAW_ARCH)
 
 LP64 := $(shell echo __LP64__ | ${CC} ${CFLAGS} -E -x c - | tail -n 1)

+ 1 - 1
tools/perf/config/feature-checks/Makefile

@@ -49,7 +49,7 @@ test-hello.bin:
 	$(BUILD)
 
 test-pthread-attr-setaffinity-np.bin:
-	$(BUILD) -Werror -lpthread
+	$(BUILD) -D_GNU_SOURCE -Werror -lpthread
 
 test-stackprotector-all.bin:
 	$(BUILD) -Werror -fstack-protector-all

+ 2 - 1
tools/perf/config/feature-checks/test-pthread-attr-setaffinity-np.c

@@ -5,10 +5,11 @@ int main(void)
 {
 	int ret = 0;
 	pthread_attr_t thread_attr;
+	cpu_set_t cs;
 
 	pthread_attr_init(&thread_attr);
 	/* don't care abt exact args, just the API itself in libpthread */
-	ret = pthread_attr_setaffinity_np(&thread_attr, 0, NULL);
+	ret = pthread_attr_setaffinity_np(&thread_attr, sizeof(cs), &cs);
 
 	return ret;
 }
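
pthread_attr_setaffinity_np() is a GNU extension: glibc only declares it (and the cpu_set_t helpers) when _GNU_SOURCE is defined, and passing (0, NULL) could break the build under -Werror on some glibc versions, hence both halves of the fix. A standalone version of the fixed probe, compiled with "cc -D_GNU_SOURCE ... -lpthread" just as the Makefile change supplies; the CPU_ZERO/CPU_SET lines are an added illustration of normal usage, not part of the feature test:

    #define _GNU_SOURCE         /* or -D_GNU_SOURCE on the command line */
    #include <pthread.h>
    #include <sched.h>

    int main(void)
    {
        pthread_attr_t attr;
        cpu_set_t cs;

        CPU_ZERO(&cs);
        CPU_SET(0, &cs);        /* threads created with attr run on CPU 0 */

        pthread_attr_init(&attr);
        /* GNU extension: only declared when _GNU_SOURCE is defined */
        return pthread_attr_setaffinity_np(&attr, sizeof(cs), &cs);
    }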

+ 15 - 3
tools/perf/util/cloexec.c

@@ -25,6 +25,10 @@ static int perf_flag_probe(void)
 	if (cpu < 0)
 		cpu = 0;
 
+	/*
+	 * Using -1 for the pid is a workaround to avoid gratuitous jump label
+	 * changes.
+	 */
 	while (1) {
 		/* check cloexec flag */
 		fd = sys_perf_event_open(&attr, pid, cpu, -1,
@@ -47,16 +51,24 @@ static int perf_flag_probe(void)
 		  err, strerror_r(err, sbuf, sizeof(sbuf)));
 
 	/* not supported, confirm error related to PERF_FLAG_FD_CLOEXEC */
-	fd = sys_perf_event_open(&attr, pid, cpu, -1, 0);
+	while (1) {
+		fd = sys_perf_event_open(&attr, pid, cpu, -1, 0);
+		if (fd < 0 && pid == -1 && errno == EACCES) {
+			pid = 0;
+			continue;
+		}
+		break;
+	}
 	err = errno;
 
+	if (fd >= 0)
+		close(fd);
+
 	if (WARN_ONCE(fd < 0 && err != EBUSY,
 		      "perf_event_open(..., 0) failed unexpectedly with error %d (%s)\n",
 		      err, strerror_r(err, sbuf, sizeof(sbuf))))
 		return -1;
 
-	close(fd);
-
 	return 0;
 }
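
For reference, the same probing strategy in standalone form: attempt perf_event_open() with PERF_FLAG_FD_CLOEXEC and retry on the current process when a perf_event_paranoid-restricted kernel answers EACCES for pid == -1. This sketch uses the raw syscall directly; perf's sys_perf_event_open() is a thin wrapper around it:

    #define _GNU_SOURCE
    #include <errno.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <linux/perf_event.h>

    #ifndef PERF_FLAG_FD_CLOEXEC
    #define PERF_FLAG_FD_CLOEXEC (1UL << 3)     /* added in Linux 3.14 */
    #endif

    static int probe_cloexec(void)
    {
        struct perf_event_attr attr;
        pid_t pid = -1;
        int fd;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_SOFTWARE;
        attr.config = PERF_COUNT_SW_CPU_CLOCK;

        while (1) {
            fd = syscall(__NR_perf_event_open, &attr, pid, 0, -1,
                         PERF_FLAG_FD_CLOEXEC);
            if (fd < 0 && pid == -1 && errno == EACCES) {
                pid = 0;        /* paranoid kernel: retry on ourselves */
                continue;
            }
            break;
        }
        if (fd < 0)
            return 0;           /* flag not supported (or no perf at all) */
        close(fd);
        return 1;
    }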

+ 1 - 1
tools/perf/util/evlist.h

@@ -28,7 +28,7 @@ struct perf_mmap {
 	int		 mask;
 	int		 refcnt;
 	unsigned int	 prev;
-	char		 event_copy[PERF_SAMPLE_MAX_SIZE];
+	char		 event_copy[PERF_SAMPLE_MAX_SIZE] __attribute__((aligned(8)));
 };
 
 struct perf_evlist {
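
This is the "perf top: Fix SIGBUS on sparc64" change: event_copy receives raw records out of the mmap ring buffer, which perf then reads through u64-sized sample fields, and sparc64 raises SIGBUS on misaligned 8-byte loads instead of fixing them up the way x86 does. A small illustration of what the attribute changes; the struct names here are invented for the example:

    #include <stddef.h>
    #include <stdio.h>

    /* "copy" lands at offset 1 here; reading a u64 through it is a
     * misaligned access: merely slow on x86, SIGBUS on sparc64. */
    struct plain {
        char tag;
        char copy[64];
    };

    /* The attribute forces "copy" onto an 8-byte boundary, as the
     * event_copy fix does, so u64 reads through it are always legal. */
    struct padded {
        char tag;
        char copy[64] __attribute__((aligned(8)));
    };

    int main(void)
    {
        printf("plain:  offset %% 8 = %zu\n", offsetof(struct plain, copy) % 8);
        printf("padded: offset %% 8 = %zu\n", offsetof(struct padded, copy) % 8);
        return 0;
    }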

+ 5 - 0
tools/perf/util/symbol-elf.c

@@ -11,6 +11,11 @@
 #include <symbol/kallsyms.h>
 #include "debug.h"
 
+#ifndef EM_AARCH64
+#define EM_AARCH64	183  /* ARM 64 bit */
+#endif
+
+
 #ifdef HAVE_CPLUS_DEMANGLE_SUPPORT
 extern char *cplus_demangle(const char *, int);