
Merge branch 'perfcounters-rename-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'perfcounters-rename-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  perf: Tidy up after the big rename
  perf: Do the big rename: Performance Counters -> Performance Events
  perf_counter: Rename 'event' to event_id/hw_event
  perf_counter: Rename list_entry -> group_entry, counter_list -> group_list

Manually resolved some fairly trivial conflicts with the tracing tree in
include/trace/ftrace.h and kernel/trace/trace_syscalls.c.
Linus Torvalds
commit 43c1266ce4
100 changed files with 1791 additions and 1350 deletions
  1. MAINTAINERS (+1 -1)
  2. arch/arm/include/asm/unistd.h (+1 -1)
  3. arch/arm/kernel/calls.S (+1 -1)
  4. arch/blackfin/include/asm/unistd.h (+1 -1)
  5. arch/blackfin/mach-common/entry.S (+1 -1)
  6. arch/frv/Kconfig (+1 -1)
  7. arch/frv/include/asm/perf_event.h (+5 -5)
  8. arch/frv/include/asm/unistd.h (+1 -1)
  9. arch/frv/kernel/entry.S (+1 -1)
  10. arch/frv/lib/Makefile (+1 -1)
  11. arch/frv/lib/perf_event.c (+4 -4)
  12. arch/m68k/include/asm/unistd.h (+1 -1)
  13. arch/m68k/kernel/entry.S (+1 -1)
  14. arch/m68knommu/kernel/syscalltable.S (+1 -1)
  15. arch/microblaze/include/asm/unistd.h (+1 -1)
  16. arch/microblaze/kernel/syscall_table.S (+1 -1)
  17. arch/mips/include/asm/unistd.h (+3 -3)
  18. arch/mips/kernel/scall32-o32.S (+1 -1)
  19. arch/mips/kernel/scall64-64.S (+1 -1)
  20. arch/mips/kernel/scall64-n32.S (+1 -1)
  21. arch/mips/kernel/scall64-o32.S (+1 -1)
  22. arch/mn10300/include/asm/unistd.h (+1 -1)
  23. arch/mn10300/kernel/entry.S (+1 -1)
  24. arch/parisc/Kconfig (+1 -1)
  25. arch/parisc/include/asm/perf_counter.h (+0 -7)
  26. arch/parisc/include/asm/perf_event.h (+7 -0)
  27. arch/parisc/include/asm/unistd.h (+2 -2)
  28. arch/parisc/kernel/syscall_table.S (+1 -1)
  29. arch/powerpc/Kconfig (+1 -1)
  30. arch/powerpc/include/asm/hw_irq.h (+11 -11)
  31. arch/powerpc/include/asm/paca.h (+1 -1)
  32. arch/powerpc/include/asm/perf_event.h (+13 -13)
  33. arch/powerpc/include/asm/systbl.h (+1 -1)
  34. arch/powerpc/include/asm/unistd.h (+1 -1)
  35. arch/powerpc/kernel/Makefile (+1 -1)
  36. arch/powerpc/kernel/asm-offsets.c (+1 -1)
  37. arch/powerpc/kernel/entry_64.S (+4 -4)
  38. arch/powerpc/kernel/irq.c (+4 -4)
  39. arch/powerpc/kernel/mpc7450-pmu.c (+1 -1)
  40. arch/powerpc/kernel/perf_callchain.c (+1 -1)
  41. arch/powerpc/kernel/perf_event.c (+291 -291)
  42. arch/powerpc/kernel/power4-pmu.c (+1 -1)
  43. arch/powerpc/kernel/power5+-pmu.c (+1 -1)
  44. arch/powerpc/kernel/power5-pmu.c (+1 -1)
  45. arch/powerpc/kernel/power6-pmu.c (+1 -1)
  46. arch/powerpc/kernel/power7-pmu.c (+1 -1)
  47. arch/powerpc/kernel/ppc970-pmu.c (+1 -1)
  48. arch/powerpc/kernel/time.c (+15 -15)
  49. arch/powerpc/mm/fault.c (+4 -4)
  50. arch/powerpc/platforms/Kconfig.cputype (+2 -2)
  51. arch/s390/Kconfig (+1 -1)
  52. arch/s390/include/asm/perf_counter.h (+0 -10)
  53. arch/s390/include/asm/perf_event.h (+10 -0)
  54. arch/s390/include/asm/unistd.h (+1 -1)
  55. arch/s390/kernel/compat_wrapper.S (+4 -4)
  56. arch/s390/kernel/syscalls.S (+1 -1)
  57. arch/s390/mm/fault.c (+4 -4)
  58. arch/sh/Kconfig (+1 -1)
  59. arch/sh/include/asm/perf_counter.h (+0 -9)
  60. arch/sh/include/asm/perf_event.h (+9 -0)
  61. arch/sh/include/asm/unistd_32.h (+1 -1)
  62. arch/sh/include/asm/unistd_64.h (+1 -1)
  63. arch/sh/kernel/syscalls_32.S (+1 -1)
  64. arch/sh/kernel/syscalls_64.S (+1 -1)
  65. arch/sh/mm/fault_32.c (+4 -4)
  66. arch/sh/mm/tlbflush_64.c (+4 -4)
  67. arch/sparc/Kconfig (+2 -2)
  68. arch/sparc/include/asm/perf_counter.h (+0 -14)
  69. arch/sparc/include/asm/perf_event.h (+14 -0)
  70. arch/sparc/include/asm/unistd.h (+1 -1)
  71. arch/sparc/kernel/Makefile (+1 -1)
  72. arch/sparc/kernel/nmi.c (+2 -2)
  73. arch/sparc/kernel/pcr.c (+5 -5)
  74. arch/sparc/kernel/perf_event.c (+89 -89)
  75. arch/sparc/kernel/systbls_32.S (+1 -1)
  76. arch/sparc/kernel/systbls_64.S (+2 -2)
  77. arch/x86/Kconfig (+1 -1)
  78. arch/x86/ia32/ia32entry.S (+1 -1)
  79. arch/x86/include/asm/entry_arch.h (+1 -1)
  80. arch/x86/include/asm/perf_event.h (+15 -15)
  81. arch/x86/include/asm/unistd_32.h (+1 -1)
  82. arch/x86/include/asm/unistd_64.h (+2 -2)
  83. arch/x86/kernel/apic/apic.c (+3 -3)
  84. arch/x86/kernel/cpu/Makefile (+1 -1)
  85. arch/x86/kernel/cpu/common.c (+2 -2)
  86. arch/x86/kernel/cpu/perf_event.c (+278 -278)
  87. arch/x86/kernel/cpu/perfctr-watchdog.c (+1 -1)
  88. arch/x86/kernel/entry_64.S (+1 -1)
  89. arch/x86/kernel/irqinit.c (+1 -1)
  90. arch/x86/kernel/syscall_table_32.S (+1 -1)
  91. arch/x86/mm/fault.c (+4 -4)
  92. arch/x86/oprofile/op_model_ppro.c (+2 -2)
  93. arch/x86/oprofile/op_x86_model.h (+1 -1)
  94. drivers/char/sysrq.c (+2 -2)
  95. fs/exec.c (+3 -3)
  96. include/asm-generic/unistd.h (+2 -2)
  97. include/linux/init_task.h (+7 -7)
  98. include/linux/perf_counter.h (+40 -457)
  99. include/linux/perf_event.h (+858 -0)
  100. include/linux/prctl.h (+2 -2)
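The user-visible core of this rename is the syscall itself: every architecture's table now maps the same slot to sys_perf_event_open, and the public header becomes <linux/perf_event.h>. Before the per-file diffs, here is a minimal userspace sketch of opening one hardware event through the renamed interface — illustrative only, not part of this commit (glibc had no wrapper at the time, hence the raw syscall; the attribute choices are arbitrary):

```c
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

int main(void)
{
	struct perf_event_attr attr;
	long long count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_INSTRUCTIONS;
	attr.disabled = 1;
	attr.exclude_kernel = 1;	/* cf. the exclude_* handling in the powerpc diff below */

	/* args: attr, pid (0 = this task), cpu (-1 = any), group_fd (-1), flags (0) */
	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	/* enable, burn some cycles, then read back the 64-bit count */
	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	for (volatile int i = 0; i < 1000000; i++)
		;
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
	read(fd, &count, sizeof(count));
	printf("instructions: %lld\n", count);
	close(fd);
	return 0;
}
```

The attribute struct and the five-argument signature are untouched by this commit; only the names moved.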

+ 1 - 1
MAINTAINERS

@@ -4000,7 +4000,7 @@ S:	Maintained
 F:	include/linux/delayacct.h
 F:	kernel/delayacct.c
 
-PERFORMANCE COUNTER SUBSYSTEM
+PERFORMANCE EVENTS SUBSYSTEM
 M:	Peter Zijlstra <a.p.zijlstra@chello.nl>
 M:	Paul Mackerras <paulus@samba.org>
 M:	Ingo Molnar <mingo@elte.hu>

+ 1 - 1
arch/arm/include/asm/unistd.h

@@ -390,7 +390,7 @@
 #define __NR_preadv			(__NR_SYSCALL_BASE+361)
 #define __NR_pwritev			(__NR_SYSCALL_BASE+362)
 #define __NR_rt_tgsigqueueinfo		(__NR_SYSCALL_BASE+363)
-#define __NR_perf_counter_open		(__NR_SYSCALL_BASE+364)
+#define __NR_perf_event_open		(__NR_SYSCALL_BASE+364)
 
 /*
  * The following SWIs are ARM private.

+ 1 - 1
arch/arm/kernel/calls.S

@@ -373,7 +373,7 @@
 		CALL(sys_preadv)
 		CALL(sys_pwritev)
 		CALL(sys_rt_tgsigqueueinfo)
-		CALL(sys_perf_counter_open)
+		CALL(sys_perf_event_open)
 #ifndef syscalls_counted
 .equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls
 #define syscalls_counted

+ 1 - 1
arch/blackfin/include/asm/unistd.h

@@ -381,7 +381,7 @@
 #define __NR_preadv		366
 #define __NR_pwritev		367
 #define __NR_rt_tgsigqueueinfo	368
-#define __NR_perf_counter_open	369
+#define __NR_perf_event_open	369
 
 #define __NR_syscall		370
 #define NR_syscalls		__NR_syscall

+ 1 - 1
arch/blackfin/mach-common/entry.S

@@ -1620,7 +1620,7 @@ ENTRY(_sys_call_table)
 	.long _sys_preadv
 	.long _sys_pwritev
 	.long _sys_rt_tgsigqueueinfo
-	.long _sys_perf_counter_open
+	.long _sys_perf_event_open
 
 	.rept NR_syscalls-(.-_sys_call_table)/4
 	.long _sys_ni_syscall

+ 1 - 1
arch/frv/Kconfig

@@ -7,7 +7,7 @@ config FRV
 	default y
 	select HAVE_IDE
 	select HAVE_ARCH_TRACEHOOK
-	select HAVE_PERF_COUNTERS
+	select HAVE_PERF_EVENTS
 
 config ZONE_DMA
 	bool

+ 5 - 5
arch/frv/include/asm/perf_counter.h → arch/frv/include/asm/perf_event.h

@@ -1,4 +1,4 @@
-/* FRV performance counter support
+/* FRV performance event support
  *
  * Copyright (C) 2009 Red Hat, Inc. All Rights Reserved.
  * Written by David Howells (dhowells@redhat.com)
@@ -9,9 +9,9 @@
  * 2 of the Licence, or (at your option) any later version.
  */
 
-#ifndef _ASM_PERF_COUNTER_H
-#define _ASM_PERF_COUNTER_H
+#ifndef _ASM_PERF_EVENT_H
+#define _ASM_PERF_EVENT_H
 
-#define PERF_COUNTER_INDEX_OFFSET	0
+#define PERF_EVENT_INDEX_OFFSET	0
 
-#endif /* _ASM_PERF_COUNTER_H */
+#endif /* _ASM_PERF_EVENT_H */

+ 1 - 1
arch/frv/include/asm/unistd.h

@@ -342,7 +342,7 @@
 #define __NR_preadv		333
 #define __NR_pwritev		334
 #define __NR_rt_tgsigqueueinfo	335
-#define __NR_perf_counter_open	336
+#define __NR_perf_event_open	336
 
 #ifdef __KERNEL__
 

+ 1 - 1
arch/frv/kernel/entry.S

@@ -1525,6 +1525,6 @@ sys_call_table:
 	.long sys_preadv
 	.long sys_pwritev
 	.long sys_rt_tgsigqueueinfo	/* 335 */
-	.long sys_perf_counter_open
+	.long sys_perf_event_open
 
 syscall_table_size = (. - sys_call_table)

+ 1 - 1
arch/frv/lib/Makefile

@@ -5,4 +5,4 @@
 lib-y := \
 	__ashldi3.o __lshrdi3.o __muldi3.o __ashrdi3.o __negdi2.o __ucmpdi2.o \
 	checksum.o memcpy.o memset.o atomic-ops.o atomic64-ops.o \
-	outsl_ns.o outsl_sw.o insl_ns.o insl_sw.o cache.o perf_counter.o
+	outsl_ns.o outsl_sw.o insl_ns.o insl_sw.o cache.o perf_event.o

+ 4 - 4
arch/frv/lib/perf_counter.c → arch/frv/lib/perf_event.c

@@ -1,4 +1,4 @@
-/* Performance counter handling
+/* Performance event handling
  *
  * Copyright (C) 2009 Red Hat, Inc. All Rights Reserved.
  * Written by David Howells (dhowells@redhat.com)
@@ -9,11 +9,11 @@
  * 2 of the Licence, or (at your option) any later version.
  */
 
-#include <linux/perf_counter.h>
+#include <linux/perf_event.h>
 
 /*
- * mark the performance counter as pending
+ * mark the performance event as pending
 */
-void set_perf_counter_pending(void)
+void set_perf_event_pending(void)
 {
 }

+ 1 - 1
arch/m68k/include/asm/unistd.h

@@ -335,7 +335,7 @@
 #define __NR_preadv		329
 #define __NR_pwritev		330
 #define __NR_rt_tgsigqueueinfo	331
-#define __NR_perf_counter_open	332
+#define __NR_perf_event_open	332
 
 #ifdef __KERNEL__
 

+ 1 - 1
arch/m68k/kernel/entry.S

@@ -756,5 +756,5 @@ sys_call_table:
 	.long sys_preadv
 	.long sys_pwritev		/* 330 */
 	.long sys_rt_tgsigqueueinfo
-	.long sys_perf_counter_open
+	.long sys_perf_event_open
 

+ 1 - 1
arch/m68knommu/kernel/syscalltable.S

@@ -350,7 +350,7 @@ ENTRY(sys_call_table)
 	.long sys_preadv
 	.long sys_pwritev		/* 330 */
 	.long sys_rt_tgsigqueueinfo
-	.long sys_perf_counter_open
+	.long sys_perf_event_open
 
 	.rept NR_syscalls-(.-sys_call_table)/4
 		.long sys_ni_syscall

+ 1 - 1
arch/microblaze/include/asm/unistd.h

@@ -381,7 +381,7 @@
 #define __NR_preadv		363 /* new */
 #define __NR_pwritev		364 /* new */
 #define __NR_rt_tgsigqueueinfo	365 /* new */
-#define __NR_perf_counter_open	366 /* new */
+#define __NR_perf_event_open	366 /* new */
 
 #define __NR_syscalls		367
 

+ 1 - 1
arch/microblaze/kernel/syscall_table.S

@@ -370,4 +370,4 @@ ENTRY(sys_call_table)
 	.long sys_ni_syscall
 	.long sys_ni_syscall
 	.long sys_rt_tgsigqueueinfo	/* 365 */
-	.long sys_perf_counter_open
+	.long sys_perf_event_open

+ 3 - 3
arch/mips/include/asm/unistd.h

@@ -353,7 +353,7 @@
 #define __NR_preadv			(__NR_Linux + 330)
 #define __NR_pwritev			(__NR_Linux + 331)
 #define __NR_rt_tgsigqueueinfo		(__NR_Linux + 332)
-#define __NR_perf_counter_open		(__NR_Linux + 333)
+#define __NR_perf_event_open		(__NR_Linux + 333)
 #define __NR_accept4			(__NR_Linux + 334)
 
 /*
@@ -664,7 +664,7 @@
 #define __NR_preadv			(__NR_Linux + 289)
 #define __NR_pwritev			(__NR_Linux + 290)
 #define __NR_rt_tgsigqueueinfo		(__NR_Linux + 291)
-#define __NR_perf_counter_open		(__NR_Linux + 292)
+#define __NR_perf_event_open		(__NR_Linux + 292)
 #define __NR_accept4			(__NR_Linux + 293)
 
 /*
@@ -979,7 +979,7 @@
 #define __NR_preadv			(__NR_Linux + 293)
 #define __NR_pwritev			(__NR_Linux + 294)
 #define __NR_rt_tgsigqueueinfo		(__NR_Linux + 295)
-#define __NR_perf_counter_open		(__NR_Linux + 296)
+#define __NR_perf_event_open		(__NR_Linux + 296)
 #define __NR_accept4			(__NR_Linux + 297)
 
 /*

+ 1 - 1
arch/mips/kernel/scall32-o32.S

@@ -581,7 +581,7 @@ einval:	li	v0, -ENOSYS
 	sys	sys_preadv		6	/* 4330 */
 	sys	sys_pwritev		6
 	sys	sys_rt_tgsigqueueinfo	4
-	sys	sys_perf_counter_open	5
+	sys	sys_perf_event_open	5
 	sys	sys_accept4		4
 	.endm
 

+ 1 - 1
arch/mips/kernel/scall64-64.S

@@ -418,6 +418,6 @@ sys_call_table:
 	PTR	sys_preadv
 	PTR	sys_pwritev			/* 5390 */
 	PTR	sys_rt_tgsigqueueinfo
-	PTR	sys_perf_counter_open
+	PTR	sys_perf_event_open
 	PTR	sys_accept4
 	.size	sys_call_table,.-sys_call_table

+ 1 - 1
arch/mips/kernel/scall64-n32.S

@@ -416,6 +416,6 @@ EXPORT(sysn32_call_table)
 	PTR	sys_preadv
 	PTR	sys_pwritev
 	PTR	compat_sys_rt_tgsigqueueinfo	/* 5295 */
-	PTR	sys_perf_counter_open
+	PTR	sys_perf_event_open
 	PTR	sys_accept4
 	.size	sysn32_call_table,.-sysn32_call_table

+ 1 - 1
arch/mips/kernel/scall64-o32.S

@@ -536,6 +536,6 @@ sys_call_table:
 	PTR	compat_sys_preadv		/* 4330 */
 	PTR	compat_sys_pwritev
 	PTR	compat_sys_rt_tgsigqueueinfo
-	PTR	sys_perf_counter_open
+	PTR	sys_perf_event_open
 	PTR	sys_accept4
 	.size	sys_call_table,.-sys_call_table

+ 1 - 1
arch/mn10300/include/asm/unistd.h

@@ -347,7 +347,7 @@
 #define __NR_preadv		334
 #define __NR_pwritev		335
 #define __NR_rt_tgsigqueueinfo	336
-#define __NR_perf_counter_open	337
+#define __NR_perf_event_open	337
 
 #ifdef __KERNEL__
 

+ 1 - 1
arch/mn10300/kernel/entry.S

@@ -723,7 +723,7 @@ ENTRY(sys_call_table)
 	.long sys_preadv
 	.long sys_pwritev		/* 335 */
 	.long sys_rt_tgsigqueueinfo
-	.long sys_perf_counter_open
+	.long sys_perf_event_open
 
 
 nr_syscalls=(.-sys_call_table)/4

+ 1 - 1
arch/parisc/Kconfig

@@ -16,7 +16,7 @@ config PARISC
 	select RTC_DRV_GENERIC
 	select INIT_ALL_POSSIBLE
 	select BUG
-	select HAVE_PERF_COUNTERS
+	select HAVE_PERF_EVENTS
 	select GENERIC_ATOMIC64 if !64BIT
 	help
 	  The PA-RISC microprocessor is designed by Hewlett-Packard and used

+ 0 - 7
arch/parisc/include/asm/perf_counter.h

@@ -1,7 +0,0 @@
-#ifndef __ASM_PARISC_PERF_COUNTER_H
-#define __ASM_PARISC_PERF_COUNTER_H
-
-/* parisc only supports software counters through this interface. */
-static inline void set_perf_counter_pending(void) { }
-
-#endif /* __ASM_PARISC_PERF_COUNTER_H */

+ 7 - 0
arch/parisc/include/asm/perf_event.h

@@ -0,0 +1,7 @@
+#ifndef __ASM_PARISC_PERF_EVENT_H
+#define __ASM_PARISC_PERF_EVENT_H
+
+/* parisc only supports software events through this interface. */
+static inline void set_perf_event_pending(void) { }
+
+#endif /* __ASM_PARISC_PERF_EVENT_H */

+ 2 - 2
arch/parisc/include/asm/unistd.h

@@ -810,9 +810,9 @@
 #define __NR_preadv		(__NR_Linux + 315)
 #define __NR_pwritev		(__NR_Linux + 316)
 #define __NR_rt_tgsigqueueinfo	(__NR_Linux + 317)
-#define __NR_perf_counter_open	(__NR_Linux + 318)
+#define __NR_perf_event_open	(__NR_Linux + 318)
 
-#define __NR_Linux_syscalls	(__NR_perf_counter_open + 1)
+#define __NR_Linux_syscalls	(__NR_perf_event_open + 1)
 
 
 #define __IGNORE_select		/* newselect */

+ 1 - 1
arch/parisc/kernel/syscall_table.S

@@ -416,7 +416,7 @@
 	ENTRY_COMP(preadv)		/* 315 */
 	ENTRY_COMP(pwritev)
 	ENTRY_COMP(rt_tgsigqueueinfo)
-	ENTRY_SAME(perf_counter_open)
+	ENTRY_SAME(perf_event_open)
 
 	/* Nothing yet */
 

+ 1 - 1
arch/powerpc/Kconfig

@@ -129,7 +129,7 @@ config PPC
 	select HAVE_OPROFILE
 	select HAVE_SYSCALL_WRAPPERS if PPC64
 	select GENERIC_ATOMIC64 if PPC32
-	select HAVE_PERF_COUNTERS
+	select HAVE_PERF_EVENTS
 
 config EARLY_PRINTK
 	bool

+ 11 - 11
arch/powerpc/include/asm/hw_irq.h

@@ -135,43 +135,43 @@ static inline int irqs_disabled_flags(unsigned long flags)
 */
 struct irq_chip;
 
-#ifdef CONFIG_PERF_COUNTERS
+#ifdef CONFIG_PERF_EVENTS
 
 #ifdef CONFIG_PPC64
-static inline unsigned long test_perf_counter_pending(void)
+static inline unsigned long test_perf_event_pending(void)
 {
 	unsigned long x;
 
 	asm volatile("lbz %0,%1(13)"
 		: "=r" (x)
-		: "i" (offsetof(struct paca_struct, perf_counter_pending)));
+		: "i" (offsetof(struct paca_struct, perf_event_pending)));
 	return x;
 }
 
-static inline void set_perf_counter_pending(void)
+static inline void set_perf_event_pending(void)
 {
 	asm volatile("stb %0,%1(13)" : :
 		"r" (1),
-		"i" (offsetof(struct paca_struct, perf_counter_pending)));
+		"i" (offsetof(struct paca_struct, perf_event_pending)));
 }
 
-static inline void clear_perf_counter_pending(void)
+static inline void clear_perf_event_pending(void)
 {
 	asm volatile("stb %0,%1(13)" : :
 		"r" (0),
-		"i" (offsetof(struct paca_struct, perf_counter_pending)));
+		"i" (offsetof(struct paca_struct, perf_event_pending)));
 }
 #endif /* CONFIG_PPC64 */
 
-#else  /* CONFIG_PERF_COUNTERS */
+#else  /* CONFIG_PERF_EVENTS */
 
-static inline unsigned long test_perf_counter_pending(void)
+static inline unsigned long test_perf_event_pending(void)
 {
 	return 0;
 }
 
-static inline void clear_perf_counter_pending(void) {}
-#endif /* CONFIG_PERF_COUNTERS */
+static inline void clear_perf_event_pending(void) {}
+#endif /* CONFIG_PERF_EVENTS */
 
 #endif	/* __KERNEL__ */
 #endif	/* _ASM_POWERPC_HW_IRQ_H */

+ 1 - 1
arch/powerpc/include/asm/paca.h

@@ -122,7 +122,7 @@ struct paca_struct {
 	u8 soft_enabled;		/* irq soft-enable flag */
 	u8 hard_enabled;		/* set if irqs are enabled in MSR */
 	u8 io_sync;			/* writel() needs spin_unlock sync */
-	u8 perf_counter_pending;	/* PM interrupt while soft-disabled */
+	u8 perf_event_pending;		/* PM interrupt while soft-disabled */
 
 	/* Stuff for accurate time accounting */
 	u64 user_time;			/* accumulated usermode TB ticks */

+ 13 - 13
arch/powerpc/include/asm/perf_counter.h → arch/powerpc/include/asm/perf_event.h

@@ -1,5 +1,5 @@
 /*
- * Performance counter support - PowerPC-specific definitions.
+ * Performance event support - PowerPC-specific definitions.
  *
  * Copyright 2008-2009 Paul Mackerras, IBM Corporation.
  *
@@ -12,9 +12,9 @@
 
 #include <asm/hw_irq.h>
 
-#define MAX_HWCOUNTERS		8
+#define MAX_HWEVENTS		8
 #define MAX_EVENT_ALTERNATIVES	8
-#define MAX_LIMITED_HWCOUNTERS	2
+#define MAX_LIMITED_HWEVENTS	2
 
 /*
  * This struct provides the constants and functions needed to
@@ -22,18 +22,18 @@
  */
 struct power_pmu {
 	const char	*name;
-	int		n_counter;
+	int		n_event;
 	int		max_alternatives;
 	unsigned long	add_fields;
 	unsigned long	test_adder;
 	int		(*compute_mmcr)(u64 events[], int n_ev,
 				unsigned int hwc[], unsigned long mmcr[]);
-	int		(*get_constraint)(u64 event, unsigned long *mskp,
+	int		(*get_constraint)(u64 event_id, unsigned long *mskp,
 				unsigned long *valp);
-	int		(*get_alternatives)(u64 event, unsigned int flags,
+	int		(*get_alternatives)(u64 event_id, unsigned int flags,
 				u64 alt[]);
 	void		(*disable_pmc)(unsigned int pmc, unsigned long mmcr[]);
-	int		(*limited_pmc_event)(u64 event);
+	int		(*limited_pmc_event)(u64 event_id);
 	u32		flags;
 	int		n_generic;
 	int		*generic_events;
@@ -61,10 +61,10 @@ struct pt_regs;
 extern unsigned long perf_misc_flags(struct pt_regs *regs);
 extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
 
-#define PERF_COUNTER_INDEX_OFFSET	1
+#define PERF_EVENT_INDEX_OFFSET	1
 
 /*
- * Only override the default definitions in include/linux/perf_counter.h
+ * Only override the default definitions in include/linux/perf_event.h
 * if we have hardware PMU support.
 */
 #ifdef CONFIG_PPC_PERF_CTRS
@@ -73,14 +73,14 @@ extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
 
 /*
 * The power_pmu.get_constraint function returns a 32/64-bit value and
- * a 32/64-bit mask that express the constraints between this event and
+ * a 32/64-bit mask that express the constraints between this event_id and
 * other events.
 *
 * The value and mask are divided up into (non-overlapping) bitfields
 * of three different types:
 *
 * Select field: this expresses the constraint that some set of bits
- * in MMCR* needs to be set to a specific value for this event.  For a
+ * in MMCR* needs to be set to a specific value for this event_id.  For a
 * select field, the mask contains 1s in every bit of the field, and
 * the value contains a unique value for each possible setting of the
 * MMCR* bits.  The constraint checking code will ensure that two events
@@ -102,9 +102,9 @@ extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
 * possible.)  For N classes, the field is N+1 bits wide, and each class
 * is assigned one bit from the least-significant N bits.  The mask has
 * only the most-significant bit set, and the value has only the bit
- * for the event's class set.  The test_adder has the least significant
+ * for the event_id's class set.  The test_adder has the least significant
 * bit set in the field.
 *
- * If an event is not subject to the constraint expressed by a particular
+ * If an event_id is not subject to the constraint expressed by a particular
 * field, then it will have 0 in both the mask and value for that field.
 */
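The select-field scheme described in the comment above lends itself to a tiny worked example. The sketch below is illustrative only (the field layout and helper are hypothetical, and the kernel's power_check_constraints() additionally handles add fields and NAND class fields via test_adder): for a pure select field, two events conflict exactly when they constrain the same MMCR* bits to different values.

```c
#include <stdio.h>

/*
 * Pure select-field conflict test: nonzero iff both masks cover the
 * same field (overlap) and the required values differ there.
 */
static unsigned long select_conflict(unsigned long m1, unsigned long v1,
				     unsigned long m2, unsigned long v2)
{
	return (v1 ^ v2) & (m1 & m2);
}

int main(void)
{
	unsigned long mask = 0xf0;	/* hypothetical 4-bit select field, bits 4-7 */

	printf("%lu\n", select_conflict(mask, 0x30, mask, 0x30)); /* 0: same setting, compatible */
	printf("%lu\n", select_conflict(mask, 0x30, mask, 0x50)); /* nonzero: conflict */
	printf("%lu\n", select_conflict(mask, 0x30, 0, 0));       /* 0: second event unconstrained */
	return 0;
}
```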

+ 1 - 1
arch/powerpc/include/asm/systbl.h

@@ -322,7 +322,7 @@ SYSCALL_SPU(epoll_create1)
 SYSCALL_SPU(dup3)
 SYSCALL_SPU(pipe2)
 SYSCALL(inotify_init1)
-SYSCALL_SPU(perf_counter_open)
+SYSCALL_SPU(perf_event_open)
 COMPAT_SYS_SPU(preadv)
 COMPAT_SYS_SPU(pwritev)
 COMPAT_SYS(rt_tgsigqueueinfo)

+ 1 - 1
arch/powerpc/include/asm/unistd.h

@@ -341,7 +341,7 @@
 #define __NR_dup3		316
 #define __NR_pipe2		317
 #define __NR_inotify_init1	318
-#define __NR_perf_counter_open	319
+#define __NR_perf_event_open	319
 #define __NR_preadv		320
 #define __NR_pwritev		321
 #define __NR_rt_tgsigqueueinfo	322

+ 1 - 1
arch/powerpc/kernel/Makefile

@@ -97,7 +97,7 @@ obj64-$(CONFIG_AUDIT)		+= compat_audit.o
 
 obj-$(CONFIG_DYNAMIC_FTRACE)	+= ftrace.o
 obj-$(CONFIG_FUNCTION_GRAPH_TRACER)	+= ftrace.o
-obj-$(CONFIG_PPC_PERF_CTRS)	+= perf_counter.o perf_callchain.o
+obj-$(CONFIG_PPC_PERF_CTRS)	+= perf_event.o perf_callchain.o
 obj64-$(CONFIG_PPC_PERF_CTRS)	+= power4-pmu.o ppc970-pmu.o power5-pmu.o \
 				   power5+-pmu.o power6-pmu.o power7-pmu.o
 obj32-$(CONFIG_PPC_PERF_CTRS)	+= mpc7450-pmu.o

+ 1 - 1
arch/powerpc/kernel/asm-offsets.c

@@ -133,7 +133,7 @@ int main(void)
 	DEFINE(PACAKMSR, offsetof(struct paca_struct, kernel_msr));
 	DEFINE(PACASOFTIRQEN, offsetof(struct paca_struct, soft_enabled));
 	DEFINE(PACAHARDIRQEN, offsetof(struct paca_struct, hard_enabled));
-	DEFINE(PACAPERFPEND, offsetof(struct paca_struct, perf_counter_pending));
+	DEFINE(PACAPERFPEND, offsetof(struct paca_struct, perf_event_pending));
 	DEFINE(PACACONTEXTID, offsetof(struct paca_struct, context.id));
 #ifdef CONFIG_PPC_MM_SLICES
 	DEFINE(PACALOWSLICESPSIZE, offsetof(struct paca_struct,

+ 4 - 4
arch/powerpc/kernel/entry_64.S

@@ -556,14 +556,14 @@ ALT_FW_FTR_SECTION_END_IFCLR(FW_FEATURE_ISERIES)
 2:
 	TRACE_AND_RESTORE_IRQ(r5);
 
-#ifdef CONFIG_PERF_COUNTERS
-	/* check paca->perf_counter_pending if we're enabling ints */
+#ifdef CONFIG_PERF_EVENTS
+	/* check paca->perf_event_pending if we're enabling ints */
 	lbz	r3,PACAPERFPEND(r13)
 	and.	r3,r3,r5
 	beq	27f
-	bl	.perf_counter_do_pending
+	bl	.perf_event_do_pending
 27:
-#endif /* CONFIG_PERF_COUNTERS */
+#endif /* CONFIG_PERF_EVENTS */
 
 	/* extract EE bit and use it to restore paca->hard_enabled */
 	ld	r3,_MSR(r1)

+ 4 - 4
arch/powerpc/kernel/irq.c

@@ -53,7 +53,7 @@
 #include <linux/bootmem.h>
 #include <linux/pci.h>
 #include <linux/debugfs.h>
-#include <linux/perf_counter.h>
+#include <linux/perf_event.h>
 
 #include <asm/uaccess.h>
 #include <asm/system.h>
@@ -138,9 +138,9 @@ notrace void raw_local_irq_restore(unsigned long en)
 	}
 #endif /* CONFIG_PPC_STD_MMU_64 */
 
-	if (test_perf_counter_pending()) {
-		clear_perf_counter_pending();
-		perf_counter_do_pending();
+	if (test_perf_event_pending()) {
+		clear_perf_event_pending();
+		perf_event_do_pending();
 	}
 
 	/*

+ 1 - 1
arch/powerpc/kernel/mpc7450-pmu.c

@@ -9,7 +9,7 @@
  * 2 of the License, or (at your option) any later version.
  */
 #include <linux/string.h>
-#include <linux/perf_counter.h>
+#include <linux/perf_event.h>
 #include <asm/reg.h>
 #include <asm/cputable.h>
 

+ 1 - 1
arch/powerpc/kernel/perf_callchain.c

@@ -10,7 +10,7 @@
  */
 #include <linux/kernel.h>
 #include <linux/sched.h>
-#include <linux/perf_counter.h>
+#include <linux/perf_event.h>
 #include <linux/percpu.h>
 #include <linux/uaccess.h>
 #include <linux/mm.h>

+ 291 - 291
arch/powerpc/kernel/perf_counter.c → arch/powerpc/kernel/perf_event.c

@@ -1,5 +1,5 @@
 /*
- * Performance counter support - powerpc architecture code
+ * Performance event support - powerpc architecture code
  *
  * Copyright 2008-2009 Paul Mackerras, IBM Corporation.
  *
@@ -10,7 +10,7 @@
  */
 #include <linux/kernel.h>
 #include <linux/sched.h>
-#include <linux/perf_counter.h>
+#include <linux/perf_event.h>
 #include <linux/percpu.h>
 #include <linux/hardirq.h>
 #include <asm/reg.h>
@@ -19,24 +19,24 @@
 #include <asm/firmware.h>
 #include <asm/ptrace.h>
 
-struct cpu_hw_counters {
-	int n_counters;
+struct cpu_hw_events {
+	int n_events;
 	int n_percpu;
 	int disabled;
 	int n_added;
 	int n_limited;
 	u8  pmcs_enabled;
-	struct perf_counter *counter[MAX_HWCOUNTERS];
-	u64 events[MAX_HWCOUNTERS];
-	unsigned int flags[MAX_HWCOUNTERS];
+	struct perf_event *event[MAX_HWEVENTS];
+	u64 events[MAX_HWEVENTS];
+	unsigned int flags[MAX_HWEVENTS];
 	unsigned long mmcr[3];
-	struct perf_counter *limited_counter[MAX_LIMITED_HWCOUNTERS];
-	u8  limited_hwidx[MAX_LIMITED_HWCOUNTERS];
-	u64 alternatives[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES];
-	unsigned long amasks[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES];
-	unsigned long avalues[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES];
+	struct perf_event *limited_event[MAX_LIMITED_HWEVENTS];
+	u8  limited_hwidx[MAX_LIMITED_HWEVENTS];
+	u64 alternatives[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES];
+	unsigned long amasks[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES];
+	unsigned long avalues[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES];
 };
-DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters);
+DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
 
 struct power_pmu *ppmu;
 
@@ -47,7 +47,7 @@ struct power_pmu *ppmu;
 * where the hypervisor bit is forced to 1 (as on Apple G5 processors),
 * then we need to use the FCHV bit to ignore kernel events.
 */
-static unsigned int freeze_counters_kernel = MMCR0_FCS;
+static unsigned int freeze_events_kernel = MMCR0_FCS;
 
 /*
 * 32-bit doesn't have MMCRA but does have an MMCR2,
@@ -122,14 +122,14 @@ static inline u32 perf_get_misc_flags(struct pt_regs *regs)
 
 	if (ppmu->flags & PPMU_ALT_SIPR) {
 		if (mmcra & POWER6_MMCRA_SIHV)
-			return PERF_EVENT_MISC_HYPERVISOR;
+			return PERF_RECORD_MISC_HYPERVISOR;
 		return (mmcra & POWER6_MMCRA_SIPR) ?
-			PERF_EVENT_MISC_USER : PERF_EVENT_MISC_KERNEL;
+			PERF_RECORD_MISC_USER : PERF_RECORD_MISC_KERNEL;
 	}
 	if (mmcra & MMCRA_SIHV)
-		return PERF_EVENT_MISC_HYPERVISOR;
-	return (mmcra & MMCRA_SIPR) ? PERF_EVENT_MISC_USER :
-		PERF_EVENT_MISC_KERNEL;
+		return PERF_RECORD_MISC_HYPERVISOR;
+	return (mmcra & MMCRA_SIPR) ? PERF_RECORD_MISC_USER :
+		PERF_RECORD_MISC_KERNEL;
 }
 
 /*
@@ -152,9 +152,9 @@ static inline int perf_intr_is_nmi(struct pt_regs *regs)
 
 #endif /* CONFIG_PPC64 */
 
-static void perf_counter_interrupt(struct pt_regs *regs);
+static void perf_event_interrupt(struct pt_regs *regs);
 
-void perf_counter_print_debug(void)
+void perf_event_print_debug(void)
 {
 }
 
@@ -240,31 +240,31 @@ static void write_pmc(int idx, unsigned long val)
 * Check if a set of events can all go on the PMU at once.
 * If they can't, this will look at alternative codes for the events
 * and see if any combination of alternative codes is feasible.
- * The feasible set is returned in event[].
+ * The feasible set is returned in event_id[].
 */
-static int power_check_constraints(struct cpu_hw_counters *cpuhw,
-				   u64 event[], unsigned int cflags[],
+static int power_check_constraints(struct cpu_hw_events *cpuhw,
+				   u64 event_id[], unsigned int cflags[],
 				   int n_ev)
 {
 	unsigned long mask, value, nv;
-	unsigned long smasks[MAX_HWCOUNTERS], svalues[MAX_HWCOUNTERS];
-	int n_alt[MAX_HWCOUNTERS], choice[MAX_HWCOUNTERS];
+	unsigned long smasks[MAX_HWEVENTS], svalues[MAX_HWEVENTS];
+	int n_alt[MAX_HWEVENTS], choice[MAX_HWEVENTS];
 	int i, j;
 	unsigned long addf = ppmu->add_fields;
 	unsigned long tadd = ppmu->test_adder;
 
-	if (n_ev > ppmu->n_counter)
+	if (n_ev > ppmu->n_event)
 		return -1;
 
 	/* First see if the events will go on as-is */
 	for (i = 0; i < n_ev; ++i) {
 		if ((cflags[i] & PPMU_LIMITED_PMC_REQD)
-		    && !ppmu->limited_pmc_event(event[i])) {
-			ppmu->get_alternatives(event[i], cflags[i],
+		    && !ppmu->limited_pmc_event(event_id[i])) {
+			ppmu->get_alternatives(event_id[i], cflags[i],
 					       cpuhw->alternatives[i]);
-			event[i] = cpuhw->alternatives[i][0];
+			event_id[i] = cpuhw->alternatives[i][0];
 		}
-		if (ppmu->get_constraint(event[i], &cpuhw->amasks[i][0],
+		if (ppmu->get_constraint(event_id[i], &cpuhw->amasks[i][0],
 					 &cpuhw->avalues[i][0]))
 			return -1;
 	}
@@ -287,7 +287,7 @@ static int power_check_constraints(struct cpu_hw_counters *cpuhw,
 		return -1;
 	for (i = 0; i < n_ev; ++i) {
 		choice[i] = 0;
-		n_alt[i] = ppmu->get_alternatives(event[i], cflags[i],
+		n_alt[i] = ppmu->get_alternatives(event_id[i], cflags[i],
 						  cpuhw->alternatives[i]);
 		for (j = 1; j < n_alt[i]; ++j)
 			ppmu->get_constraint(cpuhw->alternatives[i][j],
@@ -307,7 +307,7 @@ static int power_check_constraints(struct cpu_hw_counters *cpuhw,
 			j = choice[i];
 		}
 		/*
-		 * See if any alternative k for event i,
+		 * See if any alternative k for event_id i,
 		 * where k > j, will satisfy the constraints.
 		 */
 		while (++j < n_alt[i]) {
@@ -321,16 +321,16 @@ static int power_check_constraints(struct cpu_hw_counters *cpuhw,
 		if (j >= n_alt[i]) {
 			/*
 			 * No feasible alternative, backtrack
-			 * to event i-1 and continue enumerating its
+			 * to event_id i-1 and continue enumerating its
 			 * alternatives from where we got up to.
 			 */
 			if (--i < 0)
 				return -1;
 		} else {
 			/*
-			 * Found a feasible alternative for event i,
-			 * remember where we got up to with this event,
-			 * go on to the next event, and start with
+			 * Found a feasible alternative for event_id i,
+			 * remember where we got up to with this event_id,
+			 * go on to the next event_id, and start with
 			 * the first alternative for it.
 			 */
 			choice[i] = j;
@@ -345,21 +345,21 @@ static int power_check_constraints(struct cpu_hw_counters *cpuhw,
 
 	/* OK, we have a feasible combination, tell the caller the solution */
 	for (i = 0; i < n_ev; ++i)
-		event[i] = cpuhw->alternatives[i][choice[i]];
+		event_id[i] = cpuhw->alternatives[i][choice[i]];
 	return 0;
 }
 
 /*
- * Check if newly-added counters have consistent settings for
+ * Check if newly-added events have consistent settings for
 * exclude_{user,kernel,hv} with each other and any previously
- * added counters.
+ * added events.
 */
-static int check_excludes(struct perf_counter **ctrs, unsigned int cflags[],
+static int check_excludes(struct perf_event **ctrs, unsigned int cflags[],
 			  int n_prev, int n_new)
 {
 	int eu = 0, ek = 0, eh = 0;
 	int i, n, first;
-	struct perf_counter *counter;
+	struct perf_event *event;
 
 	n = n_prev + n_new;
 	if (n <= 1)
@@ -371,15 +371,15 @@ static int check_excludes(struct perf_counter **ctrs, unsigned int cflags[],
 			cflags[i] &= ~PPMU_LIMITED_PMC_REQD;
 			continue;
 		}
-		counter = ctrs[i];
+		event = ctrs[i];
 		if (first) {
-			eu = counter->attr.exclude_user;
-			ek = counter->attr.exclude_kernel;
-			eh = counter->attr.exclude_hv;
+			eu = event->attr.exclude_user;
+			ek = event->attr.exclude_kernel;
+			eh = event->attr.exclude_hv;
 			first = 0;
-		} else if (counter->attr.exclude_user != eu ||
-			   counter->attr.exclude_kernel != ek ||
-			   counter->attr.exclude_hv != eh) {
+		} else if (event->attr.exclude_user != eu ||
+			   event->attr.exclude_kernel != ek ||
+			   event->attr.exclude_hv != eh) {
 			return -EAGAIN;
 		}
 	}
@@ -392,11 +392,11 @@ static int check_excludes(struct perf_counter **ctrs, unsigned int cflags[],
 	return 0;
 }
 
-static void power_pmu_read(struct perf_counter *counter)
+static void power_pmu_read(struct perf_event *event)
 {
 	s64 val, delta, prev;
 
-	if (!counter->hw.idx)
+	if (!event->hw.idx)
 		return;
 	/*
 	 * Performance monitor interrupts come even when interrupts
@@ -404,21 +404,21 @@ static void power_pmu_read(struct perf_counter *counter)
 	 * Therefore we treat them like NMIs.
 	 */
 	do {
-		prev = atomic64_read(&counter->hw.prev_count);
+		prev = atomic64_read(&event->hw.prev_count);
 		barrier();
-		val = read_pmc(counter->hw.idx);
-	} while (atomic64_cmpxchg(&counter->hw.prev_count, prev, val) != prev);
+		val = read_pmc(event->hw.idx);
+	} while (atomic64_cmpxchg(&event->hw.prev_count, prev, val) != prev);
 
 	/* The counters are only 32 bits wide */
 	delta = (val - prev) & 0xfffffffful;
-	atomic64_add(delta, &counter->count);
-	atomic64_sub(delta, &counter->hw.period_left);
+	atomic64_add(delta, &event->count);
+	atomic64_sub(delta, &event->hw.period_left);
 }
 
 /*
 * On some machines, PMC5 and PMC6 can't be written, don't respect
 * the freeze conditions, and don't generate interrupts.  This tells
- * us if `counter' is using such a PMC.
+ * us if `event' is using such a PMC.
 */
 static int is_limited_pmc(int pmcnum)
 {
@@ -426,53 +426,53 @@ static int is_limited_pmc(int pmcnum)
 		&& (pmcnum == 5 || pmcnum == 6);
 }
 
-static void freeze_limited_counters(struct cpu_hw_counters *cpuhw,
+static void freeze_limited_events(struct cpu_hw_events *cpuhw,
 				    unsigned long pmc5, unsigned long pmc6)
 {
-	struct perf_counter *counter;
+	struct perf_event *event;
 	u64 val, prev, delta;
 	int i;
 
 	for (i = 0; i < cpuhw->n_limited; ++i) {
-		counter = cpuhw->limited_counter[i];
-		if (!counter->hw.idx)
+		event = cpuhw->limited_event[i];
+		if (!event->hw.idx)
 			continue;
-		val = (counter->hw.idx == 5) ? pmc5 : pmc6;
-		prev = atomic64_read(&counter->hw.prev_count);
-		counter->hw.idx = 0;
+		val = (event->hw.idx == 5) ? pmc5 : pmc6;
+		prev = atomic64_read(&event->hw.prev_count);
+		event->hw.idx = 0;
 		delta = (val - prev) & 0xfffffffful;
-		atomic64_add(delta, &counter->count);
+		atomic64_add(delta, &event->count);
 	}
 }
 
-static void thaw_limited_counters(struct cpu_hw_counters *cpuhw,
+static void thaw_limited_events(struct cpu_hw_events *cpuhw,
 				  unsigned long pmc5, unsigned long pmc6)
 {
-	struct perf_counter *counter;
+	struct perf_event *event;
 	u64 val;
 	int i;
 
 	for (i = 0; i < cpuhw->n_limited; ++i) {
-		counter = cpuhw->limited_counter[i];
-		counter->hw.idx = cpuhw->limited_hwidx[i];
-		val = (counter->hw.idx == 5) ? pmc5 : pmc6;
-		atomic64_set(&counter->hw.prev_count, val);
-		perf_counter_update_userpage(counter);
+		event = cpuhw->limited_event[i];
+		event->hw.idx = cpuhw->limited_hwidx[i];
+		val = (event->hw.idx == 5) ? pmc5 : pmc6;
+		atomic64_set(&event->hw.prev_count, val);
+		perf_event_update_userpage(event);
 	}
 }
 
 /*
- * Since limited counters don't respect the freeze conditions, we
+ * Since limited events don't respect the freeze conditions, we
 * have to read them immediately after freezing or unfreezing the
- * other counters.  We try to keep the values from the limited
- * counters as consistent as possible by keeping the delay (in
+ * other events.  We try to keep the values from the limited
+ * events as consistent as possible by keeping the delay (in
 * cycles and instructions) between freezing/unfreezing and reading
- * the limited counters as small and consistent as possible.
- * Therefore, if any limited counters are in use, we read them
+ * the limited events as small and consistent as possible.
+ * Therefore, if any limited events are in use, we read them
 * both, and always in the same order, to minimize variability,
 * and do it inside the same asm that writes MMCR0.
 */
-static void write_mmcr0(struct cpu_hw_counters *cpuhw, unsigned long mmcr0)
+static void write_mmcr0(struct cpu_hw_events *cpuhw, unsigned long mmcr0)
 {
 	unsigned long pmc5, pmc6;
 
@@ -485,7 +485,7 @@ static void write_mmcr0(struct cpu_hw_counters *cpuhw, unsigned long mmcr0)
 	 * Write MMCR0, then read PMC5 and PMC6 immediately.
 	 * To ensure we don't get a performance monitor interrupt
 	 * between writing MMCR0 and freezing/thawing the limited
-	 * counters, we first write MMCR0 with the counter overflow
+	 * events, we first write MMCR0 with the event overflow
 	 * interrupt enable bits turned off.
 	 */
 	asm volatile("mtspr %3,%2; mfspr %0,%4; mfspr %1,%5"
@@ -495,12 +495,12 @@ static void write_mmcr0(struct cpu_hw_counters *cpuhw, unsigned long mmcr0)
 		       "i" (SPRN_PMC5), "i" (SPRN_PMC6));
 
 	if (mmcr0 & MMCR0_FC)
-		freeze_limited_counters(cpuhw, pmc5, pmc6);
+		freeze_limited_events(cpuhw, pmc5, pmc6);
 	else
-		thaw_limited_counters(cpuhw, pmc5, pmc6);
+		thaw_limited_events(cpuhw, pmc5, pmc6);
 
 	/*
-	 * Write the full MMCR0 including the counter overflow interrupt
+	 * Write the full MMCR0 including the event overflow interrupt
 	 * enable bits, if necessary.
 	 */
 	if (mmcr0 & (MMCR0_PMC1CE | MMCR0_PMCjCE))
@@ -508,18 +508,18 @@ static void write_mmcr0(struct cpu_hw_counters *cpuhw, unsigned long mmcr0)
 }
 
 /*
- * Disable all counters to prevent PMU interrupts and to allow
- * counters to be added or removed.
+ * Disable all events to prevent PMU interrupts and to allow
+ * events to be added or removed.
 */
 void hw_perf_disable(void)
 {
-	struct cpu_hw_counters *cpuhw;
+	struct cpu_hw_events *cpuhw;
 	unsigned long flags;
 
 	if (!ppmu)
 		return;
 	local_irq_save(flags);
-	cpuhw = &__get_cpu_var(cpu_hw_counters);
+	cpuhw = &__get_cpu_var(cpu_hw_events);
 
 	if (!cpuhw->disabled) {
 		cpuhw->disabled = 1;
@@ -545,7 +545,7 @@ void hw_perf_disable(void)
 		/*
 		 * Set the 'freeze counters' bit.
 		 * The barrier is to make sure the mtspr has been
-		 * executed and the PMU has frozen the counters
+		 * executed and the PMU has frozen the events
 		 * before we return.
 		 */
 		write_mmcr0(cpuhw, mfspr(SPRN_MMCR0) | MMCR0_FC);
@@ -555,26 +555,26 @@ void hw_perf_disable(void)
 }
 
 /*
- * Re-enable all counters if disable == 0.
- * If we were previously disabled and counters were added, then
+ * Re-enable all events if disable == 0.
+ * If we were previously disabled and events were added, then
 * put the new config on the PMU.
 */
 void hw_perf_enable(void)
 {
-	struct perf_counter *counter;
-	struct cpu_hw_counters *cpuhw;
+	struct perf_event *event;
+	struct cpu_hw_events *cpuhw;
 	unsigned long flags;
 	long i;
 	unsigned long val;
 	s64 left;
-	unsigned int hwc_index[MAX_HWCOUNTERS];
+	unsigned int hwc_index[MAX_HWEVENTS];
 	int n_lim;
 	int idx;
 
 	if (!ppmu)
 		return;
 	local_irq_save(flags);
-	cpuhw = &__get_cpu_var(cpu_hw_counters);
+	cpuhw = &__get_cpu_var(cpu_hw_events);
 	if (!cpuhw->disabled) {
 		local_irq_restore(flags);
 		return;
@@ -582,23 +582,23 @@ void hw_perf_enable(void)
 	cpuhw->disabled = 0;
 
 	/*
-	 * If we didn't change anything, or only removed counters,
+	 * If we didn't change anything, or only removed events,
 	 * no need to recalculate MMCR* settings and reset the PMCs.
 	 * Just reenable the PMU with the current MMCR* settings
-	 * (possibly updated for removal of counters).
+	 * (possibly updated for removal of events).
 	 */
 	if (!cpuhw->n_added) {
 		mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
 		mtspr(SPRN_MMCR1, cpuhw->mmcr[1]);
-		if (cpuhw->n_counters == 0)
+		if (cpuhw->n_events == 0)
 			ppc_set_pmu_inuse(0);
 		goto out_enable;
 	}
 
 	/*
-	 * Compute MMCR* values for the new set of counters
+	 * Compute MMCR* values for the new set of events
 	 */
-	if (ppmu->compute_mmcr(cpuhw->events, cpuhw->n_counters, hwc_index,
+	if (ppmu->compute_mmcr(cpuhw->events, cpuhw->n_events, hwc_index,
 			       cpuhw->mmcr)) {
 		/* shouldn't ever get here */
 		printk(KERN_ERR "oops compute_mmcr failed\n");
@@ -607,22 +607,22 @@ void hw_perf_enable(void)
 
 	/*
 	 * Add in MMCR0 freeze bits corresponding to the
-	 * attr.exclude_* bits for the first counter.
-	 * We have already checked that all counters have the
-	 * same values for these bits as the first counter.
+	 * attr.exclude_* bits for the first event.
+	 * We have already checked that all events have the
+	 * same values for these bits as the first event.
 	 */
-	counter = cpuhw->counter[0];
-	if (counter->attr.exclude_user)
+	event = cpuhw->event[0];
+	if (event->attr.exclude_user)
 		cpuhw->mmcr[0] |= MMCR0_FCP;
-	if (counter->attr.exclude_kernel)
-		cpuhw->mmcr[0] |= freeze_counters_kernel;
-	if (counter->attr.exclude_hv)
+	if (event->attr.exclude_kernel)
+		cpuhw->mmcr[0] |= freeze_events_kernel;
+	if (event->attr.exclude_hv)
 		cpuhw->mmcr[0] |= MMCR0_FCHV;
 
 	/*
 	 * Write the new configuration to MMCR* with the freeze
-	 * bit set and set the hardware counters to their initial values.
-	 * Then unfreeze the counters.
+	 * bit set and set the hardware events to their initial values.
+	 * Then unfreeze the events.
 	 */
 	ppc_set_pmu_inuse(1);
 	mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
@@ -631,43 +631,43 @@ void hw_perf_enable(void)
 				| MMCR0_FC);
 
 	/*
-	 * Read off any pre-existing counters that need to move
+	 * Read off any pre-existing events that need to move
 	 * to another PMC.
 	 */
-	for (i = 0; i < cpuhw->n_counters; ++i) {
-		counter = cpuhw->counter[i];
-		if (counter->hw.idx && counter->hw.idx != hwc_index[i] + 1) {
-			power_pmu_read(counter);
-			write_pmc(counter->hw.idx, 0);
-			counter->hw.idx = 0;
+	for (i = 0; i < cpuhw->n_events; ++i) {
+		event = cpuhw->event[i];
+		if (event->hw.idx && event->hw.idx != hwc_index[i] + 1) {
+			power_pmu_read(event);
+			write_pmc(event->hw.idx, 0);
+			event->hw.idx = 0;
 		}
 	}
 
 	/*
-	 * Initialize the PMCs for all the new and moved counters.
+	 * Initialize the PMCs for all the new and moved events.
 	 */
 	cpuhw->n_limited = n_lim = 0;
-	for (i = 0; i < cpuhw->n_counters; ++i) {
-		counter = cpuhw->counter[i];
-		if (counter->hw.idx)
+	for (i = 0; i < cpuhw->n_events; ++i) {
+		event = cpuhw->event[i];
+		if (event->hw.idx)
 			continue;
 		idx = hwc_index[i] + 1;
 		if (is_limited_pmc(idx)) {
-			cpuhw->limited_counter[n_lim] = counter;
+			cpuhw->limited_event[n_lim] = event;
 			cpuhw->limited_hwidx[n_lim] = idx;
 			++n_lim;
 			continue;
 		}
 		val = 0;
-		if (counter->hw.sample_period) {
-			left = atomic64_read(&counter->hw.period_left);
+		if (event->hw.sample_period) {
+			left = atomic64_read(&event->hw.period_left);
 			if (left < 0x80000000L)
 				val = 0x80000000L - left;
 		}
-		atomic64_set(&counter->hw.prev_count, val);
-		counter->hw.idx = idx;
+		atomic64_set(&event->hw.prev_count, val);
+		event->hw.idx = idx;
 		write_pmc(idx, val);
-		perf_counter_update_userpage(counter);
+		perf_event_update_userpage(event);
 	}
 	cpuhw->n_limited = n_lim;
 	cpuhw->mmcr[0] |= MMCR0_PMXE | MMCR0_FCECE;
@@ -688,85 +688,85 @@ void hw_perf_enable(void)
 	local_irq_restore(flags);
 }
 
-static int collect_events(struct perf_counter *group, int max_count,
-			  struct perf_counter *ctrs[], u64 *events,
+static int collect_events(struct perf_event *group, int max_count,
+			  struct perf_event *ctrs[], u64 *events,
 			  unsigned int *flags)
 {
 	int n = 0;
-	struct perf_counter *counter;
+	struct perf_event *event;
 
-	if (!is_software_counter(group)) {
+	if (!is_software_event(group)) {
 		if (n >= max_count)
 			return -1;
 		ctrs[n] = group;
-		flags[n] = group->hw.counter_base;
+		flags[n] = group->hw.event_base;
 		events[n++] = group->hw.config;
 	}
-	list_for_each_entry(counter, &group->sibling_list, list_entry) {
-		if (!is_software_counter(counter) &&
-		    counter->state != PERF_COUNTER_STATE_OFF) {
+	list_for_each_entry(event, &group->sibling_list, list_entry) {
+		if (!is_software_event(event) &&
+		    event->state != PERF_EVENT_STATE_OFF) {
 			if (n >= max_count)
 				return -1;
-			ctrs[n] = counter;
-			flags[n] = counter->hw.counter_base;
-			events[n++] = counter->hw.config;
+			ctrs[n] = event;
+			flags[n] = event->hw.event_base;
+			events[n++] = event->hw.config;
 		}
 	}
 	return n;
 }
 
-static void counter_sched_in(struct perf_counter *counter, int cpu)
+static void event_sched_in(struct perf_event *event, int cpu)
 {
 {
-	counter->state = PERF_COUNTER_STATE_ACTIVE;
-	counter->oncpu = cpu;
-	counter->tstamp_running += counter->ctx->time - counter->tstamp_stopped;
-	if (is_software_counter(counter))
-		counter->pmu->enable(counter);
+	event->state = PERF_EVENT_STATE_ACTIVE;
+	event->oncpu = cpu;
+	event->tstamp_running += event->ctx->time - event->tstamp_stopped;
+	if (is_software_event(event))
+		event->pmu->enable(event);
 }
 }
 
 
 /*
 /*
- * Called to enable a whole group of counters.
+ * Called to enable a whole group of events.
  * Returns 1 if the group was enabled, or -EAGAIN if it could not be.
  * Returns 1 if the group was enabled, or -EAGAIN if it could not be.
  * Assumes the caller has disabled interrupts and has
  * Assumes the caller has disabled interrupts and has
  * frozen the PMU with hw_perf_save_disable.
  * frozen the PMU with hw_perf_save_disable.
  */
  */
-int hw_perf_group_sched_in(struct perf_counter *group_leader,
+int hw_perf_group_sched_in(struct perf_event *group_leader,
 	       struct perf_cpu_context *cpuctx,
 	       struct perf_cpu_context *cpuctx,
-	       struct perf_counter_context *ctx, int cpu)
+	       struct perf_event_context *ctx, int cpu)
 {
 {
-	struct cpu_hw_counters *cpuhw;
+	struct cpu_hw_events *cpuhw;
 	long i, n, n0;
 	long i, n, n0;
-	struct perf_counter *sub;
+	struct perf_event *sub;
 
 
 	if (!ppmu)
 	if (!ppmu)
 		return 0;
 		return 0;
-	cpuhw = &__get_cpu_var(cpu_hw_counters);
-	n0 = cpuhw->n_counters;
-	n = collect_events(group_leader, ppmu->n_counter - n0,
-			   &cpuhw->counter[n0], &cpuhw->events[n0],
+	cpuhw = &__get_cpu_var(cpu_hw_events);
+	n0 = cpuhw->n_events;
+	n = collect_events(group_leader, ppmu->n_event - n0,
+			   &cpuhw->event[n0], &cpuhw->events[n0],
 			   &cpuhw->flags[n0]);
 			   &cpuhw->flags[n0]);
 	if (n < 0)
 	if (n < 0)
 		return -EAGAIN;
 		return -EAGAIN;
-	if (check_excludes(cpuhw->counter, cpuhw->flags, n0, n))
+	if (check_excludes(cpuhw->event, cpuhw->flags, n0, n))
 		return -EAGAIN;
 		return -EAGAIN;
 	i = power_check_constraints(cpuhw, cpuhw->events, cpuhw->flags, n + n0);
 	i = power_check_constraints(cpuhw, cpuhw->events, cpuhw->flags, n + n0);
 	if (i < 0)
 	if (i < 0)
 		return -EAGAIN;
 		return -EAGAIN;
-	cpuhw->n_counters = n0 + n;
+	cpuhw->n_events = n0 + n;
 	cpuhw->n_added += n;
 	cpuhw->n_added += n;
 
 
 	/*
 	/*
-	 * OK, this group can go on; update counter states etc.,
-	 * and enable any software counters
+	 * OK, this group can go on; update event states etc.,
+	 * and enable any software events
 	 */
 	 */
 	for (i = n0; i < n0 + n; ++i)
 	for (i = n0; i < n0 + n; ++i)
-		cpuhw->counter[i]->hw.config = cpuhw->events[i];
+		cpuhw->event[i]->hw.config = cpuhw->events[i];
 	cpuctx->active_oncpu += n;
 	cpuctx->active_oncpu += n;
 	n = 1;
 	n = 1;
-	counter_sched_in(group_leader, cpu);
+	event_sched_in(group_leader, cpu);
 	list_for_each_entry(sub, &group_leader->sibling_list, list_entry) {
 	list_for_each_entry(sub, &group_leader->sibling_list, list_entry) {
-		if (sub->state != PERF_COUNTER_STATE_OFF) {
-			counter_sched_in(sub, cpu);
+		if (sub->state != PERF_EVENT_STATE_OFF) {
+			event_sched_in(sub, cpu);
 			++n;
 			++n;
 		}
 		}
 	}
 	}
@@ -776,14 +776,14 @@ int hw_perf_group_sched_in(struct perf_counter *group_leader,
 }
 }
 
 
 /*
 /*
- * Add a counter to the PMU.
- * If all counters are not already frozen, then we disable and
+ * Add an event to the PMU.
+ * If all events are not already frozen, then we disable and
  * re-enable the PMU in order to get hw_perf_enable to do the
  * actual work of reconfiguring the PMU.
  */
-static int power_pmu_enable(struct perf_counter *counter)
+static int power_pmu_enable(struct perf_event *event)
 {
-	struct cpu_hw_counters *cpuhw;
+	struct cpu_hw_events *cpuhw;
 	unsigned long flags;
 	int n0;
 	int ret = -EAGAIN;
@@ -792,23 +792,23 @@ static int power_pmu_enable(struct perf_counter *counter)
 	perf_disable();
 
 	/*
-	 * Add the counter to the list (if there is room)
+	 * Add the event to the list (if there is room)
 	 * and check whether the total set is still feasible.
 	 */
-	cpuhw = &__get_cpu_var(cpu_hw_counters);
-	n0 = cpuhw->n_counters;
-	if (n0 >= ppmu->n_counter)
+	cpuhw = &__get_cpu_var(cpu_hw_events);
+	n0 = cpuhw->n_events;
+	if (n0 >= ppmu->n_event)
 		goto out;
-	cpuhw->counter[n0] = counter;
-	cpuhw->events[n0] = counter->hw.config;
-	cpuhw->flags[n0] = counter->hw.counter_base;
-	if (check_excludes(cpuhw->counter, cpuhw->flags, n0, 1))
+	cpuhw->event[n0] = event;
+	cpuhw->events[n0] = event->hw.config;
+	cpuhw->flags[n0] = event->hw.event_base;
+	if (check_excludes(cpuhw->event, cpuhw->flags, n0, 1))
 		goto out;
 	if (power_check_constraints(cpuhw, cpuhw->events, cpuhw->flags, n0 + 1))
 		goto out;
 
-	counter->hw.config = cpuhw->events[n0];
-	++cpuhw->n_counters;
+	event->hw.config = cpuhw->events[n0];
+	++cpuhw->n_events;
 	++cpuhw->n_added;
 
 	ret = 0;
@@ -819,46 +819,46 @@ static int power_pmu_enable(struct perf_counter *counter)
 }
 
 /*
- * Remove a counter from the PMU.
+ * Remove an event from the PMU.
  */
-static void power_pmu_disable(struct perf_counter *counter)
+static void power_pmu_disable(struct perf_event *event)
 {
-	struct cpu_hw_counters *cpuhw;
+	struct cpu_hw_events *cpuhw;
 	long i;
 	unsigned long flags;
 
 	local_irq_save(flags);
 	perf_disable();
 
-	power_pmu_read(counter);
-
-	cpuhw = &__get_cpu_var(cpu_hw_counters);
-	for (i = 0; i < cpuhw->n_counters; ++i) {
-		if (counter == cpuhw->counter[i]) {
-			while (++i < cpuhw->n_counters)
-				cpuhw->counter[i-1] = cpuhw->counter[i];
-			--cpuhw->n_counters;
-			ppmu->disable_pmc(counter->hw.idx - 1, cpuhw->mmcr);
-			if (counter->hw.idx) {
-				write_pmc(counter->hw.idx, 0);
-				counter->hw.idx = 0;
+	power_pmu_read(event);
+
+	cpuhw = &__get_cpu_var(cpu_hw_events);
+	for (i = 0; i < cpuhw->n_events; ++i) {
+		if (event == cpuhw->event[i]) {
+			while (++i < cpuhw->n_events)
+				cpuhw->event[i-1] = cpuhw->event[i];
+			--cpuhw->n_events;
+			ppmu->disable_pmc(event->hw.idx - 1, cpuhw->mmcr);
+			if (event->hw.idx) {
+				write_pmc(event->hw.idx, 0);
+				event->hw.idx = 0;
 			}
-			perf_counter_update_userpage(counter);
+			perf_event_update_userpage(event);
 			break;
 		}
 	}
 	for (i = 0; i < cpuhw->n_limited; ++i)
-		if (counter == cpuhw->limited_counter[i])
+		if (event == cpuhw->limited_event[i])
 			break;
 	if (i < cpuhw->n_limited) {
 		while (++i < cpuhw->n_limited) {
-			cpuhw->limited_counter[i-1] = cpuhw->limited_counter[i];
+			cpuhw->limited_event[i-1] = cpuhw->limited_event[i];
 			cpuhw->limited_hwidx[i-1] = cpuhw->limited_hwidx[i];
 		}
 		--cpuhw->n_limited;
 	}
-	if (cpuhw->n_counters == 0) {
-		/* disable exceptions if no counters are running */
+	if (cpuhw->n_events == 0) {
+		/* disable exceptions if no events are running */
 		cpuhw->mmcr[0] &= ~(MMCR0_PMXE | MMCR0_FCECE);
 	}
 
@@ -867,28 +867,28 @@ static void power_pmu_disable(struct perf_counter *counter)
 }
 
 /*
- * Re-enable interrupts on a counter after they were throttled
+ * Re-enable interrupts on an event after they were throttled
  * because they were coming too fast.
  */
-static void power_pmu_unthrottle(struct perf_counter *counter)
+static void power_pmu_unthrottle(struct perf_event *event)
 {
 	s64 val, left;
 	unsigned long flags;
 
-	if (!counter->hw.idx || !counter->hw.sample_period)
+	if (!event->hw.idx || !event->hw.sample_period)
 		return;
 	local_irq_save(flags);
 	perf_disable();
-	power_pmu_read(counter);
-	left = counter->hw.sample_period;
-	counter->hw.last_period = left;
+	power_pmu_read(event);
+	left = event->hw.sample_period;
+	event->hw.last_period = left;
 	val = 0;
 	if (left < 0x80000000L)
 		val = 0x80000000L - left;
-	write_pmc(counter->hw.idx, val);
-	atomic64_set(&counter->hw.prev_count, val);
-	atomic64_set(&counter->hw.period_left, left);
-	perf_counter_update_userpage(counter);
+	write_pmc(event->hw.idx, val);
+	atomic64_set(&event->hw.prev_count, val);
+	atomic64_set(&event->hw.period_left, left);
+	perf_event_update_userpage(event);
 	perf_enable();
 	local_irq_restore(flags);
 }
@@ -901,29 +901,29 @@ struct pmu power_pmu = {
 };
 
 /*
- * Return 1 if we might be able to put counter on a limited PMC,
+ * Return 1 if we might be able to put event on a limited PMC,
  * or 0 if not.
- * A counter can only go on a limited PMC if it counts something
+ * An event can only go on a limited PMC if it counts something
  * that a limited PMC can count, doesn't require interrupts, and
  * doesn't exclude any processor mode.
  */
-static int can_go_on_limited_pmc(struct perf_counter *counter, u64 ev,
+static int can_go_on_limited_pmc(struct perf_event *event, u64 ev,
 				 unsigned int flags)
 {
 	int n;
 	u64 alt[MAX_EVENT_ALTERNATIVES];
 
-	if (counter->attr.exclude_user
-	    || counter->attr.exclude_kernel
-	    || counter->attr.exclude_hv
-	    || counter->attr.sample_period)
+	if (event->attr.exclude_user
+	    || event->attr.exclude_kernel
+	    || event->attr.exclude_hv
+	    || event->attr.sample_period)
 		return 0;
 
 	if (ppmu->limited_pmc_event(ev))
 		return 1;
 
 	/*
-	 * The requested event isn't on a limited PMC already;
+	 * The requested event_id isn't on a limited PMC already;
 	 * see if any alternative code goes on a limited PMC.
 	 */
 	if (!ppmu->get_alternatives)
@@ -936,9 +936,9 @@ static int can_go_on_limited_pmc(struct perf_counter *counter, u64 ev,
 }
 
 /*
- * Find an alternative event that goes on a normal PMC, if possible,
- * and return the event code, or 0 if there is no such alternative.
- * (Note: event code 0 is "don't count" on all machines.)
+ * Find an alternative event_id that goes on a normal PMC, if possible,
+ * and return the event_id code, or 0 if there is no such alternative.
+ * (Note: event_id code 0 is "don't count" on all machines.)
 */
 static u64 normal_pmc_alternative(u64 ev, unsigned long flags)
 {
@@ -952,26 +952,26 @@ static u64 normal_pmc_alternative(u64 ev, unsigned long flags)
 	return alt[0];
 }
 
-/* Number of perf_counters counting hardware events */
-static atomic_t num_counters;
+/* Number of perf_events counting hardware events */
+static atomic_t num_events;
 /* Used to avoid races in calling reserve/release_pmc_hardware */
 static DEFINE_MUTEX(pmc_reserve_mutex);
 
 /*
- * Release the PMU if this is the last perf_counter.
+ * Release the PMU if this is the last perf_event.
 */
-static void hw_perf_counter_destroy(struct perf_counter *counter)
+static void hw_perf_event_destroy(struct perf_event *event)
 {
-	if (!atomic_add_unless(&num_counters, -1, 1)) {
+	if (!atomic_add_unless(&num_events, -1, 1)) {
 		mutex_lock(&pmc_reserve_mutex);
-		if (atomic_dec_return(&num_counters) == 0)
+		if (atomic_dec_return(&num_events) == 0)
 			release_pmc_hardware();
 		mutex_unlock(&pmc_reserve_mutex);
 	}
 }
 
 /*
- * Translate a generic cache event config to a raw event code.
+ * Translate a generic cache event_id config to a raw event_id code.
 */
 static int hw_perf_cache_event(u64 config, u64 *eventp)
 {
@@ -1000,39 +1000,39 @@ static int hw_perf_cache_event(u64 config, u64 *eventp)
 	return 0;
 }
 
-const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
+const struct pmu *hw_perf_event_init(struct perf_event *event)
 {
 	u64 ev;
 	unsigned long flags;
-	struct perf_counter *ctrs[MAX_HWCOUNTERS];
-	u64 events[MAX_HWCOUNTERS];
-	unsigned int cflags[MAX_HWCOUNTERS];
+	struct perf_event *ctrs[MAX_HWEVENTS];
+	u64 events[MAX_HWEVENTS];
+	unsigned int cflags[MAX_HWEVENTS];
 	int n;
 	int err;
-	struct cpu_hw_counters *cpuhw;
+	struct cpu_hw_events *cpuhw;
 
 	if (!ppmu)
 		return ERR_PTR(-ENXIO);
-	switch (counter->attr.type) {
+	switch (event->attr.type) {
 	case PERF_TYPE_HARDWARE:
-		ev = counter->attr.config;
+		ev = event->attr.config;
 		if (ev >= ppmu->n_generic || ppmu->generic_events[ev] == 0)
 			return ERR_PTR(-EOPNOTSUPP);
 		ev = ppmu->generic_events[ev];
 		break;
 	case PERF_TYPE_HW_CACHE:
-		err = hw_perf_cache_event(counter->attr.config, &ev);
+		err = hw_perf_cache_event(event->attr.config, &ev);
 		if (err)
 			return ERR_PTR(err);
 		break;
 	case PERF_TYPE_RAW:
-		ev = counter->attr.config;
+		ev = event->attr.config;
 		break;
 	default:
 		return ERR_PTR(-EINVAL);
 	}
-	counter->hw.config_base = ev;
-	counter->hw.idx = 0;
+	event->hw.config_base = ev;
+	event->hw.idx = 0;
 
 	/*
 	 * If we are not running on a hypervisor, force the
@@ -1040,28 +1040,28 @@ const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
 	 * the user set it to.
 	 */
 	if (!firmware_has_feature(FW_FEATURE_LPAR))
-		counter->attr.exclude_hv = 0;
+		event->attr.exclude_hv = 0;
 
 	/*
-	 * If this is a per-task counter, then we can use
+	 * If this is a per-task event, then we can use
 	 * PM_RUN_* events interchangeably with their non RUN_*
 	 * equivalents, e.g. PM_RUN_CYC instead of PM_CYC.
 	 * XXX we should check if the task is an idle task.
 	 */
 	flags = 0;
-	if (counter->ctx->task)
+	if (event->ctx->task)
 		flags |= PPMU_ONLY_COUNT_RUN;
 
 	/*
-	 * If this machine has limited counters, check whether this
-	 * event could go on a limited counter.
+	 * If this machine has limited events, check whether this
+	 * event_id could go on a limited event.
 	 */
 	if (ppmu->flags & PPMU_LIMITED_PMC5_6) {
-		if (can_go_on_limited_pmc(counter, ev, flags)) {
+		if (can_go_on_limited_pmc(event, ev, flags)) {
 			flags |= PPMU_LIMITED_PMC_OK;
 		} else if (ppmu->limited_pmc_event(ev)) {
 			/*
-			 * The requested event is on a limited PMC,
+			 * The requested event_id is on a limited PMC,
 			 * but we can't use a limited PMC; see if any
 			 * alternative goes on a normal PMC.
 			 */
@@ -1073,50 +1073,50 @@ const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
 
 	/*
 	 * If this is in a group, check if it can go on with all the
-	 * other hardware counters in the group.  We assume the counter
+	 * other hardware events in the group.  We assume the event
 	 * hasn't been linked into its leader's sibling list at this point.
 	 */
 	n = 0;
-	if (counter->group_leader != counter) {
-		n = collect_events(counter->group_leader, ppmu->n_counter - 1,
+	if (event->group_leader != event) {
+		n = collect_events(event->group_leader, ppmu->n_event - 1,
 				   ctrs, events, cflags);
 		if (n < 0)
 			return ERR_PTR(-EINVAL);
 	}
 	events[n] = ev;
-	ctrs[n] = counter;
+	ctrs[n] = event;
 	cflags[n] = flags;
 	if (check_excludes(ctrs, cflags, n, 1))
 		return ERR_PTR(-EINVAL);
 
-	cpuhw = &get_cpu_var(cpu_hw_counters);
+	cpuhw = &get_cpu_var(cpu_hw_events);
 	err = power_check_constraints(cpuhw, events, cflags, n + 1);
-	put_cpu_var(cpu_hw_counters);
+	put_cpu_var(cpu_hw_events);
 	if (err)
 		return ERR_PTR(-EINVAL);
 
-	counter->hw.config = events[n];
-	counter->hw.counter_base = cflags[n];
-	counter->hw.last_period = counter->hw.sample_period;
-	atomic64_set(&counter->hw.period_left, counter->hw.last_period);
+	event->hw.config = events[n];
+	event->hw.event_base = cflags[n];
+	event->hw.last_period = event->hw.sample_period;
+	atomic64_set(&event->hw.period_left, event->hw.last_period);
 
 	/*
 	 * See if we need to reserve the PMU.
-	 * If no counters are currently in use, then we have to take a
+	 * If no events are currently in use, then we have to take a
 	 * mutex to ensure that we don't race with another task doing
 	 * reserve_pmc_hardware or release_pmc_hardware.
 	 */
 	err = 0;
-	if (!atomic_inc_not_zero(&num_counters)) {
+	if (!atomic_inc_not_zero(&num_events)) {
 		mutex_lock(&pmc_reserve_mutex);
-		if (atomic_read(&num_counters) == 0 &&
-		    reserve_pmc_hardware(perf_counter_interrupt))
+		if (atomic_read(&num_events) == 0 &&
+		    reserve_pmc_hardware(perf_event_interrupt))
 			err = -EBUSY;
 		else
-			atomic_inc(&num_counters);
+			atomic_inc(&num_events);
 		mutex_unlock(&pmc_reserve_mutex);
 	}
-	counter->destroy = hw_perf_counter_destroy;
+	event->destroy = hw_perf_event_destroy;
 
 	if (err)
 		return ERR_PTR(err);
@@ -1128,24 +1128,24 @@ const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
  * things if requested.  Note that interrupts are hard-disabled
  * here so there is no possibility of being interrupted.
  */
-static void record_and_restart(struct perf_counter *counter, unsigned long val,
+static void record_and_restart(struct perf_event *event, unsigned long val,
 			       struct pt_regs *regs, int nmi)
 {
-	u64 period = counter->hw.sample_period;
+	u64 period = event->hw.sample_period;
 	s64 prev, delta, left;
 	int record = 0;
 
 	/* we don't have to worry about interrupts here */
-	prev = atomic64_read(&counter->hw.prev_count);
+	prev = atomic64_read(&event->hw.prev_count);
 	delta = (val - prev) & 0xfffffffful;
-	atomic64_add(delta, &counter->count);
+	atomic64_add(delta, &event->count);
 
 	/*
-	 * See if the total period for this counter has expired,
+	 * See if the total period for this event has expired,
 	 * and update for the next period.
 	 */
 	val = 0;
-	left = atomic64_read(&counter->hw.period_left) - delta;
+	left = atomic64_read(&event->hw.period_left) - delta;
 	if (period) {
 		if (left <= 0) {
 			left += period;
@@ -1163,18 +1163,18 @@ static void record_and_restart(struct perf_counter *counter, unsigned long val,
 	if (record) {
 		struct perf_sample_data data = {
 			.addr	= 0,
-			.period	= counter->hw.last_period,
+			.period	= event->hw.last_period,
 		};
 
-		if (counter->attr.sample_type & PERF_SAMPLE_ADDR)
+		if (event->attr.sample_type & PERF_SAMPLE_ADDR)
 			perf_get_data_addr(regs, &data.addr);
 
-		if (perf_counter_overflow(counter, nmi, &data, regs)) {
+		if (perf_event_overflow(event, nmi, &data, regs)) {
 			/*
 			 * Interrupts are coming too fast - throttle them
-			 * by setting the counter to 0, so it will be
+			 * by setting the event to 0, so it will be
 			 * at least 2^30 cycles until the next interrupt
-			 * (assuming each counter counts at most 2 counts
+			 * (assuming each event counts at most 2 counts
 			 * per cycle).
 			 */
 			val = 0;
@@ -1182,15 +1182,15 @@ static void record_and_restart(struct perf_counter *counter, unsigned long val,
 		}
 	}
 
-	write_pmc(counter->hw.idx, val);
-	atomic64_set(&counter->hw.prev_count, val);
-	atomic64_set(&counter->hw.period_left, left);
-	perf_counter_update_userpage(counter);
+	write_pmc(event->hw.idx, val);
+	atomic64_set(&event->hw.prev_count, val);
+	atomic64_set(&event->hw.period_left, left);
+	perf_event_update_userpage(event);
 }
 
 /*
 * Called from generic code to get the misc flags (i.e. processor mode)
- * for an event.
+ * for an event_id.
 */
 unsigned long perf_misc_flags(struct pt_regs *regs)
 {
@@ -1198,13 +1198,13 @@ unsigned long perf_misc_flags(struct pt_regs *regs)
 
 	if (flags)
 		return flags;
-	return user_mode(regs) ? PERF_EVENT_MISC_USER :
-		PERF_EVENT_MISC_KERNEL;
+	return user_mode(regs) ? PERF_RECORD_MISC_USER :
+		PERF_RECORD_MISC_KERNEL;
 }
 
 /*
 * Called from generic code to get the instruction pointer
- * for an event.
+ * for an event_id.
 */
 unsigned long perf_instruction_pointer(struct pt_regs *regs)
 {
@@ -1220,17 +1220,17 @@ unsigned long perf_instruction_pointer(struct pt_regs *regs)
 /*
 * Performance monitor interrupt stuff
 */
-static void perf_counter_interrupt(struct pt_regs *regs)
+static void perf_event_interrupt(struct pt_regs *regs)
 {
 	int i;
-	struct cpu_hw_counters *cpuhw = &__get_cpu_var(cpu_hw_counters);
-	struct perf_counter *counter;
+	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
+	struct perf_event *event;
 	unsigned long val;
 	int found = 0;
 	int nmi;
 
 	if (cpuhw->n_limited)
-		freeze_limited_counters(cpuhw, mfspr(SPRN_PMC5),
+		freeze_limited_events(cpuhw, mfspr(SPRN_PMC5),
 					mfspr(SPRN_PMC6));
 
 	perf_read_regs(regs);
@@ -1241,26 +1241,26 @@ static void perf_counter_interrupt(struct pt_regs *regs)
 	else
 		irq_enter();
 
-	for (i = 0; i < cpuhw->n_counters; ++i) {
-		counter = cpuhw->counter[i];
-		if (!counter->hw.idx || is_limited_pmc(counter->hw.idx))
+	for (i = 0; i < cpuhw->n_events; ++i) {
+		event = cpuhw->event[i];
+		if (!event->hw.idx || is_limited_pmc(event->hw.idx))
 			continue;
-		val = read_pmc(counter->hw.idx);
+		val = read_pmc(event->hw.idx);
 		if ((int)val < 0) {
-			/* counter has overflowed */
+			/* event has overflowed */
 			found = 1;
-			record_and_restart(counter, val, regs, nmi);
+			record_and_restart(event, val, regs, nmi);
 		}
 	}
 
 	/*
-	 * In case we didn't find and reset the counter that caused
-	 * the interrupt, scan all counters and reset any that are
+	 * In case we didn't find and reset the event that caused
+	 * the interrupt, scan all events and reset any that are
 	 * negative, to avoid getting continual interrupts.
 	 * Any that we processed in the previous loop will not be negative.
 	 */
 	if (!found) {
-		for (i = 0; i < ppmu->n_counter; ++i) {
+		for (i = 0; i < ppmu->n_event; ++i) {
 			if (is_limited_pmc(i + 1))
 				continue;
 			val = read_pmc(i + 1);
@@ -1273,7 +1273,7 @@ static void perf_counter_interrupt(struct pt_regs *regs)
 	 * Reset MMCR0 to its normal value.  This will set PMXE and
 	 * clear FC (freeze counters) and PMAO (perf mon alert occurred)
 	 * and thus allow interrupts to occur again.
-	 * XXX might want to use MSR.PM to keep the counters frozen until
+	 * XXX might want to use MSR.PM to keep the events frozen until
 	 * we get back out of this interrupt.
 	 */
 	write_mmcr0(cpuhw, cpuhw->mmcr[0]);
@@ -1284,9 +1284,9 @@ static void perf_counter_interrupt(struct pt_regs *regs)
 		irq_exit();
 }
 
-void hw_perf_counter_setup(int cpu)
+void hw_perf_event_setup(int cpu)
 {
-	struct cpu_hw_counters *cpuhw = &per_cpu(cpu_hw_counters, cpu);
+	struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu);
 
 	if (!ppmu)
 		return;
@@ -1308,7 +1308,7 @@ int register_power_pmu(struct power_pmu *pmu)
 	 * Use FCHV to ignore kernel events if MSR.HV is set.
 	 */
 	if (mfmsr() & MSR_HV)
-		freeze_counters_kernel = MMCR0_FCHV;
+		freeze_events_kernel = MMCR0_FCHV;
 #endif /* CONFIG_PPC64 */
 
 	return 0;

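The reload arithmetic in the file above is the heart of how a 32-bit PMC yields an interrupt after exactly 'left' more counts: hw_perf_enable() and power_pmu_unthrottle() both preload the counter with 0x80000000L - left so that bit 31 sets on overflow (the same bit the (int)val < 0 test in perf_event_interrupt() checks), and record_and_restart() accumulates deltas modulo 2^32. Below is a minimal user-space sketch of that arithmetic; every name in it is illustrative only, not a kernel API.

/*
 * Standalone sketch of the 32-bit PMC period arithmetic used in the
 * powerpc perf_event code above.  Assumed/illustrative names throughout.
 */
#include <stdint.h>
#include <stdio.h>

/* Mirrors "if (left < 0x80000000L) val = 0x80000000L - left". */
static uint32_t pmc_reload_value(int64_t left)
{
	if (left > 0 && left < 0x80000000LL)
		return (uint32_t)(0x80000000LL - left);
	return 0;
}

/* Mirrors "delta = (val - prev) & 0xfffffffful" from record_and_restart(). */
static uint64_t pmc_delta(uint32_t prev, uint32_t now)
{
	return (uint64_t)((now - prev) & 0xffffffffUL);
}

int main(void)
{
	uint32_t start = pmc_reload_value(1000);	/* 1000 counts to go */
	uint32_t now = start + 1000;			/* the 1000th count */

	/* bit 31 is now set, i.e. (int)now < 0: the overflow exception fires */
	printf("reload=0x%08x counted=%llu overflowed=%d\n",
	       start, (unsigned long long)pmc_delta(start, now),
	       (int32_t)now < 0);
	return 0;
}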
+ 1 - 1
arch/powerpc/kernel/power4-pmu.c

@@ -9,7 +9,7 @@
  * 2 of the License, or (at your option) any later version.
  */
 #include <linux/kernel.h>
-#include <linux/perf_counter.h>
+#include <linux/perf_event.h>
 #include <linux/string.h>
 #include <asm/reg.h>
 #include <asm/cputable.h>

+ 1 - 1
arch/powerpc/kernel/power5+-pmu.c

@@ -9,7 +9,7 @@
  * 2 of the License, or (at your option) any later version.
  */
 #include <linux/kernel.h>
-#include <linux/perf_counter.h>
+#include <linux/perf_event.h>
 #include <linux/string.h>
 #include <asm/reg.h>
 #include <asm/cputable.h>

+ 1 - 1
arch/powerpc/kernel/power5-pmu.c

@@ -9,7 +9,7 @@
  * 2 of the License, or (at your option) any later version.
  */
 #include <linux/kernel.h>
-#include <linux/perf_counter.h>
+#include <linux/perf_event.h>
 #include <linux/string.h>
 #include <asm/reg.h>
 #include <asm/cputable.h>

+ 1 - 1
arch/powerpc/kernel/power6-pmu.c

@@ -9,7 +9,7 @@
  * 2 of the License, or (at your option) any later version.
  */
 #include <linux/kernel.h>
-#include <linux/perf_counter.h>
+#include <linux/perf_event.h>
 #include <linux/string.h>
 #include <asm/reg.h>
 #include <asm/cputable.h>

+ 1 - 1
arch/powerpc/kernel/power7-pmu.c

@@ -9,7 +9,7 @@
  * 2 of the License, or (at your option) any later version.
  */
 #include <linux/kernel.h>
-#include <linux/perf_counter.h>
+#include <linux/perf_event.h>
 #include <linux/string.h>
 #include <asm/reg.h>
 #include <asm/cputable.h>

+ 1 - 1
arch/powerpc/kernel/ppc970-pmu.c

@@ -9,7 +9,7 @@
  * 2 of the License, or (at your option) any later version.
  */
 #include <linux/string.h>
-#include <linux/perf_counter.h>
+#include <linux/perf_event.h>
 #include <asm/reg.h>
 #include <asm/cputable.h>
 

+ 15 - 15
arch/powerpc/kernel/time.c

@@ -53,7 +53,7 @@
 #include <linux/posix-timers.h>
 #include <linux/irq.h>
 #include <linux/delay.h>
-#include <linux/perf_counter.h>
+#include <linux/perf_event.h>
 
 #include <asm/io.h>
 #include <asm/processor.h>
@@ -527,25 +527,25 @@ void __init iSeries_time_init_early(void)
 }
 #endif /* CONFIG_PPC_ISERIES */
 
-#if defined(CONFIG_PERF_COUNTERS) && defined(CONFIG_PPC32)
-DEFINE_PER_CPU(u8, perf_counter_pending);
+#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_PPC32)
+DEFINE_PER_CPU(u8, perf_event_pending);
 
-void set_perf_counter_pending(void)
+void set_perf_event_pending(void)
 {
-	get_cpu_var(perf_counter_pending) = 1;
+	get_cpu_var(perf_event_pending) = 1;
 	set_dec(1);
-	put_cpu_var(perf_counter_pending);
+	put_cpu_var(perf_event_pending);
 }
 
-#define test_perf_counter_pending()	__get_cpu_var(perf_counter_pending)
-#define clear_perf_counter_pending()	__get_cpu_var(perf_counter_pending) = 0
+#define test_perf_event_pending()	__get_cpu_var(perf_event_pending)
+#define clear_perf_event_pending()	__get_cpu_var(perf_event_pending) = 0
 
-#else  /* CONFIG_PERF_COUNTERS && CONFIG_PPC32 */
+#else  /* CONFIG_PERF_EVENTS && CONFIG_PPC32 */
 
-#define test_perf_counter_pending()	0
-#define clear_perf_counter_pending()
+#define test_perf_event_pending()	0
+#define clear_perf_event_pending()
 
-#endif /* CONFIG_PERF_COUNTERS && CONFIG_PPC32 */
+#endif /* CONFIG_PERF_EVENTS && CONFIG_PPC32 */
 
 /*
  * For iSeries shared processors, we have to let the hypervisor
@@ -573,9 +573,9 @@ void timer_interrupt(struct pt_regs * regs)
 	set_dec(DECREMENTER_MAX);
 
 #ifdef CONFIG_PPC32
-	if (test_perf_counter_pending()) {
-		clear_perf_counter_pending();
-		perf_counter_do_pending();
+	if (test_perf_event_pending()) {
+		clear_perf_event_pending();
+		perf_event_do_pending();
 	}
 	if (atomic_read(&ppc_n_lost_interrupts) != 0)
 		do_IRQ(regs);

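The PPC32 path above cannot run perf work directly from the PMU exception context, so it latches a per-CPU flag and forces a near-immediate decrementer tick with set_dec(1); the ordinary timer_interrupt() then tests, clears, and drains the pending work. A single-threaded sketch of that deferred-work shape follows; only the *_perf_event_pending and perf_event_do_pending names come from the patch, everything else is a stand-in.

/*
 * Sketch of the "pending flag plus forced timer tick" pattern from
 * arch/powerpc/kernel/time.c above.  Plain C stand-ins, not kernel code.
 */
#include <stdbool.h>
#include <stdio.h>

static volatile bool perf_event_pending;	/* DEFINE_PER_CPU(u8, ...) in the kernel */

static void perf_event_do_pending(void)
{
	printf("draining deferred perf work\n");
}

static void set_perf_event_pending(void)
{
	perf_event_pending = true;
	/* kernel: set_dec(1) makes the decrementer fire almost immediately */
}

static void timer_interrupt(void)
{
	if (perf_event_pending) {		/* test_perf_event_pending() */
		perf_event_pending = false;	/* clear_perf_event_pending() */
		perf_event_do_pending();
	}
}

int main(void)
{
	set_perf_event_pending();
	timer_interrupt();	/* the forced tick arrives and runs the work */
	return 0;
}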
+ 4 - 4
arch/powerpc/mm/fault.c

@@ -29,7 +29,7 @@
 #include <linux/module.h>
 #include <linux/kprobes.h>
 #include <linux/kdebug.h>
-#include <linux/perf_counter.h>
+#include <linux/perf_event.h>
 
 #include <asm/firmware.h>
 #include <asm/page.h>
@@ -171,7 +171,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
 		die("Weird page fault", regs, SIGSEGV);
 	}
 
-	perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
+	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
 
 	/* When running in the kernel we expect faults to occur only to
 	 * addresses in user space.  All other faults represent errors in the
@@ -312,7 +312,7 @@ good_area:
 	}
 	if (ret & VM_FAULT_MAJOR) {
 		current->maj_flt++;
-		perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
+		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
 				     regs, address);
 #ifdef CONFIG_PPC_SMLPAR
 		if (firmware_has_feature(FW_FEATURE_CMO)) {
@@ -323,7 +323,7 @@ good_area:
 #endif
 	} else {
 		current->min_flt++;
-		perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
+		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
 				     regs, address);
 	}
 	up_read(&mm->mmap_sem);

+ 2 - 2
arch/powerpc/platforms/Kconfig.cputype

@@ -280,9 +280,9 @@ config PPC_HAVE_PMU_SUPPORT
 
 config PPC_PERF_CTRS
        def_bool y
-       depends on PERF_COUNTERS && PPC_HAVE_PMU_SUPPORT
+       depends on PERF_EVENTS && PPC_HAVE_PMU_SUPPORT
        help
-         This enables the powerpc-specific perf_counter back-end.
+         This enables the powerpc-specific perf_event back-end.
 
 config SMP
 	depends on PPC_BOOK3S || PPC_BOOK3E || FSL_BOOKE

+ 1 - 1
arch/s390/Kconfig

@@ -94,7 +94,7 @@ config S390
 	select HAVE_KVM if 64BIT
 	select HAVE_ARCH_TRACEHOOK
 	select INIT_ALL_POSSIBLE
-	select HAVE_PERF_COUNTERS
+	select HAVE_PERF_EVENTS
 
 config SCHED_OMIT_FRAME_POINTER
 	bool

+ 0 - 10
arch/s390/include/asm/perf_counter.h

@@ -1,10 +0,0 @@
-/*
- * Performance counter support - s390 specific definitions.
- *
- * Copyright 2009 Martin Schwidefsky, IBM Corporation.
- */
-
-static inline void set_perf_counter_pending(void) {}
-static inline void clear_perf_counter_pending(void) {}
-
-#define PERF_COUNTER_INDEX_OFFSET 0

+ 10 - 0
arch/s390/include/asm/perf_event.h

@@ -0,0 +1,10 @@
+/*
+ * Performance event support - s390 specific definitions.
+ *
+ * Copyright 2009 Martin Schwidefsky, IBM Corporation.
+ */
+
+static inline void set_perf_event_pending(void) {}
+static inline void clear_perf_event_pending(void) {}
+
+#define PERF_EVENT_INDEX_OFFSET 0

+ 1 - 1
arch/s390/include/asm/unistd.h

@@ -268,7 +268,7 @@
 #define	__NR_preadv		328
 #define	__NR_pwritev		329
 #define __NR_rt_tgsigqueueinfo	330
-#define __NR_perf_counter_open	331
+#define __NR_perf_event_open	331
 #define NR_syscalls 332
 
 /* 

+ 4 - 4
arch/s390/kernel/compat_wrapper.S

@@ -1832,11 +1832,11 @@ compat_sys_rt_tgsigqueueinfo_wrapper:
 	llgtr	%r5,%r5			# struct compat_siginfo *
 	jg	compat_sys_rt_tgsigqueueinfo_wrapper # branch to system call
 
-	.globl	sys_perf_counter_open_wrapper
-sys_perf_counter_open_wrapper:
-	llgtr	%r2,%r2			# const struct perf_counter_attr *
+	.globl	sys_perf_event_open_wrapper
+sys_perf_event_open_wrapper:
+	llgtr	%r2,%r2			# const struct perf_event_attr *
 	lgfr	%r3,%r3			# pid_t
 	lgfr	%r4,%r4			# int
 	lgfr	%r5,%r5			# int
 	llgfr	%r6,%r6			# unsigned long
-	jg	sys_perf_counter_open	# branch to system call
+	jg	sys_perf_event_open	# branch to system call

+ 1 - 1
arch/s390/kernel/syscalls.S

@@ -339,4 +339,4 @@ SYSCALL(sys_epoll_create1,sys_epoll_create1,sys_epoll_create1_wrapper)
 SYSCALL(sys_preadv,sys_preadv,compat_sys_preadv_wrapper)
 SYSCALL(sys_pwritev,sys_pwritev,compat_sys_pwritev_wrapper)
 SYSCALL(sys_rt_tgsigqueueinfo,sys_rt_tgsigqueueinfo,compat_sys_rt_tgsigqueueinfo_wrapper) /* 330 */
-SYSCALL(sys_perf_counter_open,sys_perf_counter_open,sys_perf_counter_open_wrapper)
+SYSCALL(sys_perf_event_open,sys_perf_event_open,sys_perf_event_open_wrapper)

+ 4 - 4
arch/s390/mm/fault.c

@@ -10,7 +10,7 @@
  *    Copyright (C) 1995  Linus Torvalds
  */
 
-#include <linux/perf_counter.h>
+#include <linux/perf_event.h>
 #include <linux/signal.h>
 #include <linux/sched.h>
 #include <linux/kernel.h>
@@ -306,7 +306,7 @@ do_exception(struct pt_regs *regs, unsigned long error_code, int write)
 	 * interrupts again and then search the VMAs
 	 */
 	local_irq_enable();
-	perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
+	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
 	down_read(&mm->mmap_sem);
 
 	si_code = SEGV_MAPERR;
@@ -366,11 +366,11 @@ good_area:
 	}
 	if (fault & VM_FAULT_MAJOR) {
 		tsk->maj_flt++;
-		perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
+		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
 				     regs, address);
 	} else {
 		tsk->min_flt++;
-		perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
+		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
 				     regs, address);
 	}
         up_read(&mm->mmap_sem);

+ 1 - 1
arch/sh/Kconfig

@@ -16,7 +16,7 @@ config SUPERH
 	select HAVE_IOREMAP_PROT if MMU
 	select HAVE_ARCH_TRACEHOOK
 	select HAVE_DMA_API_DEBUG
-	select HAVE_PERF_COUNTERS
+	select HAVE_PERF_EVENTS
 	select HAVE_KERNEL_GZIP
 	select HAVE_KERNEL_BZIP2
 	select HAVE_KERNEL_LZMA

+ 0 - 9
arch/sh/include/asm/perf_counter.h

@@ -1,9 +0,0 @@
-#ifndef __ASM_SH_PERF_COUNTER_H
-#define __ASM_SH_PERF_COUNTER_H
-
-/* SH only supports software counters through this interface. */
-static inline void set_perf_counter_pending(void) {}
-
-#define PERF_COUNTER_INDEX_OFFSET	0
-
-#endif /* __ASM_SH_PERF_COUNTER_H */

+ 9 - 0
arch/sh/include/asm/perf_event.h

@@ -0,0 +1,9 @@
+#ifndef __ASM_SH_PERF_EVENT_H
+#define __ASM_SH_PERF_EVENT_H
+
+/* SH only supports software events through this interface. */
+static inline void set_perf_event_pending(void) {}
+
+#define PERF_EVENT_INDEX_OFFSET	0
+
+#endif /* __ASM_SH_PERF_EVENT_H */

+ 1 - 1
arch/sh/include/asm/unistd_32.h

@@ -344,7 +344,7 @@
 #define __NR_preadv		333
 #define __NR_pwritev		334
 #define __NR_rt_tgsigqueueinfo	335
-#define __NR_perf_counter_open	336
+#define __NR_perf_event_open	336
 
 #define NR_syscalls 337
 

+ 1 - 1
arch/sh/include/asm/unistd_64.h

@@ -384,7 +384,7 @@
 #define __NR_preadv		361
 #define __NR_pwritev		362
 #define __NR_rt_tgsigqueueinfo	363
-#define __NR_perf_counter_open	364
+#define __NR_perf_event_open	364
 
 #ifdef __KERNEL__
 

+ 1 - 1
arch/sh/kernel/syscalls_32.S

@@ -352,4 +352,4 @@ ENTRY(sys_call_table)
 	.long sys_preadv
 	.long sys_pwritev
 	.long sys_rt_tgsigqueueinfo	/* 335 */
-	.long sys_perf_counter_open
+	.long sys_perf_event_open

+ 1 - 1
arch/sh/kernel/syscalls_64.S

@@ -390,4 +390,4 @@ sys_call_table:
 	.long sys_preadv
 	.long sys_pwritev
 	.long sys_rt_tgsigqueueinfo
-	.long sys_perf_counter_open
+	.long sys_perf_event_open

+ 4 - 4
arch/sh/mm/fault_32.c

@@ -15,7 +15,7 @@
 #include <linux/mm.h>
 #include <linux/hardirq.h>
 #include <linux/kprobes.h>
-#include <linux/perf_counter.h>
+#include <linux/perf_event.h>
 #include <asm/io_trapped.h>
 #include <asm/system.h>
 #include <asm/mmu_context.h>
@@ -157,7 +157,7 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
 	if ((regs->sr & SR_IMASK) != SR_IMASK)
 		local_irq_enable();
 
-	perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
+	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
 
 	/*
 	 * If we're in an interrupt, have no user context or are running
@@ -208,11 +208,11 @@ survive:
 	}
 	if (fault & VM_FAULT_MAJOR) {
 		tsk->maj_flt++;
-		perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
+		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
 				     regs, address);
 	} else {
 		tsk->min_flt++;
-		perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
+		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
 				     regs, address);
 	}
 

+ 4 - 4
arch/sh/mm/tlbflush_64.c

@@ -20,7 +20,7 @@
 #include <linux/mman.h>
 #include <linux/mm.h>
 #include <linux/smp.h>
-#include <linux/perf_counter.h>
+#include <linux/perf_event.h>
 #include <linux/interrupt.h>
 #include <asm/system.h>
 #include <asm/io.h>
@@ -116,7 +116,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
 	/* Not an IO address, so reenable interrupts */
 	local_irq_enable();
 
-	perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
+	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
 
 	/*
 	 * If we're in an interrupt or have no user
@@ -201,11 +201,11 @@ survive:
 
 	if (fault & VM_FAULT_MAJOR) {
 		tsk->maj_flt++;
-		perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
+		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
 				     regs, address);
 	} else {
 		tsk->min_flt++;
-		perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
+		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
 				     regs, address);
 	}
 

+ 2 - 2
arch/sparc/Kconfig

@@ -25,7 +25,7 @@ config SPARC
 	select ARCH_WANT_OPTIONAL_GPIOLIB
 	select RTC_CLASS
 	select RTC_DRV_M48T59
-	select HAVE_PERF_COUNTERS
+	select HAVE_PERF_EVENTS
 	select HAVE_DMA_ATTRS
 	select HAVE_DMA_API_DEBUG
 
@@ -47,7 +47,7 @@ config SPARC64
 	select RTC_DRV_BQ4802
 	select RTC_DRV_SUN4V
 	select RTC_DRV_STARFIRE
-	select HAVE_PERF_COUNTERS
+	select HAVE_PERF_EVENTS
 
 config ARCH_DEFCONFIG
 	string

+ 0 - 14
arch/sparc/include/asm/perf_counter.h

@@ -1,14 +0,0 @@
-#ifndef __ASM_SPARC_PERF_COUNTER_H
-#define __ASM_SPARC_PERF_COUNTER_H
-
-extern void set_perf_counter_pending(void);
-
-#define	PERF_COUNTER_INDEX_OFFSET	0
-
-#ifdef CONFIG_PERF_COUNTERS
-extern void init_hw_perf_counters(void);
-#else
-static inline void init_hw_perf_counters(void)	{ }
-#endif
-
-#endif

+ 14 - 0
arch/sparc/include/asm/perf_event.h

@@ -0,0 +1,14 @@
+#ifndef __ASM_SPARC_PERF_EVENT_H
+#define __ASM_SPARC_PERF_EVENT_H
+
+extern void set_perf_event_pending(void);
+
+#define	PERF_EVENT_INDEX_OFFSET	0
+
+#ifdef CONFIG_PERF_EVENTS
+extern void init_hw_perf_events(void);
+#else
+static inline void init_hw_perf_events(void)	{ }
+#endif
+
+#endif

+ 1 - 1
arch/sparc/include/asm/unistd.h

@@ -395,7 +395,7 @@
 #define __NR_preadv		324
 #define __NR_pwritev		325
 #define __NR_rt_tgsigqueueinfo	326
-#define __NR_perf_counter_open	327
+#define __NR_perf_event_open	327
 
 #define NR_SYSCALLS		328
 

+ 1 - 1
arch/sparc/kernel/Makefile

@@ -104,5 +104,5 @@ obj-$(CONFIG_AUDIT)     += audit.o
 audit--$(CONFIG_AUDIT)  := compat_audit.o
 obj-$(CONFIG_COMPAT)    += $(audit--y)
 
-pc--$(CONFIG_PERF_COUNTERS) := perf_counter.o
+pc--$(CONFIG_PERF_EVENTS) := perf_event.o
 obj-$(CONFIG_SPARC64)	+= $(pc--y)

+ 2 - 2
arch/sparc/kernel/nmi.c

@@ -19,7 +19,7 @@
 #include <linux/delay.h>
 #include <linux/smp.h>
 
-#include <asm/perf_counter.h>
+#include <asm/perf_event.h>
 #include <asm/ptrace.h>
 #include <asm/local.h>
 #include <asm/pcr.h>
@@ -265,7 +265,7 @@ int __init nmi_init(void)
 		}
 	}
 	if (!err)
-		init_hw_perf_counters();
+		init_hw_perf_events();
 
 	return err;
 }

+ 5 - 5
arch/sparc/kernel/pcr.c

@@ -7,7 +7,7 @@
 #include <linux/init.h>
 #include <linux/irq.h>
 
-#include <linux/perf_counter.h>
+#include <linux/perf_event.h>
 
 #include <asm/pil.h>
 #include <asm/pcr.h>
@@ -15,7 +15,7 @@
 
 /* This code is shared between various users of the performance
  * counters.  Users will be oprofile, pseudo-NMI watchdog, and the
- * perf_counter support layer.
+ * perf_event support layer.
 */
 
 #define PCR_SUN4U_ENABLE	(PCR_PIC_PRIV | PCR_STRACE | PCR_UTRACE)
@@ -42,14 +42,14 @@ void deferred_pcr_work_irq(int irq, struct pt_regs *regs)
 
 	old_regs = set_irq_regs(regs);
 	irq_enter();
-#ifdef CONFIG_PERF_COUNTERS
-	perf_counter_do_pending();
+#ifdef CONFIG_PERF_EVENTS
+	perf_event_do_pending();
 #endif
 	irq_exit();
 	set_irq_regs(old_regs);
 }
 
-void set_perf_counter_pending(void)
+void set_perf_event_pending(void)
 {
 	set_softint(1 << PIL_DEFERRED_PCR_WORK);
 }

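Both the powerpc code earlier in this patch (num_events with pmc_reserve_mutex) and the sparc code below (active_events with pmc_grab_mutex) guard the shared PMC hardware with the same idiom: an atomic refcount whose fast path never takes the lock, with the mutex held only around the 0-to-1 and 1-to-0 transitions where the hardware is actually grabbed or released. A rough user-space sketch with C11 atomics and pthreads; the reserve/release names are stand-ins, and the error handling of the real code is dropped.

/*
 * Sketch of the PMC reservation refcount idiom.  atomic_inc_not_zero()
 * keeps the common case lock-free; the mutex serializes only the
 * transitions that touch the hardware.  Not kernel code.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int num_events;
static pthread_mutex_t pmc_reserve_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Succeeds (and increments) only if the count is already non-zero. */
static int atomic_inc_not_zero(atomic_int *v)
{
	int old = atomic_load(v);
	while (old != 0)
		if (atomic_compare_exchange_weak(v, &old, old + 1))
			return 1;
	return 0;
}

static void reserve_pmc_hardware(void) { printf("PMU reserved\n"); }
static void release_pmc_hardware(void) { printf("PMU released\n"); }

static void event_init(void)
{
	if (!atomic_inc_not_zero(&num_events)) {
		pthread_mutex_lock(&pmc_reserve_mutex);
		if (atomic_load(&num_events) == 0)
			reserve_pmc_hardware();
		atomic_fetch_add(&num_events, 1);
		pthread_mutex_unlock(&pmc_reserve_mutex);
	}
}

static void event_destroy(void)
{
	/* the kernel short-circuits with atomic_add_unless(&num_events, -1, 1) */
	pthread_mutex_lock(&pmc_reserve_mutex);
	if (atomic_fetch_sub(&num_events, 1) == 1)
		release_pmc_hardware();
	pthread_mutex_unlock(&pmc_reserve_mutex);
}

int main(void)
{
	event_init();		/* reserves the PMU */
	event_init();		/* fast path, no lock */
	event_destroy();
	event_destroy();	/* last user releases the PMU */
	return 0;
}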
+ 89 - 89
arch/sparc/kernel/perf_counter.c → arch/sparc/kernel/perf_event.c

@@ -1,8 +1,8 @@
-/* Performance counter support for sparc64.
+/* Performance event support for sparc64.
  *
  *
  * Copyright (C) 2009 David S. Miller <davem@davemloft.net>
  * Copyright (C) 2009 David S. Miller <davem@davemloft.net>
  *
  *
- * This code is based almost entirely upon the x86 perf counter
+ * This code is based almost entirely upon the x86 perf event
  * code, which is:
  * code, which is:
  *
  *
  *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
  *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
@@ -12,7 +12,7 @@
  *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
  *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
  */
  */
 
 
-#include <linux/perf_counter.h>
+#include <linux/perf_event.h>
 #include <linux/kprobes.h>
 #include <linux/kprobes.h>
 #include <linux/kernel.h>
 #include <linux/kernel.h>
 #include <linux/kdebug.h>
 #include <linux/kdebug.h>
@@ -46,19 +46,19 @@
  * normal code.
  * normal code.
  */
  */
 
 
-#define MAX_HWCOUNTERS			2
+#define MAX_HWEVENTS			2
 #define MAX_PERIOD			((1UL << 32) - 1)
 #define MAX_PERIOD			((1UL << 32) - 1)
 
 
 #define PIC_UPPER_INDEX			0
 #define PIC_UPPER_INDEX			0
 #define PIC_LOWER_INDEX			1
 #define PIC_LOWER_INDEX			1
 
 
-struct cpu_hw_counters {
-	struct perf_counter	*counters[MAX_HWCOUNTERS];
-	unsigned long		used_mask[BITS_TO_LONGS(MAX_HWCOUNTERS)];
-	unsigned long		active_mask[BITS_TO_LONGS(MAX_HWCOUNTERS)];
+struct cpu_hw_events {
+	struct perf_event	*events[MAX_HWEVENTS];
+	unsigned long		used_mask[BITS_TO_LONGS(MAX_HWEVENTS)];
+	unsigned long		active_mask[BITS_TO_LONGS(MAX_HWEVENTS)];
 	int enabled;
 	int enabled;
 };
 };
-DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters) = { .enabled = 1, };
+DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = { .enabled = 1, };
 
 
 struct perf_event_map {
 struct perf_event_map {
 	u16	encoding;
 	u16	encoding;
@@ -87,9 +87,9 @@ static const struct perf_event_map ultra3i_perfmon_event_map[] = {
 	[PERF_COUNT_HW_CACHE_MISSES] = { 0x0009, PIC_UPPER },
 	[PERF_COUNT_HW_CACHE_MISSES] = { 0x0009, PIC_UPPER },
 };
 };
 
 
-static const struct perf_event_map *ultra3i_event_map(int event)
+static const struct perf_event_map *ultra3i_event_map(int event_id)
 {
 {
-	return &ultra3i_perfmon_event_map[event];
+	return &ultra3i_perfmon_event_map[event_id];
 }
 }
 
 
 static const struct sparc_pmu ultra3i_pmu = {
 static const struct sparc_pmu ultra3i_pmu = {
@@ -111,9 +111,9 @@ static const struct perf_event_map niagara2_perfmon_event_map[] = {
 	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x0202, PIC_UPPER | PIC_LOWER },
 	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x0202, PIC_UPPER | PIC_LOWER },
 };
 };
 
 
-static const struct perf_event_map *niagara2_event_map(int event)
+static const struct perf_event_map *niagara2_event_map(int event_id)
 {
 {
-	return &niagara2_perfmon_event_map[event];
+	return &niagara2_perfmon_event_map[event_id];
 }
 }
 
 
 static const struct sparc_pmu niagara2_pmu = {
 static const struct sparc_pmu niagara2_pmu = {
@@ -130,13 +130,13 @@ static const struct sparc_pmu niagara2_pmu = {
 
 
 static const struct sparc_pmu *sparc_pmu __read_mostly;
 static const struct sparc_pmu *sparc_pmu __read_mostly;
 
 
-static u64 event_encoding(u64 event, int idx)
+static u64 event_encoding(u64 event_id, int idx)
 {
 {
 	if (idx == PIC_UPPER_INDEX)
 	if (idx == PIC_UPPER_INDEX)
-		event <<= sparc_pmu->upper_shift;
+		event_id <<= sparc_pmu->upper_shift;
 	else
 	else
-		event <<= sparc_pmu->lower_shift;
-	return event;
+		event_id <<= sparc_pmu->lower_shift;
+	return event_id;
 }

 static u64 mask_for_index(int idx)
@@ -151,7 +151,7 @@ static u64 nop_for_index(int idx)
 			      sparc_pmu->lower_nop, idx);
 }

-static inline void sparc_pmu_enable_counter(struct hw_perf_counter *hwc,
+static inline void sparc_pmu_enable_event(struct hw_perf_event *hwc,
 					    int idx)
 {
 	u64 val, mask = mask_for_index(idx);
@@ -160,7 +160,7 @@ static inline void sparc_pmu_enable_counter(struct hw_perf_counter *hwc,
 	pcr_ops->write((val & ~mask) | hwc->config);
 }

-static inline void sparc_pmu_disable_counter(struct hw_perf_counter *hwc,
+static inline void sparc_pmu_disable_event(struct hw_perf_event *hwc,
 					     int idx)
 {
 	u64 mask = mask_for_index(idx);
@@ -172,7 +172,7 @@ static inline void sparc_pmu_disable_counter(struct hw_perf_counter *hwc,

 void hw_perf_enable(void)
 {
-	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
+	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 	u64 val;
 	int i;

@@ -184,9 +184,9 @@ void hw_perf_enable(void)

 	val = pcr_ops->read();

-	for (i = 0; i < MAX_HWCOUNTERS; i++) {
-		struct perf_counter *cp = cpuc->counters[i];
-		struct hw_perf_counter *hwc;
+	for (i = 0; i < MAX_HWEVENTS; i++) {
+		struct perf_event *cp = cpuc->events[i];
+		struct hw_perf_event *hwc;

 		if (!cp)
 			continue;
@@ -199,7 +199,7 @@ void hw_perf_enable(void)

 void hw_perf_disable(void)
 {
-	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
+	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 	u64 val;

 	if (!cpuc->enabled)
@@ -241,8 +241,8 @@ static void write_pmc(int idx, u64 val)
 	write_pic(pic);
 }

-static int sparc_perf_counter_set_period(struct perf_counter *counter,
-					 struct hw_perf_counter *hwc, int idx)
+static int sparc_perf_event_set_period(struct perf_event *event,
+					 struct hw_perf_event *hwc, int idx)
 {
 	s64 left = atomic64_read(&hwc->period_left);
 	s64 period = hwc->sample_period;
@@ -268,33 +268,33 @@ static int sparc_perf_counter_set_period(struct perf_counter *counter,

 	write_pmc(idx, (u64)(-left) & 0xffffffff);

-	perf_counter_update_userpage(counter);
+	perf_event_update_userpage(event);

 	return ret;
 }

-static int sparc_pmu_enable(struct perf_counter *counter)
+static int sparc_pmu_enable(struct perf_event *event)
 {
-	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
-	struct hw_perf_counter *hwc = &counter->hw;
+	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct hw_perf_event *hwc = &event->hw;
 	int idx = hwc->idx;

 	if (test_and_set_bit(idx, cpuc->used_mask))
 		return -EAGAIN;

-	sparc_pmu_disable_counter(hwc, idx);
+	sparc_pmu_disable_event(hwc, idx);

-	cpuc->counters[idx] = counter;
+	cpuc->events[idx] = event;
 	set_bit(idx, cpuc->active_mask);

-	sparc_perf_counter_set_period(counter, hwc, idx);
-	sparc_pmu_enable_counter(hwc, idx);
-	perf_counter_update_userpage(counter);
+	sparc_perf_event_set_period(event, hwc, idx);
+	sparc_pmu_enable_event(hwc, idx);
+	perf_event_update_userpage(event);
 	return 0;
 }

-static u64 sparc_perf_counter_update(struct perf_counter *counter,
-				     struct hw_perf_counter *hwc, int idx)
+static u64 sparc_perf_event_update(struct perf_event *event,
+				     struct hw_perf_event *hwc, int idx)
 {
 	int shift = 64 - 32;
 	u64 prev_raw_count, new_raw_count;
@@ -311,79 +311,79 @@ again:
 	delta = (new_raw_count << shift) - (prev_raw_count << shift);
 	delta >>= shift;

-	atomic64_add(delta, &counter->count);
+	atomic64_add(delta, &event->count);
 	atomic64_sub(delta, &hwc->period_left);

 	return new_raw_count;
 }

-static void sparc_pmu_disable(struct perf_counter *counter)
+static void sparc_pmu_disable(struct perf_event *event)
 {
-	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
-	struct hw_perf_counter *hwc = &counter->hw;
+	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct hw_perf_event *hwc = &event->hw;
 	int idx = hwc->idx;

 	clear_bit(idx, cpuc->active_mask);
-	sparc_pmu_disable_counter(hwc, idx);
+	sparc_pmu_disable_event(hwc, idx);

 	barrier();

-	sparc_perf_counter_update(counter, hwc, idx);
-	cpuc->counters[idx] = NULL;
+	sparc_perf_event_update(event, hwc, idx);
+	cpuc->events[idx] = NULL;
 	clear_bit(idx, cpuc->used_mask);

-	perf_counter_update_userpage(counter);
+	perf_event_update_userpage(event);
 }

-static void sparc_pmu_read(struct perf_counter *counter)
+static void sparc_pmu_read(struct perf_event *event)
 {
-	struct hw_perf_counter *hwc = &counter->hw;
-	sparc_perf_counter_update(counter, hwc, hwc->idx);
+	struct hw_perf_event *hwc = &event->hw;
+	sparc_perf_event_update(event, hwc, hwc->idx);
 }

-static void sparc_pmu_unthrottle(struct perf_counter *counter)
+static void sparc_pmu_unthrottle(struct perf_event *event)
 {
-	struct hw_perf_counter *hwc = &counter->hw;
-	sparc_pmu_enable_counter(hwc, hwc->idx);
+	struct hw_perf_event *hwc = &event->hw;
+	sparc_pmu_enable_event(hwc, hwc->idx);
 }

-static atomic_t active_counters = ATOMIC_INIT(0);
+static atomic_t active_events = ATOMIC_INIT(0);
 static DEFINE_MUTEX(pmc_grab_mutex);

-void perf_counter_grab_pmc(void)
+void perf_event_grab_pmc(void)
 {
-	if (atomic_inc_not_zero(&active_counters))
+	if (atomic_inc_not_zero(&active_events))
 		return;

 	mutex_lock(&pmc_grab_mutex);
-	if (atomic_read(&active_counters) == 0) {
+	if (atomic_read(&active_events) == 0) {
 		if (atomic_read(&nmi_active) > 0) {
 			on_each_cpu(stop_nmi_watchdog, NULL, 1);
 			BUG_ON(atomic_read(&nmi_active) != 0);
 		}
-		atomic_inc(&active_counters);
+		atomic_inc(&active_events);
 	}
 	mutex_unlock(&pmc_grab_mutex);
 }

-void perf_counter_release_pmc(void)
+void perf_event_release_pmc(void)
 {
-	if (atomic_dec_and_mutex_lock(&active_counters, &pmc_grab_mutex)) {
+	if (atomic_dec_and_mutex_lock(&active_events, &pmc_grab_mutex)) {
 		if (atomic_read(&nmi_active) == 0)
 			on_each_cpu(start_nmi_watchdog, NULL, 1);
 		mutex_unlock(&pmc_grab_mutex);
 	}
 }

-static void hw_perf_counter_destroy(struct perf_counter *counter)
+static void hw_perf_event_destroy(struct perf_event *event)
 {
-	perf_counter_release_pmc();
+	perf_event_release_pmc();
 }

-static int __hw_perf_counter_init(struct perf_counter *counter)
+static int __hw_perf_event_init(struct perf_event *event)
 {
-	struct perf_counter_attr *attr = &counter->attr;
-	struct hw_perf_counter *hwc = &counter->hw;
+	struct perf_event_attr *attr = &event->attr;
+	struct hw_perf_event *hwc = &event->hw;
 	const struct perf_event_map *pmap;
 	u64 enc;

@@ -396,8 +396,8 @@ static int __hw_perf_counter_init(struct perf_counter *counter)
 	if (attr->config >= sparc_pmu->max_events)
 		return -EINVAL;

-	perf_counter_grab_pmc();
-	counter->destroy = hw_perf_counter_destroy;
+	perf_event_grab_pmc();
+	event->destroy = hw_perf_event_destroy;

 	/* We save the enable bits in the config_base.  So to
 	 * turn off sampling just write 'config', and to enable
@@ -439,16 +439,16 @@ static const struct pmu pmu = {
 	.unthrottle	= sparc_pmu_unthrottle,
 };

-const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
+const struct pmu *hw_perf_event_init(struct perf_event *event)
 {
-	int err = __hw_perf_counter_init(counter);
+	int err = __hw_perf_event_init(event);

 	if (err)
 		return ERR_PTR(err);
 	return &pmu;
 }

-void perf_counter_print_debug(void)
+void perf_event_print_debug(void)
 {
 	unsigned long flags;
 	u64 pcr, pic;
@@ -471,16 +471,16 @@ void perf_counter_print_debug(void)
 	local_irq_restore(flags);
 }

-static int __kprobes perf_counter_nmi_handler(struct notifier_block *self,
+static int __kprobes perf_event_nmi_handler(struct notifier_block *self,
 					      unsigned long cmd, void *__args)
 {
 	struct die_args *args = __args;
 	struct perf_sample_data data;
-	struct cpu_hw_counters *cpuc;
+	struct cpu_hw_events *cpuc;
 	struct pt_regs *regs;
 	int idx;

-	if (!atomic_read(&active_counters))
+	if (!atomic_read(&active_events))
 		return NOTIFY_DONE;

 	switch (cmd) {
@@ -495,32 +495,32 @@ static int __kprobes perf_counter_nmi_handler(struct notifier_block *self,

 	data.addr = 0;

-	cpuc = &__get_cpu_var(cpu_hw_counters);
-	for (idx = 0; idx < MAX_HWCOUNTERS; idx++) {
-		struct perf_counter *counter = cpuc->counters[idx];
-		struct hw_perf_counter *hwc;
+	cpuc = &__get_cpu_var(cpu_hw_events);
+	for (idx = 0; idx < MAX_HWEVENTS; idx++) {
+		struct perf_event *event = cpuc->events[idx];
+		struct hw_perf_event *hwc;
 		u64 val;

 		if (!test_bit(idx, cpuc->active_mask))
 			continue;
-		hwc = &counter->hw;
-		val = sparc_perf_counter_update(counter, hwc, idx);
+		hwc = &event->hw;
+		val = sparc_perf_event_update(event, hwc, idx);
 		if (val & (1ULL << 31))
 			continue;

-		data.period = counter->hw.last_period;
-		if (!sparc_perf_counter_set_period(counter, hwc, idx))
+		data.period = event->hw.last_period;
+		if (!sparc_perf_event_set_period(event, hwc, idx))
 			continue;

-		if (perf_counter_overflow(counter, 1, &data, regs))
-			sparc_pmu_disable_counter(hwc, idx);
+		if (perf_event_overflow(event, 1, &data, regs))
+			sparc_pmu_disable_event(hwc, idx);
 	}

 	return NOTIFY_STOP;
 }

-static __read_mostly struct notifier_block perf_counter_nmi_notifier = {
-	.notifier_call		= perf_counter_nmi_handler,
+static __read_mostly struct notifier_block perf_event_nmi_notifier = {
+	.notifier_call		= perf_event_nmi_handler,
 };

 static bool __init supported_pmu(void)
@@ -536,9 +536,9 @@ static bool __init supported_pmu(void)
 	return false;
 }

-void __init init_hw_perf_counters(void)
+void __init init_hw_perf_events(void)
 {
-	pr_info("Performance counters: ");
+	pr_info("Performance events: ");

 	if (!supported_pmu()) {
		pr_cont("No support for PMU type '%s'\n", sparc_pmu_type);
@@ -547,10 +547,10 @@ void __init init_hw_perf_counters(void)

 	pr_cont("Supported PMU type is '%s'\n", sparc_pmu_type);

-	/* All sparc64 PMUs currently have 2 counters.  But this simple
-	 * driver only supports one active counter at a time.
+	/* All sparc64 PMUs currently have 2 events.  But this simple
+	 * driver only supports one active event at a time.
 	 */
-	perf_max_counters = 1;
+	perf_max_events = 1;

-	register_die_notifier(&perf_counter_nmi_notifier);
+	register_die_notifier(&perf_event_nmi_notifier);
 }
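
The renamed sparc_perf_event_update() above (like its x86 counterpart later in this commit) folds a 32-bit hardware counter read into the 64-bit event->count via a sign-extension shift, so a wrapped counter still yields the right small delta. A standalone sketch of just that arithmetic — the harness and the names pmc_delta/PMC_BITS are illustrative, not part of the patch:

#include <stdint.h>
#include <stdio.h>

/* Width of the hardware counter; the sparc code above uses 32 bits. */
#define PMC_BITS 32

/* Fold a possibly wrapped raw PMC read into a 64-bit delta. */
static uint64_t pmc_delta(uint64_t prev_raw, uint64_t new_raw)
{
	int shift = 64 - PMC_BITS;
	int64_t delta;

	/* Shift both values up so the counter's top bit lands in bit 63,
	 * then arithmetic-shift back down: the sign extension makes a
	 * wrapped counter produce the correct small positive delta. */
	delta = (int64_t)(new_raw << shift) - (int64_t)(prev_raw << shift);
	delta >>= shift;

	return (uint64_t)delta;
}

int main(void)
{
	/* Counter wrapped from near the top of its 32-bit range. */
	uint64_t prev = 0xfffffff0ull, now = 0x00000010ull;

	printf("delta = %llu\n", (unsigned long long)pmc_delta(prev, now)); /* 32 */
	return 0;
}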

+ 1 - 1
arch/sparc/kernel/systbls_32.S

@@ -82,5 +82,5 @@ sys_call_table:
 /*310*/	.long sys_utimensat, sys_signalfd, sys_timerfd_create, sys_eventfd, sys_fallocate
 /*315*/	.long sys_timerfd_settime, sys_timerfd_gettime, sys_signalfd4, sys_eventfd2, sys_epoll_create1
 /*320*/	.long sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, sys_preadv
-/*325*/	.long sys_pwritev, sys_rt_tgsigqueueinfo, sys_perf_counter_open
+/*325*/	.long sys_pwritev, sys_rt_tgsigqueueinfo, sys_perf_event_open
 

+ 2 - 2
arch/sparc/kernel/systbls_64.S

@@ -83,7 +83,7 @@ sys_call_table32:
 /*310*/	.word compat_sys_utimensat, compat_sys_signalfd, sys_timerfd_create, sys_eventfd, compat_sys_fallocate
 	.word compat_sys_timerfd_settime, compat_sys_timerfd_gettime, compat_sys_signalfd4, sys_eventfd2, sys_epoll_create1
 /*320*/	.word sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, compat_sys_preadv
-	.word compat_sys_pwritev, compat_sys_rt_tgsigqueueinfo, sys_perf_counter_open
+	.word compat_sys_pwritev, compat_sys_rt_tgsigqueueinfo, sys_perf_event_open
 
 #endif /* CONFIG_COMPAT */
 
@@ -158,4 +158,4 @@ sys_call_table:
 /*310*/	.word sys_utimensat, sys_signalfd, sys_timerfd_create, sys_eventfd, sys_fallocate
 	.word sys_timerfd_settime, sys_timerfd_gettime, sys_signalfd4, sys_eventfd2, sys_epoll_create1
 /*320*/	.word sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, sys_preadv
-	.word sys_pwritev, sys_rt_tgsigqueueinfo, sys_perf_counter_open
+	.word sys_pwritev, sys_rt_tgsigqueueinfo, sys_perf_event_open

+ 1 - 1
arch/x86/Kconfig

@@ -24,7 +24,7 @@ config X86
 	select HAVE_UNSTABLE_SCHED_CLOCK
 	select HAVE_IDE
 	select HAVE_OPROFILE
-	select HAVE_PERF_COUNTERS if (!M386 && !M486)
+	select HAVE_PERF_EVENTS if (!M386 && !M486)
 	select HAVE_IOREMAP_PROT
 	select HAVE_KPROBES
 	select ARCH_WANT_OPTIONAL_GPIOLIB

+ 1 - 1
arch/x86/ia32/ia32entry.S

@@ -831,5 +831,5 @@ ia32_sys_call_table:
 	.quad compat_sys_preadv
 	.quad compat_sys_pwritev
 	.quad compat_sys_rt_tgsigqueueinfo	/* 335 */
-	.quad sys_perf_counter_open
+	.quad sys_perf_event_open
 ia32_syscall_end:

+ 1 - 1
arch/x86/include/asm/entry_arch.h

@@ -49,7 +49,7 @@ BUILD_INTERRUPT(apic_timer_interrupt,LOCAL_TIMER_VECTOR)
 BUILD_INTERRUPT(error_interrupt,ERROR_APIC_VECTOR)
 BUILD_INTERRUPT(spurious_interrupt,SPURIOUS_APIC_VECTOR)
 
-#ifdef CONFIG_PERF_COUNTERS
+#ifdef CONFIG_PERF_EVENTS
 BUILD_INTERRUPT(perf_pending_interrupt, LOCAL_PENDING_VECTOR)
 #endif
 

+ 15 - 15
arch/x86/include/asm/perf_counter.h → arch/x86/include/asm/perf_event.h

@@ -1,8 +1,8 @@
-#ifndef _ASM_X86_PERF_COUNTER_H
-#define _ASM_X86_PERF_COUNTER_H
+#ifndef _ASM_X86_PERF_EVENT_H
+#define _ASM_X86_PERF_EVENT_H

 /*
- * Performance counter hw details:
+ * Performance event hw details:
  */

 #define X86_PMC_MAX_GENERIC					8
@@ -43,7 +43,7 @@
 union cpuid10_eax {
 	struct {
 		unsigned int version_id:8;
-		unsigned int num_counters:8;
+		unsigned int num_events:8;
 		unsigned int bit_width:8;
 		unsigned int mask_length:8;
 	} split;
@@ -52,7 +52,7 @@ union cpuid10_eax {

 union cpuid10_edx {
 	struct {
-		unsigned int num_counters_fixed:4;
+		unsigned int num_events_fixed:4;
 		unsigned int reserved:28;
 	} split;
 	unsigned int full;
@@ -60,7 +60,7 @@ union cpuid10_edx {


 /*
- * Fixed-purpose performance counters:
+ * Fixed-purpose performance events:
  */

 /*
@@ -87,22 +87,22 @@ union cpuid10_edx {
 /*
  * We model BTS tracing as another fixed-mode PMC.
  *
- * We choose a value in the middle of the fixed counter range, since lower
- * values are used by actual fixed counters and higher values are used
+ * We choose a value in the middle of the fixed event range, since lower
+ * values are used by actual fixed events and higher values are used
  * to indicate other overflow conditions in the PERF_GLOBAL_STATUS msr.
  */
 #define X86_PMC_IDX_FIXED_BTS				(X86_PMC_IDX_FIXED + 16)


-#ifdef CONFIG_PERF_COUNTERS
-extern void init_hw_perf_counters(void);
-extern void perf_counters_lapic_init(void);
+#ifdef CONFIG_PERF_EVENTS
+extern void init_hw_perf_events(void);
+extern void perf_events_lapic_init(void);

-#define PERF_COUNTER_INDEX_OFFSET			0
+#define PERF_EVENT_INDEX_OFFSET			0

 #else
-static inline void init_hw_perf_counters(void)		{ }
-static inline void perf_counters_lapic_init(void)	{ }
+static inline void init_hw_perf_events(void)		{ }
+static inline void perf_events_lapic_init(void)	{ }
 #endif

-#endif /* _ASM_X86_PERF_COUNTER_H */
+#endif /* _ASM_X86_PERF_EVENT_H */
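
The renamed cpuid10_eax/cpuid10_edx bitfields above mirror CPUID leaf 0xA, which is how the x86 driver discovers how many generic and fixed-function PMCs the CPU exposes. A hedged user-space sketch of that probe — the union layouts come from the header above, while the standalone harness and GCC-style inline asm are illustrative assumptions (on CPUs without architectural perfmon the leaf reports zeros):

#include <stdio.h>

union cpuid10_eax {
	struct {
		unsigned int version_id:8;
		unsigned int num_events:8;	/* generic PMC count */
		unsigned int bit_width:8;
		unsigned int mask_length:8;
	} split;
	unsigned int full;
};

union cpuid10_edx {
	struct {
		unsigned int num_events_fixed:4;	/* fixed-function PMC count */
		unsigned int reserved:28;
	} split;
	unsigned int full;
};

int main(void)
{
	union cpuid10_eax eax;
	union cpuid10_edx edx;
	unsigned int ebx, ecx;

	/* CPUID leaf 0xA: architectural performance monitoring. */
	__asm__("cpuid"
		: "=a"(eax.full), "=b"(ebx), "=c"(ecx), "=d"(edx.full)
		: "a"(0xa));

	printf("PMU version %u: %u generic events of %u bits, %u fixed\n",
	       eax.split.version_id, eax.split.num_events,
	       eax.split.bit_width, edx.split.num_events_fixed);
	return 0;
}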

+ 1 - 1
arch/x86/include/asm/unistd_32.h

@@ -341,7 +341,7 @@
 #define __NR_preadv		333
 #define __NR_pwritev		334
 #define __NR_rt_tgsigqueueinfo	335
-#define __NR_perf_counter_open	336
+#define __NR_perf_event_open	336
 
 #ifdef __KERNEL__
 

+ 2 - 2
arch/x86/include/asm/unistd_64.h

@@ -659,8 +659,8 @@ __SYSCALL(__NR_preadv, sys_preadv)
 __SYSCALL(__NR_pwritev, sys_pwritev)
 #define __NR_rt_tgsigqueueinfo			297
 __SYSCALL(__NR_rt_tgsigqueueinfo, sys_rt_tgsigqueueinfo)
-#define __NR_perf_counter_open			298
-__SYSCALL(__NR_perf_counter_open, sys_perf_counter_open)
+#define __NR_perf_event_open			298
+__SYSCALL(__NR_perf_event_open, sys_perf_event_open)

 #ifndef __NO_STUBS
 #define __ARCH_WANT_OLD_READDIR
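
The syscall-table entries above are the user-visible face of the rename: the syscall number is unchanged, only its name becomes perf_event_open. A minimal sketch of exercising the renamed ABI from user space — it assumes post-rename kernel headers that define __NR_perf_event_open and the PERF_EVENT_IOC_* ioctls, and invokes the raw syscall since glibc supplied no wrapper:

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <string.h>
#include <stdio.h>

int main(void)
{
	struct perf_event_attr attr;
	long long count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_INSTRUCTIONS;
	attr.disabled = 1;

	/* No glibc wrapper: invoke the renamed syscall directly. */
	fd = syscall(__NR_perf_event_open, &attr, 0 /* self */,
		     -1 /* any CPU */, -1 /* no group */, 0 /* flags */);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	/* ... workload under measurement would run here ... */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	read(fd, &count, sizeof(count));
	printf("instructions: %lld\n", count);
	close(fd);
	return 0;
}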

+ 3 - 3
arch/x86/kernel/apic/apic.c

@@ -14,7 +14,7 @@
  *	Mikael Pettersson	:	PM converted to driver model.
  */
 
-#include <linux/perf_counter.h>
+#include <linux/perf_event.h>
 #include <linux/kernel_stat.h>
 #include <linux/mc146818rtc.h>
 #include <linux/acpi_pmtmr.h>
@@ -35,7 +35,7 @@
 #include <linux/smp.h>
 #include <linux/mm.h>
 
-#include <asm/perf_counter.h>
+#include <asm/perf_event.h>
 #include <asm/x86_init.h>
 #include <asm/pgalloc.h>
 #include <asm/atomic.h>
@@ -1189,7 +1189,7 @@ void __cpuinit setup_local_APIC(void)
 		apic_write(APIC_ESR, 0);
 	}
 #endif
-	perf_counters_lapic_init();
+	perf_events_lapic_init();
 
 	preempt_disable();
 

+ 1 - 1
arch/x86/kernel/cpu/Makefile

@@ -27,7 +27,7 @@ obj-$(CONFIG_CPU_SUP_CENTAUR)		+= centaur.o
 obj-$(CONFIG_CPU_SUP_TRANSMETA_32)	+= transmeta.o
 obj-$(CONFIG_CPU_SUP_UMC_32)		+= umc.o
 
-obj-$(CONFIG_PERF_COUNTERS)		+= perf_counter.o
+obj-$(CONFIG_PERF_EVENTS)		+= perf_event.o
 
 obj-$(CONFIG_X86_MCE)			+= mcheck/
 obj-$(CONFIG_MTRR)			+= mtrr/

+ 2 - 2
arch/x86/kernel/cpu/common.c

@@ -13,7 +13,7 @@
 #include <linux/io.h>
 
 #include <asm/stackprotector.h>
-#include <asm/perf_counter.h>
+#include <asm/perf_event.h>
 #include <asm/mmu_context.h>
 #include <asm/hypervisor.h>
 #include <asm/processor.h>
@@ -869,7 +869,7 @@ void __init identify_boot_cpu(void)
 #else
 	vgetcpu_set_mode();
 #endif
-	init_hw_perf_counters();
+	init_hw_perf_events();
 }
 
 void __cpuinit identify_secondary_cpu(struct cpuinfo_x86 *c)

+ 278 - 278
arch/x86/kernel/cpu/perf_counter.c → arch/x86/kernel/cpu/perf_event.c

@@ -1,5 +1,5 @@
 /*
- * Performance counter x86 architecture code
+ * Performance events x86 architecture code
  *
  *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
  *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
@@ -11,7 +11,7 @@
  *  For licencing details see kernel-base/COPYING
  */

-#include <linux/perf_counter.h>
+#include <linux/perf_event.h>
 #include <linux/capability.h>
 #include <linux/notifier.h>
 #include <linux/hardirq.h>
@@ -27,10 +27,10 @@
 #include <asm/stacktrace.h>
 #include <asm/nmi.h>

-static u64 perf_counter_mask __read_mostly;
+static u64 perf_event_mask __read_mostly;

-/* The maximal number of PEBS counters: */
-#define MAX_PEBS_COUNTERS	4
+/* The maximal number of PEBS events: */
+#define MAX_PEBS_EVENTS	4

 /* The size of a BTS record in bytes: */
 #define BTS_RECORD_SIZE		24
@@ -65,11 +65,11 @@ struct debug_store {
 	u64	pebs_index;
 	u64	pebs_absolute_maximum;
 	u64	pebs_interrupt_threshold;
-	u64	pebs_counter_reset[MAX_PEBS_COUNTERS];
+	u64	pebs_event_reset[MAX_PEBS_EVENTS];
 };

-struct cpu_hw_counters {
-	struct perf_counter	*counters[X86_PMC_IDX_MAX];
+struct cpu_hw_events {
+	struct perf_event	*events[X86_PMC_IDX_MAX];
 	unsigned long		used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
 	unsigned long		active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
 	unsigned long		interrupts;
@@ -86,17 +86,17 @@ struct x86_pmu {
 	int		(*handle_irq)(struct pt_regs *);
 	void		(*disable_all)(void);
 	void		(*enable_all)(void);
-	void		(*enable)(struct hw_perf_counter *, int);
-	void		(*disable)(struct hw_perf_counter *, int);
+	void		(*enable)(struct hw_perf_event *, int);
+	void		(*disable)(struct hw_perf_event *, int);
 	unsigned	eventsel;
 	unsigned	perfctr;
 	u64		(*event_map)(int);
 	u64		(*raw_event)(u64);
 	int		max_events;
-	int		num_counters;
-	int		num_counters_fixed;
-	int		counter_bits;
-	u64		counter_mask;
+	int		num_events;
+	int		num_events_fixed;
+	int		event_bits;
+	u64		event_mask;
 	int		apic;
 	u64		max_period;
 	u64		intel_ctrl;
@@ -106,7 +106,7 @@ struct x86_pmu {

 static struct x86_pmu x86_pmu __read_mostly;

-static DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters) = {
+static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
 	.enabled = 1,
 };

@@ -124,35 +124,35 @@ static const u64 p6_perfmon_event_map[] =
   [PERF_COUNT_HW_BUS_CYCLES]		= 0x0062,
 };

-static u64 p6_pmu_event_map(int event)
+static u64 p6_pmu_event_map(int hw_event)
 {
-	return p6_perfmon_event_map[event];
+	return p6_perfmon_event_map[hw_event];
 }

 /*
- * Counter setting that is specified not to count anything.
+ * Event setting that is specified not to count anything.
  * We use this to effectively disable a counter.
  *
  * L2_RQSTS with 0 MESI unit mask.
  */
-#define P6_NOP_COUNTER			0x0000002EULL
+#define P6_NOP_EVENT			0x0000002EULL

-static u64 p6_pmu_raw_event(u64 event)
+static u64 p6_pmu_raw_event(u64 hw_event)
 {
 #define P6_EVNTSEL_EVENT_MASK		0x000000FFULL
 #define P6_EVNTSEL_UNIT_MASK		0x0000FF00ULL
 #define P6_EVNTSEL_EDGE_MASK		0x00040000ULL
 #define P6_EVNTSEL_INV_MASK		0x00800000ULL
-#define P6_EVNTSEL_COUNTER_MASK		0xFF000000ULL
+#define P6_EVNTSEL_REG_MASK		0xFF000000ULL

 #define P6_EVNTSEL_MASK			\
 	(P6_EVNTSEL_EVENT_MASK |	\
 	 P6_EVNTSEL_UNIT_MASK  |	\
 	 P6_EVNTSEL_EDGE_MASK  |	\
 	 P6_EVNTSEL_INV_MASK   |	\
-	 P6_EVNTSEL_COUNTER_MASK)
+	 P6_EVNTSEL_REG_MASK)

-	return event & P6_EVNTSEL_MASK;
+	return hw_event & P6_EVNTSEL_MASK;
 }


@@ -170,16 +170,16 @@ static const u64 intel_perfmon_event_map[] =
   [PERF_COUNT_HW_BUS_CYCLES]		= 0x013c,
 };

-static u64 intel_pmu_event_map(int event)
+static u64 intel_pmu_event_map(int hw_event)
 {
-	return intel_perfmon_event_map[event];
+	return intel_perfmon_event_map[hw_event];
 }

 /*
- * Generalized hw caching related event table, filled
+ * Generalized hw caching related hw_event table, filled
  * in on a per model basis. A value of 0 means
- * 'not supported', -1 means 'event makes no sense on
- * this CPU', any other value means the raw event
+ * 'not supported', -1 means 'hw_event makes no sense on
+ * this CPU', any other value means the raw hw_event
  * ID.
  */

@@ -463,22 +463,22 @@ static const u64 atom_hw_cache_event_ids
  },
 };

-static u64 intel_pmu_raw_event(u64 event)
+static u64 intel_pmu_raw_event(u64 hw_event)
 {
 #define CORE_EVNTSEL_EVENT_MASK		0x000000FFULL
 #define CORE_EVNTSEL_UNIT_MASK		0x0000FF00ULL
 #define CORE_EVNTSEL_EDGE_MASK		0x00040000ULL
 #define CORE_EVNTSEL_INV_MASK		0x00800000ULL
-#define CORE_EVNTSEL_COUNTER_MASK	0xFF000000ULL
+#define CORE_EVNTSEL_REG_MASK	0xFF000000ULL

 #define CORE_EVNTSEL_MASK		\
 	(CORE_EVNTSEL_EVENT_MASK |	\
 	 CORE_EVNTSEL_UNIT_MASK  |	\
 	 CORE_EVNTSEL_EDGE_MASK  |	\
 	 CORE_EVNTSEL_INV_MASK  |	\
-	 CORE_EVNTSEL_COUNTER_MASK)
+	 CORE_EVNTSEL_REG_MASK)

-	return event & CORE_EVNTSEL_MASK;
+	return hw_event & CORE_EVNTSEL_MASK;
 }

 static const u64 amd_hw_cache_event_ids
@@ -585,39 +585,39 @@ static const u64 amd_perfmon_event_map[] =
   [PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c5,
 };

-static u64 amd_pmu_event_map(int event)
+static u64 amd_pmu_event_map(int hw_event)
 {
-	return amd_perfmon_event_map[event];
+	return amd_perfmon_event_map[hw_event];
 }

-static u64 amd_pmu_raw_event(u64 event)
+static u64 amd_pmu_raw_event(u64 hw_event)
 {
 #define K7_EVNTSEL_EVENT_MASK	0x7000000FFULL
 #define K7_EVNTSEL_UNIT_MASK	0x00000FF00ULL
 #define K7_EVNTSEL_EDGE_MASK	0x000040000ULL
 #define K7_EVNTSEL_INV_MASK	0x000800000ULL
-#define K7_EVNTSEL_COUNTER_MASK	0x0FF000000ULL
+#define K7_EVNTSEL_REG_MASK	0x0FF000000ULL

 #define K7_EVNTSEL_MASK			\
 	(K7_EVNTSEL_EVENT_MASK |	\
 	 K7_EVNTSEL_UNIT_MASK  |	\
 	 K7_EVNTSEL_EDGE_MASK  |	\
 	 K7_EVNTSEL_INV_MASK   |	\
-	 K7_EVNTSEL_COUNTER_MASK)
+	 K7_EVNTSEL_REG_MASK)

-	return event & K7_EVNTSEL_MASK;
+	return hw_event & K7_EVNTSEL_MASK;
 }

 /*
- * Propagate counter elapsed time into the generic counter.
- * Can only be executed on the CPU where the counter is active.
+ * Propagate event elapsed time into the generic event.
+ * Can only be executed on the CPU where the event is active.
  * Returns the delta events processed.
  */
 static u64
-x86_perf_counter_update(struct perf_counter *counter,
-			struct hw_perf_counter *hwc, int idx)
+x86_perf_event_update(struct perf_event *event,
+			struct hw_perf_event *hwc, int idx)
 {
-	int shift = 64 - x86_pmu.counter_bits;
+	int shift = 64 - x86_pmu.event_bits;
 	u64 prev_raw_count, new_raw_count;
 	s64 delta;

@@ -625,15 +625,15 @@ x86_perf_counter_update(struct perf_counter *counter,
 		return 0;

 	/*
-	 * Careful: an NMI might modify the previous counter value.
+	 * Careful: an NMI might modify the previous event value.
 	 *
 	 * Our tactic to handle this is to first atomically read and
 	 * exchange a new raw count - then add that new-prev delta
-	 * count to the generic counter atomically:
+	 * count to the generic event atomically:
 	 */
 again:
 	prev_raw_count = atomic64_read(&hwc->prev_count);
-	rdmsrl(hwc->counter_base + idx, new_raw_count);
+	rdmsrl(hwc->event_base + idx, new_raw_count);

 	if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
 					new_raw_count) != prev_raw_count)
@@ -642,7 +642,7 @@ again:
 	/*
 	 * Now we have the new raw value and have updated the prev
 	 * timestamp already. We can now calculate the elapsed delta
-	 * (counter-)time and add that to the generic counter.
+	 * (event-)time and add that to the generic event.
 	 *
 	 * Careful, not all hw sign-extends above the physical width
 	 * of the count.
@@ -650,13 +650,13 @@ again:
 	delta = (new_raw_count << shift) - (prev_raw_count << shift);
 	delta >>= shift;

-	atomic64_add(delta, &counter->count);
+	atomic64_add(delta, &event->count);
 	atomic64_sub(delta, &hwc->period_left);

 	return new_raw_count;
 }

-static atomic_t active_counters;
+static atomic_t active_events;
 static DEFINE_MUTEX(pmc_reserve_mutex);

 static bool reserve_pmc_hardware(void)
@@ -667,12 +667,12 @@ static bool reserve_pmc_hardware(void)
 	if (nmi_watchdog == NMI_LOCAL_APIC)
 		disable_lapic_nmi_watchdog();

-	for (i = 0; i < x86_pmu.num_counters; i++) {
+	for (i = 0; i < x86_pmu.num_events; i++) {
 		if (!reserve_perfctr_nmi(x86_pmu.perfctr + i))
 			goto perfctr_fail;
 	}

-	for (i = 0; i < x86_pmu.num_counters; i++) {
+	for (i = 0; i < x86_pmu.num_events; i++) {
 		if (!reserve_evntsel_nmi(x86_pmu.eventsel + i))
 			goto eventsel_fail;
 	}
@@ -685,7 +685,7 @@ eventsel_fail:
 	for (i--; i >= 0; i--)
 		release_evntsel_nmi(x86_pmu.eventsel + i);

-	i = x86_pmu.num_counters;
+	i = x86_pmu.num_events;

 perfctr_fail:
 	for (i--; i >= 0; i--)
@@ -703,7 +703,7 @@ static void release_pmc_hardware(void)
 #ifdef CONFIG_X86_LOCAL_APIC
 	int i;

-	for (i = 0; i < x86_pmu.num_counters; i++) {
+	for (i = 0; i < x86_pmu.num_events; i++) {
 		release_perfctr_nmi(x86_pmu.perfctr + i);
 		release_evntsel_nmi(x86_pmu.eventsel + i);
 	}
@@ -720,7 +720,7 @@ static inline bool bts_available(void)

 static inline void init_debug_store_on_cpu(int cpu)
 {
-	struct debug_store *ds = per_cpu(cpu_hw_counters, cpu).ds;
+	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;

 	if (!ds)
 		return;
@@ -732,7 +732,7 @@ static inline void init_debug_store_on_cpu(int cpu)

 static inline void fini_debug_store_on_cpu(int cpu)
 {
-	if (!per_cpu(cpu_hw_counters, cpu).ds)
+	if (!per_cpu(cpu_hw_events, cpu).ds)
 		return;

 	wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA, 0, 0);
@@ -751,12 +751,12 @@ static void release_bts_hardware(void)
 		fini_debug_store_on_cpu(cpu);

 	for_each_possible_cpu(cpu) {
-		struct debug_store *ds = per_cpu(cpu_hw_counters, cpu).ds;
+		struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;

 		if (!ds)
 			continue;

-		per_cpu(cpu_hw_counters, cpu).ds = NULL;
+		per_cpu(cpu_hw_events, cpu).ds = NULL;

 		kfree((void *)(unsigned long)ds->bts_buffer_base);
 		kfree(ds);
@@ -796,7 +796,7 @@ static int reserve_bts_hardware(void)
 		ds->bts_interrupt_threshold =
 			ds->bts_absolute_maximum - BTS_OVFL_TH;

-		per_cpu(cpu_hw_counters, cpu).ds = ds;
+		per_cpu(cpu_hw_events, cpu).ds = ds;
 		err = 0;
 	}

@@ -812,9 +812,9 @@ static int reserve_bts_hardware(void)
 	return err;
 }

-static void hw_perf_counter_destroy(struct perf_counter *counter)
+static void hw_perf_event_destroy(struct perf_event *event)
 {
-	if (atomic_dec_and_mutex_lock(&active_counters, &pmc_reserve_mutex)) {
+	if (atomic_dec_and_mutex_lock(&active_events, &pmc_reserve_mutex)) {
 		release_pmc_hardware();
 		release_bts_hardware();
 		mutex_unlock(&pmc_reserve_mutex);
@@ -827,7 +827,7 @@ static inline int x86_pmu_initialized(void)
 }

 static inline int
-set_ext_hw_attr(struct hw_perf_counter *hwc, struct perf_counter_attr *attr)
+set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event_attr *attr)
 {
 	unsigned int cache_type, cache_op, cache_result;
 	u64 config, val;
@@ -880,7 +880,7 @@ static void intel_pmu_enable_bts(u64 config)

 static void intel_pmu_disable_bts(void)
 {
-	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
+	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 	unsigned long debugctlmsr;

 	if (!cpuc->ds)
@@ -898,10 +898,10 @@ static void intel_pmu_disable_bts(void)
 /*
  * Setup the hardware configuration for a given attr_type
  */
-static int __hw_perf_counter_init(struct perf_counter *counter)
+static int __hw_perf_event_init(struct perf_event *event)
 {
-	struct perf_counter_attr *attr = &counter->attr;
-	struct hw_perf_counter *hwc = &counter->hw;
+	struct perf_event_attr *attr = &event->attr;
+	struct hw_perf_event *hwc = &event->hw;
 	u64 config;
 	int err;

@@ -909,22 +909,22 @@ static int __hw_perf_counter_init(struct perf_counter *counter)
 		return -ENODEV;

 	err = 0;
-	if (!atomic_inc_not_zero(&active_counters)) {
+	if (!atomic_inc_not_zero(&active_events)) {
 		mutex_lock(&pmc_reserve_mutex);
-		if (atomic_read(&active_counters) == 0) {
+		if (atomic_read(&active_events) == 0) {
 			if (!reserve_pmc_hardware())
 				err = -EBUSY;
 			else
 				err = reserve_bts_hardware();
 		}
 		if (!err)
-			atomic_inc(&active_counters);
+			atomic_inc(&active_events);
 		mutex_unlock(&pmc_reserve_mutex);
 	}
 	if (err)
 		return err;

-	counter->destroy = hw_perf_counter_destroy;
+	event->destroy = hw_perf_event_destroy;

 	/*
 	 * Generate PMC IRQs:
@@ -948,15 +948,15 @@ static int __hw_perf_counter_init(struct perf_counter *counter)
 		/*
 		 * If we have a PMU initialized but no APIC
 		 * interrupts, we cannot sample hardware
-		 * counters (user-space has to fall back and
-		 * sample via a hrtimer based software counter):
+		 * events (user-space has to fall back and
+		 * sample via a hrtimer based software event):
 		 */
 		if (!x86_pmu.apic)
 			return -EOPNOTSUPP;
 	}

 	/*
-	 * Raw event type provide the config in the event structure
+	 * Raw hw_event type provide the config in the hw_event structure
 	 */
 	if (attr->type == PERF_TYPE_RAW) {
 		hwc->config |= x86_pmu.raw_event(attr->config);
@@ -1001,7 +1001,7 @@ static int __hw_perf_counter_init(struct perf_counter *counter)

 static void p6_pmu_disable_all(void)
 {
-	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
+	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 	u64 val;

 	if (!cpuc->enabled)
@@ -1018,7 +1018,7 @@ static void p6_pmu_disable_all(void)

 static void intel_pmu_disable_all(void)
 {
-	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
+	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

 	if (!cpuc->enabled)
 		return;
@@ -1034,7 +1034,7 @@ static void intel_pmu_disable_all(void)

 static void amd_pmu_disable_all(void)
 {
-	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
+	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 	int idx;

 	if (!cpuc->enabled)
@@ -1043,12 +1043,12 @@ static void amd_pmu_disable_all(void)
 	cpuc->enabled = 0;
 	/*
 	 * ensure we write the disable before we start disabling the
-	 * counters proper, so that amd_pmu_enable_counter() does the
+	 * events proper, so that amd_pmu_enable_event() does the
 	 * right thing.
 	 */
 	barrier();

-	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
+	for (idx = 0; idx < x86_pmu.num_events; idx++) {
 		u64 val;

 		if (!test_bit(idx, cpuc->active_mask))
@@ -1070,7 +1070,7 @@ void hw_perf_disable(void)

 static void p6_pmu_enable_all(void)
 {
-	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
+	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 	unsigned long val;

 	if (cpuc->enabled)
@@ -1087,7 +1087,7 @@ static void p6_pmu_enable_all(void)

 static void intel_pmu_enable_all(void)
 {
-	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
+	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

 	if (cpuc->enabled)
 		return;
@@ -1098,19 +1098,19 @@ static void intel_pmu_enable_all(void)
 	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl);

 	if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
-		struct perf_counter *counter =
-			cpuc->counters[X86_PMC_IDX_FIXED_BTS];
+		struct perf_event *event =
+			cpuc->events[X86_PMC_IDX_FIXED_BTS];

-		if (WARN_ON_ONCE(!counter))
+		if (WARN_ON_ONCE(!event))
 			return;

-		intel_pmu_enable_bts(counter->hw.config);
+		intel_pmu_enable_bts(event->hw.config);
 	}
 }

 static void amd_pmu_enable_all(void)
 {
-	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
+	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 	int idx;

 	if (cpuc->enabled)
@@ -1119,14 +1119,14 @@ static void amd_pmu_enable_all(void)
 	cpuc->enabled = 1;
 	barrier();

-	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
-		struct perf_counter *counter = cpuc->counters[idx];
+	for (idx = 0; idx < x86_pmu.num_events; idx++) {
+		struct perf_event *event = cpuc->events[idx];
 		u64 val;

 		if (!test_bit(idx, cpuc->active_mask))
 			continue;

-		val = counter->hw.config;
+		val = event->hw.config;
 		val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
 		wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
 	}
@@ -1153,19 +1153,19 @@ static inline void intel_pmu_ack_status(u64 ack)
 	wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
 }

-static inline void x86_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
+static inline void x86_pmu_enable_event(struct hw_perf_event *hwc, int idx)
 {
 	(void)checking_wrmsrl(hwc->config_base + idx,
 			      hwc->config | ARCH_PERFMON_EVENTSEL0_ENABLE);
 }

-static inline void x86_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
+static inline void x86_pmu_disable_event(struct hw_perf_event *hwc, int idx)
 {
 	(void)checking_wrmsrl(hwc->config_base + idx, hwc->config);
 }

 static inline void
-intel_pmu_disable_fixed(struct hw_perf_counter *hwc, int __idx)
+intel_pmu_disable_fixed(struct hw_perf_event *hwc, int __idx)
 {
 	int idx = __idx - X86_PMC_IDX_FIXED;
 	u64 ctrl_val, mask;
@@ -1178,10 +1178,10 @@ intel_pmu_disable_fixed(struct hw_perf_counter *hwc, int __idx)
 }

 static inline void
-p6_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
+p6_pmu_disable_event(struct hw_perf_event *hwc, int idx)
 {
-	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
-	u64 val = P6_NOP_COUNTER;
+	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	u64 val = P6_NOP_EVENT;

 	if (cpuc->enabled)
 		val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
@@ -1190,7 +1190,7 @@ p6_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
 }

 static inline void
-intel_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
+intel_pmu_disable_event(struct hw_perf_event *hwc, int idx)
 {
 	if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) {
 		intel_pmu_disable_bts();
@@ -1202,24 +1202,24 @@ intel_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
 		return;
 	}

-	x86_pmu_disable_counter(hwc, idx);
+	x86_pmu_disable_event(hwc, idx);
 }

 static inline void
-amd_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
+amd_pmu_disable_event(struct hw_perf_event *hwc, int idx)
 {
-	x86_pmu_disable_counter(hwc, idx);
+	x86_pmu_disable_event(hwc, idx);
 }

 static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);

 /*
  * Set the next IRQ period, based on the hwc->period_left value.
- * To be called with the counter disabled in hw:
+ * To be called with the event disabled in hw:
  */
 static int
-x86_perf_counter_set_period(struct perf_counter *counter,
-			     struct hw_perf_counter *hwc, int idx)
+x86_perf_event_set_period(struct perf_event *event,
+			     struct hw_perf_event *hwc, int idx)
 {
 	s64 left = atomic64_read(&hwc->period_left);
 	s64 period = hwc->sample_period;
@@ -1245,7 +1245,7 @@ x86_perf_counter_set_period(struct perf_counter *counter,
 		ret = 1;
 	}
 	/*
-	 * Quirk: certain CPUs dont like it if just 1 event is left:
+	 * Quirk: certain CPUs dont like it if just 1 hw_event is left:
 	 */
 	if (unlikely(left < 2))
 		left = 2;
@@ -1256,21 +1256,21 @@ x86_perf_counter_set_period(struct perf_counter *counter,
 	per_cpu(pmc_prev_left[idx], smp_processor_id()) = left;

 	/*
-	 * The hw counter starts counting from this counter offset,
+	 * The hw event starts counting from this event offset,
 	 * mark it to be able to extra future deltas:
 	 */
 	atomic64_set(&hwc->prev_count, (u64)-left);

-	err = checking_wrmsrl(hwc->counter_base + idx,
-			     (u64)(-left) & x86_pmu.counter_mask);
+	err = checking_wrmsrl(hwc->event_base + idx,
+			     (u64)(-left) & x86_pmu.event_mask);

-	perf_counter_update_userpage(counter);
+	perf_event_update_userpage(event);

 	return ret;
 }

 static inline void
-intel_pmu_enable_fixed(struct hw_perf_counter *hwc, int __idx)
+intel_pmu_enable_fixed(struct hw_perf_event *hwc, int __idx)
 {
 	int idx = __idx - X86_PMC_IDX_FIXED;
 	u64 ctrl_val, bits, mask;
@@ -1295,9 +1295,9 @@ intel_pmu_enable_fixed(struct hw_perf_counter *hwc, int __idx)
 	err = checking_wrmsrl(hwc->config_base, ctrl_val);
 }

-static void p6_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
+static void p6_pmu_enable_event(struct hw_perf_event *hwc, int idx)
 {
-	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
+	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 	u64 val;

 	val = hwc->config;
@@ -1308,10 +1308,10 @@ static void p6_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
 }


-static void intel_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
+static void intel_pmu_enable_event(struct hw_perf_event *hwc, int idx)
 {
 	if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) {
-		if (!__get_cpu_var(cpu_hw_counters).enabled)
+		if (!__get_cpu_var(cpu_hw_events).enabled)
 			return;

 		intel_pmu_enable_bts(hwc->config);
@@ -1323,134 +1323,134 @@ static void intel_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
 		return;
 	}

-	x86_pmu_enable_counter(hwc, idx);
+	x86_pmu_enable_event(hwc, idx);
 }

-static void amd_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
+static void amd_pmu_enable_event(struct hw_perf_event *hwc, int idx)
 {
-	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
+	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

 	if (cpuc->enabled)
-		x86_pmu_enable_counter(hwc, idx);
+		x86_pmu_enable_event(hwc, idx);
 }

 static int
-fixed_mode_idx(struct perf_counter *counter, struct hw_perf_counter *hwc)
+fixed_mode_idx(struct perf_event *event, struct hw_perf_event *hwc)
 {
-	unsigned int event;
+	unsigned int hw_event;

-	event = hwc->config & ARCH_PERFMON_EVENT_MASK;
+	hw_event = hwc->config & ARCH_PERFMON_EVENT_MASK;

-	if (unlikely((event ==
+	if (unlikely((hw_event ==
 		      x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS)) &&
 		     (hwc->sample_period == 1)))
 		return X86_PMC_IDX_FIXED_BTS;

-	if (!x86_pmu.num_counters_fixed)
+	if (!x86_pmu.num_events_fixed)
 		return -1;

-	if (unlikely(event == x86_pmu.event_map(PERF_COUNT_HW_INSTRUCTIONS)))
+	if (unlikely(hw_event == x86_pmu.event_map(PERF_COUNT_HW_INSTRUCTIONS)))
 		return X86_PMC_IDX_FIXED_INSTRUCTIONS;
-	if (unlikely(event == x86_pmu.event_map(PERF_COUNT_HW_CPU_CYCLES)))
+	if (unlikely(hw_event == x86_pmu.event_map(PERF_COUNT_HW_CPU_CYCLES)))
 		return X86_PMC_IDX_FIXED_CPU_CYCLES;
-	if (unlikely(event == x86_pmu.event_map(PERF_COUNT_HW_BUS_CYCLES)))
+	if (unlikely(hw_event == x86_pmu.event_map(PERF_COUNT_HW_BUS_CYCLES)))
 		return X86_PMC_IDX_FIXED_BUS_CYCLES;

 	return -1;
 }

 /*
- * Find a PMC slot for the freshly enabled / scheduled in counter:
+ * Find a PMC slot for the freshly enabled / scheduled in event:
  */
-static int x86_pmu_enable(struct perf_counter *counter)
+static int x86_pmu_enable(struct perf_event *event)
 {
-	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
-	struct hw_perf_counter *hwc = &counter->hw;
+	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct hw_perf_event *hwc = &event->hw;
 	int idx;

-	idx = fixed_mode_idx(counter, hwc);
+	idx = fixed_mode_idx(event, hwc);
 	if (idx == X86_PMC_IDX_FIXED_BTS) {
 		/* BTS is already occupied. */
 		if (test_and_set_bit(idx, cpuc->used_mask))
 			return -EAGAIN;

 		hwc->config_base	= 0;
-		hwc->counter_base	= 0;
+		hwc->event_base	= 0;
 		hwc->idx		= idx;
 	} else if (idx >= 0) {
 		/*
-		 * Try to get the fixed counter, if that is already taken
-		 * then try to get a generic counter:
+		 * Try to get the fixed event, if that is already taken
+		 * then try to get a generic event:
 		 */
 		if (test_and_set_bit(idx, cpuc->used_mask))
 			goto try_generic;

 		hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
 		/*
-		 * We set it so that counter_base + idx in wrmsr/rdmsr maps to
+		 * We set it so that event_base + idx in wrmsr/rdmsr maps to
 		 * MSR_ARCH_PERFMON_FIXED_CTR0 ... CTR2:
 		 */
-		hwc->counter_base =
+		hwc->event_base =
 			MSR_ARCH_PERFMON_FIXED_CTR0 - X86_PMC_IDX_FIXED;
 		hwc->idx = idx;
 	} else {
 		idx = hwc->idx;
-		/* Try to get the previous generic counter again */
+		/* Try to get the previous generic event again */
 		if (test_and_set_bit(idx, cpuc->used_mask)) {
 try_generic:
 			idx = find_first_zero_bit(cpuc->used_mask,
-						  x86_pmu.num_counters);
-			if (idx == x86_pmu.num_counters)
+						  x86_pmu.num_events);
+			if (idx == x86_pmu.num_events)
 				return -EAGAIN;

 			set_bit(idx, cpuc->used_mask);
 			hwc->idx = idx;
 		}
 		hwc->config_base  = x86_pmu.eventsel;
-		hwc->counter_base = x86_pmu.perfctr;
+		hwc->event_base = x86_pmu.perfctr;
 	}

-	perf_counters_lapic_init();
+	perf_events_lapic_init();

 	x86_pmu.disable(hwc, idx);

-	cpuc->counters[idx] = counter;
+	cpuc->events[idx] = event;
 	set_bit(idx, cpuc->active_mask);

-	x86_perf_counter_set_period(counter, hwc, idx);
+	x86_perf_event_set_period(event, hwc, idx);
 	x86_pmu.enable(hwc, idx);

-	perf_counter_update_userpage(counter);
+	perf_event_update_userpage(event);

 	return 0;
 }

-static void x86_pmu_unthrottle(struct perf_counter *counter)
+static void x86_pmu_unthrottle(struct perf_event *event)
 {
-	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
-	struct hw_perf_counter *hwc = &counter->hw;
+	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct hw_perf_event *hwc = &event->hw;

 	if (WARN_ON_ONCE(hwc->idx >= X86_PMC_IDX_MAX ||
-				cpuc->counters[hwc->idx] != counter))
+				cpuc->events[hwc->idx] != event))
 		return;

 	x86_pmu.enable(hwc, hwc->idx);
 }

-void perf_counter_print_debug(void)
+void perf_event_print_debug(void)
 {
 	u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed;
-	struct cpu_hw_counters *cpuc;
+	struct cpu_hw_events *cpuc;
 	unsigned long flags;
 	int cpu, idx;

-	if (!x86_pmu.num_counters)
+	if (!x86_pmu.num_events)
 		return;

 	local_irq_save(flags);

 	cpu = smp_processor_id();
-	cpuc = &per_cpu(cpu_hw_counters, cpu);
+	cpuc = &per_cpu(cpu_hw_events, cpu);

 	if (x86_pmu.version >= 2) {
 		rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
@@ -1466,7 +1466,7 @@ void perf_counter_print_debug(void)
 	}
 	}
 	pr_info("CPU#%d: used:       %016llx\n", cpu, *(u64 *)cpuc->used_mask);
 	pr_info("CPU#%d: used:       %016llx\n", cpu, *(u64 *)cpuc->used_mask);
 
 
-	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
+	for (idx = 0; idx < x86_pmu.num_events; idx++) {
 		rdmsrl(x86_pmu.eventsel + idx, pmc_ctrl);
 		rdmsrl(x86_pmu.eventsel + idx, pmc_ctrl);
 		rdmsrl(x86_pmu.perfctr  + idx, pmc_count);
 		rdmsrl(x86_pmu.perfctr  + idx, pmc_count);
 
 
@@ -1479,7 +1479,7 @@ void perf_counter_print_debug(void)
 		pr_info("CPU#%d:   gen-PMC%d left:  %016llx\n",
 		pr_info("CPU#%d:   gen-PMC%d left:  %016llx\n",
 			cpu, idx, prev_left);
 			cpu, idx, prev_left);
 	}
 	}
-	for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) {
+	for (idx = 0; idx < x86_pmu.num_events_fixed; idx++) {
 		rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count);
 		rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count);
 
 
 		pr_info("CPU#%d: fixed-PMC%d count: %016llx\n",
 		pr_info("CPU#%d: fixed-PMC%d count: %016llx\n",
@@ -1488,7 +1488,7 @@ void perf_counter_print_debug(void)
 	local_irq_restore(flags);
 	local_irq_restore(flags);
 }
 }
 
 
-static void intel_pmu_drain_bts_buffer(struct cpu_hw_counters *cpuc)
+static void intel_pmu_drain_bts_buffer(struct cpu_hw_events *cpuc)
 {
 {
 	struct debug_store *ds = cpuc->ds;
 	struct debug_store *ds = cpuc->ds;
 	struct bts_record {
 	struct bts_record {
@@ -1496,14 +1496,14 @@ static void intel_pmu_drain_bts_buffer(struct cpu_hw_counters *cpuc)
 		u64	to;
 		u64	to;
 		u64	flags;
 		u64	flags;
 	};
 	};
-	struct perf_counter *counter = cpuc->counters[X86_PMC_IDX_FIXED_BTS];
+	struct perf_event *event = cpuc->events[X86_PMC_IDX_FIXED_BTS];
 	struct bts_record *at, *top;
 	struct bts_record *at, *top;
 	struct perf_output_handle handle;
 	struct perf_output_handle handle;
 	struct perf_event_header header;
 	struct perf_event_header header;
 	struct perf_sample_data data;
 	struct perf_sample_data data;
 	struct pt_regs regs;
 	struct pt_regs regs;
 
 
-	if (!counter)
+	if (!event)
 		return;
 		return;
 
 
 	if (!ds)
 	if (!ds)
@@ -1518,7 +1518,7 @@ static void intel_pmu_drain_bts_buffer(struct cpu_hw_counters *cpuc)
 	ds->bts_index = ds->bts_buffer_base;
 	ds->bts_index = ds->bts_buffer_base;
 
 
 
 
-	data.period	= counter->hw.last_period;
+	data.period	= event->hw.last_period;
 	data.addr	= 0;
 	data.addr	= 0;
 	regs.ip		= 0;
 	regs.ip		= 0;
 
 
@@ -1527,9 +1527,9 @@ static void intel_pmu_drain_bts_buffer(struct cpu_hw_counters *cpuc)
 	 * We will overwrite the from and to address before we output
 	 * the sample.
 	 */
-	perf_prepare_sample(&header, &data, counter, &regs);
+	perf_prepare_sample(&header, &data, event, &regs);
 
-	if (perf_output_begin(&handle, counter,
+	if (perf_output_begin(&handle, event,
 			      header.size * (top - at), 1, 1))
 		return;
 
@@ -1537,20 +1537,20 @@ static void intel_pmu_drain_bts_buffer(struct cpu_hw_counters *cpuc)
 		data.ip		= at->from;
 		data.addr	= at->to;
 
-		perf_output_sample(&handle, &header, &data, counter);
+		perf_output_sample(&handle, &header, &data, event);
 	}
 
 	perf_output_end(&handle);
 
 	/* There's new data available. */
-	counter->hw.interrupts++;
-	counter->pending_kill = POLL_IN;
+	event->hw.interrupts++;
+	event->pending_kill = POLL_IN;
 }
 
-static void x86_pmu_disable(struct perf_counter *counter)
+static void x86_pmu_disable(struct perf_event *event)
 {
-	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
-	struct hw_perf_counter *hwc = &counter->hw;
+	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct hw_perf_event *hwc = &event->hw;
 	int idx = hwc->idx;
 
 	/*
@@ -1562,63 +1562,63 @@ static void x86_pmu_disable(struct perf_counter *counter)
 
 	/*
 	 * Make sure the cleared pointer becomes visible before we
-	 * (potentially) free the counter:
+	 * (potentially) free the event:
 	 */
 	barrier();
 
 	/*
-	 * Drain the remaining delta count out of a counter
+	 * Drain the remaining delta count out of an event
 	 * that we are disabling:
 	 */
-	x86_perf_counter_update(counter, hwc, idx);
+	x86_perf_event_update(event, hwc, idx);
 
 	/* Drain the remaining BTS records. */
 	if (unlikely(idx == X86_PMC_IDX_FIXED_BTS))
 		intel_pmu_drain_bts_buffer(cpuc);
 
-	cpuc->counters[idx] = NULL;
+	cpuc->events[idx] = NULL;
 	clear_bit(idx, cpuc->used_mask);
 
-	perf_counter_update_userpage(counter);
+	perf_event_update_userpage(event);
 }
 
 /*
- * Save and restart an expired counter. Called by NMI contexts,
- * so it has to be careful about preempting normal counter ops:
+ * Save and restart an expired event. Called by NMI contexts,
+ * so it has to be careful about preempting normal event ops:
  */
-static int intel_pmu_save_and_restart(struct perf_counter *counter)
+static int intel_pmu_save_and_restart(struct perf_event *event)
 {
-	struct hw_perf_counter *hwc = &counter->hw;
+	struct hw_perf_event *hwc = &event->hw;
 	int idx = hwc->idx;
 	int ret;
 
-	x86_perf_counter_update(counter, hwc, idx);
-	ret = x86_perf_counter_set_period(counter, hwc, idx);
+	x86_perf_event_update(event, hwc, idx);
+	ret = x86_perf_event_set_period(event, hwc, idx);
 
-	if (counter->state == PERF_COUNTER_STATE_ACTIVE)
-		intel_pmu_enable_counter(hwc, idx);
+	if (event->state == PERF_EVENT_STATE_ACTIVE)
+		intel_pmu_enable_event(hwc, idx);
 
 	return ret;
 }
 
 static void intel_pmu_reset(void)
 {
-	struct debug_store *ds = __get_cpu_var(cpu_hw_counters).ds;
+	struct debug_store *ds = __get_cpu_var(cpu_hw_events).ds;
 	unsigned long flags;
 	int idx;
 
-	if (!x86_pmu.num_counters)
+	if (!x86_pmu.num_events)
 		return;
 
 	local_irq_save(flags);
 
 	printk("clearing PMU state on CPU#%d\n", smp_processor_id());
 
-	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
+	for (idx = 0; idx < x86_pmu.num_events; idx++) {
 		checking_wrmsrl(x86_pmu.eventsel + idx, 0ull);
 		checking_wrmsrl(x86_pmu.perfctr  + idx, 0ull);
 	}
-	for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) {
+	for (idx = 0; idx < x86_pmu.num_events_fixed; idx++) {
 		checking_wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);
 	}
 	if (ds)
@@ -1630,38 +1630,38 @@ static void intel_pmu_reset(void)
 static int p6_pmu_handle_irq(struct pt_regs *regs)
 {
 	struct perf_sample_data data;
-	struct cpu_hw_counters *cpuc;
-	struct perf_counter *counter;
-	struct hw_perf_counter *hwc;
+	struct cpu_hw_events *cpuc;
+	struct perf_event *event;
+	struct hw_perf_event *hwc;
 	int idx, handled = 0;
 	u64 val;
 
 	data.addr = 0;
 
-	cpuc = &__get_cpu_var(cpu_hw_counters);
+	cpuc = &__get_cpu_var(cpu_hw_events);
 
-	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
+	for (idx = 0; idx < x86_pmu.num_events; idx++) {
 		if (!test_bit(idx, cpuc->active_mask))
 			continue;
 
-		counter = cpuc->counters[idx];
-		hwc = &counter->hw;
+		event = cpuc->events[idx];
+		hwc = &event->hw;
 
-		val = x86_perf_counter_update(counter, hwc, idx);
-		if (val & (1ULL << (x86_pmu.counter_bits - 1)))
+		val = x86_perf_event_update(event, hwc, idx);
+		if (val & (1ULL << (x86_pmu.event_bits - 1)))
 			continue;
 
 		/*
-		 * counter overflow
+		 * event overflow
 		 */
 		handled		= 1;
-		data.period	= counter->hw.last_period;
+		data.period	= event->hw.last_period;
 
-		if (!x86_perf_counter_set_period(counter, hwc, idx))
+		if (!x86_perf_event_set_period(event, hwc, idx))
 			continue;
 
-		if (perf_counter_overflow(counter, 1, &data, regs))
-			p6_pmu_disable_counter(hwc, idx);
+		if (perf_event_overflow(event, 1, &data, regs))
+			p6_pmu_disable_event(hwc, idx);
 	}
 
 	if (handled)
@@ -1677,13 +1677,13 @@ static int p6_pmu_handle_irq(struct pt_regs *regs)
 static int intel_pmu_handle_irq(struct pt_regs *regs)
 {
 	struct perf_sample_data data;
-	struct cpu_hw_counters *cpuc;
+	struct cpu_hw_events *cpuc;
 	int bit, loops;
 	u64 ack, status;
 
 	data.addr = 0;
 
-	cpuc = &__get_cpu_var(cpu_hw_counters);
+	cpuc = &__get_cpu_var(cpu_hw_events);
 
 	perf_disable();
 	intel_pmu_drain_bts_buffer(cpuc);
@@ -1696,8 +1696,8 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
 	loops = 0;
 again:
 	if (++loops > 100) {
-		WARN_ONCE(1, "perfcounters: irq loop stuck!\n");
-		perf_counter_print_debug();
+		WARN_ONCE(1, "perfevents: irq loop stuck!\n");
+		perf_event_print_debug();
 		intel_pmu_reset();
 		perf_enable();
 		return 1;
@@ -1706,19 +1706,19 @@ again:
 	inc_irq_stat(apic_perf_irqs);
 	ack = status;
 	for_each_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
-		struct perf_counter *counter = cpuc->counters[bit];
+		struct perf_event *event = cpuc->events[bit];
 
 		clear_bit(bit, (unsigned long *) &status);
 		if (!test_bit(bit, cpuc->active_mask))
 			continue;
 
-		if (!intel_pmu_save_and_restart(counter))
+		if (!intel_pmu_save_and_restart(event))
 			continue;
 
-		data.period = counter->hw.last_period;
+		data.period = event->hw.last_period;
 
-		if (perf_counter_overflow(counter, 1, &data, regs))
-			intel_pmu_disable_counter(&counter->hw, bit);
+		if (perf_event_overflow(event, 1, &data, regs))
+			intel_pmu_disable_event(&event->hw, bit);
 	}
 
 	intel_pmu_ack_status(ack);
@@ -1738,38 +1738,38 @@ again:
 static int amd_pmu_handle_irq(struct pt_regs *regs)
 {
 	struct perf_sample_data data;
-	struct cpu_hw_counters *cpuc;
-	struct perf_counter *counter;
-	struct hw_perf_counter *hwc;
+	struct cpu_hw_events *cpuc;
+	struct perf_event *event;
+	struct hw_perf_event *hwc;
 	int idx, handled = 0;
 	u64 val;
 
 	data.addr = 0;
 
-	cpuc = &__get_cpu_var(cpu_hw_counters);
+	cpuc = &__get_cpu_var(cpu_hw_events);
 
-	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
+	for (idx = 0; idx < x86_pmu.num_events; idx++) {
 		if (!test_bit(idx, cpuc->active_mask))
 			continue;
 
-		counter = cpuc->counters[idx];
-		hwc = &counter->hw;
+		event = cpuc->events[idx];
+		hwc = &event->hw;
 
-		val = x86_perf_counter_update(counter, hwc, idx);
-		if (val & (1ULL << (x86_pmu.counter_bits - 1)))
+		val = x86_perf_event_update(event, hwc, idx);
+		if (val & (1ULL << (x86_pmu.event_bits - 1)))
 			continue;
 
 		/*
-		 * counter overflow
+		 * event overflow
 		 */
 		handled		= 1;
-		data.period	= counter->hw.last_period;
+		data.period	= event->hw.last_period;
 
-		if (!x86_perf_counter_set_period(counter, hwc, idx))
+		if (!x86_perf_event_set_period(event, hwc, idx))
 			continue;
 
-		if (perf_counter_overflow(counter, 1, &data, regs))
-			amd_pmu_disable_counter(hwc, idx);
+		if (perf_event_overflow(event, 1, &data, regs))
+			amd_pmu_disable_event(hwc, idx);
 	}
 
 	if (handled)
@@ -1783,18 +1783,18 @@ void smp_perf_pending_interrupt(struct pt_regs *regs)
 	irq_enter();
 	ack_APIC_irq();
 	inc_irq_stat(apic_pending_irqs);
-	perf_counter_do_pending();
+	perf_event_do_pending();
 	irq_exit();
 }
 
-void set_perf_counter_pending(void)
+void set_perf_event_pending(void)
 {
 #ifdef CONFIG_X86_LOCAL_APIC
 	apic->send_IPI_self(LOCAL_PENDING_VECTOR);
 #endif
 }
 
-void perf_counters_lapic_init(void)
+void perf_events_lapic_init(void)
 {
 #ifdef CONFIG_X86_LOCAL_APIC
 	if (!x86_pmu.apic || !x86_pmu_initialized())
@@ -1808,13 +1808,13 @@ void perf_counters_lapic_init(void)
 }
 
 static int __kprobes
-perf_counter_nmi_handler(struct notifier_block *self,
+perf_event_nmi_handler(struct notifier_block *self,
 			 unsigned long cmd, void *__args)
 {
 	struct die_args *args = __args;
 	struct pt_regs *regs;
 
-	if (!atomic_read(&active_counters))
+	if (!atomic_read(&active_events))
 		return NOTIFY_DONE;
 
 	switch (cmd) {
@@ -1833,7 +1833,7 @@ perf_counter_nmi_handler(struct notifier_block *self,
 #endif
 	/*
 	 * Can't rely on the handled return value to say it was our NMI, two
-	 * counters could trigger 'simultaneously' raising two back-to-back NMIs.
+	 * events could trigger 'simultaneously' raising two back-to-back NMIs.
 	 *
 	 * If the first NMI handles both, the latter will be empty and daze
 	 * the CPU.
@@ -1843,8 +1843,8 @@ perf_counter_nmi_handler(struct notifier_block *self,
 	return NOTIFY_STOP;
 }
 
-static __read_mostly struct notifier_block perf_counter_nmi_notifier = {
-	.notifier_call		= perf_counter_nmi_handler,
+static __read_mostly struct notifier_block perf_event_nmi_notifier = {
+	.notifier_call		= perf_event_nmi_handler,
 	.next			= NULL,
 	.priority		= 1
 };
@@ -1854,8 +1854,8 @@ static struct x86_pmu p6_pmu = {
 	.handle_irq		= p6_pmu_handle_irq,
 	.disable_all		= p6_pmu_disable_all,
 	.enable_all		= p6_pmu_enable_all,
-	.enable			= p6_pmu_enable_counter,
-	.disable		= p6_pmu_disable_counter,
+	.enable			= p6_pmu_enable_event,
+	.disable		= p6_pmu_disable_event,
 	.eventsel		= MSR_P6_EVNTSEL0,
 	.perfctr		= MSR_P6_PERFCTR0,
 	.event_map		= p6_pmu_event_map,
@@ -1864,16 +1864,16 @@ static struct x86_pmu p6_pmu = {
 	.apic			= 1,
 	.max_period		= (1ULL << 31) - 1,
 	.version		= 0,
-	.num_counters		= 2,
+	.num_events		= 2,
 	/*
-	 * Counters have 40 bits implemented. However they are designed such
+	 * Events have 40 bits implemented. However they are designed such
 	 * that bits [32-39] are sign extensions of bit 31. As such the
-	 * effective width of a counter for P6-like PMU is 32 bits only.
+	 * effective width of an event for a P6-like PMU is 32 bits only.
 	 *
 	 * See IA-32 Intel Architecture Software developer manual Vol 3B
 	 */
-	.counter_bits		= 32,
-	.counter_mask		= (1ULL << 32) - 1,
+	.event_bits		= 32,
+	.event_mask		= (1ULL << 32) - 1,
 };
 
 static struct x86_pmu intel_pmu = {
@@ -1881,8 +1881,8 @@ static struct x86_pmu intel_pmu = {
 	.handle_irq		= intel_pmu_handle_irq,
 	.disable_all		= intel_pmu_disable_all,
 	.enable_all		= intel_pmu_enable_all,
-	.enable			= intel_pmu_enable_counter,
-	.disable		= intel_pmu_disable_counter,
+	.enable			= intel_pmu_enable_event,
+	.disable		= intel_pmu_disable_event,
 	.eventsel		= MSR_ARCH_PERFMON_EVENTSEL0,
 	.perfctr		= MSR_ARCH_PERFMON_PERFCTR0,
 	.event_map		= intel_pmu_event_map,
@@ -1892,7 +1892,7 @@ static struct x86_pmu intel_pmu = {
 	/*
 	 * Intel PMCs cannot be accessed sanely above 32 bit width,
 	 * so we install an artificial 1<<31 period regardless of
-	 * the generic counter period:
+	 * the generic event period:
 	 */
 	.max_period		= (1ULL << 31) - 1,
 	.enable_bts		= intel_pmu_enable_bts,
@@ -1904,16 +1904,16 @@ static struct x86_pmu amd_pmu = {
 	.handle_irq		= amd_pmu_handle_irq,
 	.disable_all		= amd_pmu_disable_all,
 	.enable_all		= amd_pmu_enable_all,
-	.enable			= amd_pmu_enable_counter,
-	.disable		= amd_pmu_disable_counter,
+	.enable			= amd_pmu_enable_event,
+	.disable		= amd_pmu_disable_event,
 	.eventsel		= MSR_K7_EVNTSEL0,
 	.perfctr		= MSR_K7_PERFCTR0,
 	.event_map		= amd_pmu_event_map,
 	.raw_event		= amd_pmu_raw_event,
 	.max_events		= ARRAY_SIZE(amd_perfmon_event_map),
-	.num_counters		= 4,
-	.counter_bits		= 48,
-	.counter_mask		= (1ULL << 48) - 1,
+	.num_events		= 4,
+	.event_bits		= 48,
+	.event_mask		= (1ULL << 48) - 1,
 	.apic			= 1,
 	/* use highest bit to detect overflow */
 	.max_period		= (1ULL << 47) - 1,
@@ -1970,7 +1970,7 @@ static int intel_pmu_init(void)
 
 	/*
 	 * Check whether the Architectural PerfMon supports
-	 * Branch Misses Retired Event or not.
+	 * Branch Misses Retired hw_event or not.
	 */
 	cpuid(10, &eax.full, &ebx, &unused, &edx.full);
 	if (eax.split.mask_length <= ARCH_PERFMON_BRANCH_MISSES_RETIRED)
@@ -1982,15 +1982,15 @@ static int intel_pmu_init(void)
 
 	x86_pmu				= intel_pmu;
 	x86_pmu.version			= version;
-	x86_pmu.num_counters		= eax.split.num_counters;
-	x86_pmu.counter_bits		= eax.split.bit_width;
-	x86_pmu.counter_mask		= (1ULL << eax.split.bit_width) - 1;
+	x86_pmu.num_events		= eax.split.num_events;
+	x86_pmu.event_bits		= eax.split.bit_width;
+	x86_pmu.event_mask		= (1ULL << eax.split.bit_width) - 1;
 
 	/*
-	 * Quirk: v2 perfmon does not report fixed-purpose counters, so
-	 * assume at least 3 counters:
+	 * Quirk: v2 perfmon does not report fixed-purpose events, so
+	 * assume at least 3 events:
 	 */
-	x86_pmu.num_counters_fixed	= max((int)edx.split.num_counters_fixed, 3);
+	x86_pmu.num_events_fixed	= max((int)edx.split.num_events_fixed, 3);
 
 	/*
 	 * Install the hw-cache-events table:
@@ -2037,11 +2037,11 @@ static int amd_pmu_init(void)
 	return 0;
 }
 
-void __init init_hw_perf_counters(void)
+void __init init_hw_perf_events(void)
 {
 	int err;
 
-	pr_info("Performance Counters: ");
+	pr_info("Performance Events: ");
 
 	switch (boot_cpu_data.x86_vendor) {
 	case X86_VENDOR_INTEL:
@@ -2054,45 +2054,45 @@ void __init init_hw_perf_counters(void)
 		return;
 	}
 	if (err != 0) {
-		pr_cont("no PMU driver, software counters only.\n");
+		pr_cont("no PMU driver, software events only.\n");
 		return;
 	}
 
 	pr_cont("%s PMU driver.\n", x86_pmu.name);
 
-	if (x86_pmu.num_counters > X86_PMC_MAX_GENERIC) {
-		WARN(1, KERN_ERR "hw perf counters %d > max(%d), clipping!",
-		     x86_pmu.num_counters, X86_PMC_MAX_GENERIC);
-		x86_pmu.num_counters = X86_PMC_MAX_GENERIC;
+	if (x86_pmu.num_events > X86_PMC_MAX_GENERIC) {
+		WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
+		     x86_pmu.num_events, X86_PMC_MAX_GENERIC);
+		x86_pmu.num_events = X86_PMC_MAX_GENERIC;
 	}
-	perf_counter_mask = (1 << x86_pmu.num_counters) - 1;
-	perf_max_counters = x86_pmu.num_counters;
+	perf_event_mask = (1 << x86_pmu.num_events) - 1;
+	perf_max_events = x86_pmu.num_events;
 
-	if (x86_pmu.num_counters_fixed > X86_PMC_MAX_FIXED) {
-		WARN(1, KERN_ERR "hw perf counters fixed %d > max(%d), clipping!",
-		     x86_pmu.num_counters_fixed, X86_PMC_MAX_FIXED);
-		x86_pmu.num_counters_fixed = X86_PMC_MAX_FIXED;
+	if (x86_pmu.num_events_fixed > X86_PMC_MAX_FIXED) {
+		WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
+		     x86_pmu.num_events_fixed, X86_PMC_MAX_FIXED);
+		x86_pmu.num_events_fixed = X86_PMC_MAX_FIXED;
 	}
 
-	perf_counter_mask |=
-		((1LL << x86_pmu.num_counters_fixed)-1) << X86_PMC_IDX_FIXED;
-	x86_pmu.intel_ctrl = perf_counter_mask;
+	perf_event_mask |=
+		((1LL << x86_pmu.num_events_fixed)-1) << X86_PMC_IDX_FIXED;
+	x86_pmu.intel_ctrl = perf_event_mask;
 
-	perf_counters_lapic_init();
-	register_die_notifier(&perf_counter_nmi_notifier);
+	perf_events_lapic_init();
+	register_die_notifier(&perf_event_nmi_notifier);
 
-	pr_info("... version:                 %d\n",     x86_pmu.version);
-	pr_info("... bit width:               %d\n",     x86_pmu.counter_bits);
-	pr_info("... generic counters:        %d\n",     x86_pmu.num_counters);
-	pr_info("... value mask:              %016Lx\n", x86_pmu.counter_mask);
-	pr_info("... max period:              %016Lx\n", x86_pmu.max_period);
-	pr_info("... fixed-purpose counters:  %d\n",     x86_pmu.num_counters_fixed);
-	pr_info("... counter mask:            %016Lx\n", perf_counter_mask);
+	pr_info("... version:                %d\n",     x86_pmu.version);
+	pr_info("... bit width:              %d\n",     x86_pmu.event_bits);
+	pr_info("... generic registers:      %d\n",     x86_pmu.num_events);
+	pr_info("... value mask:             %016Lx\n", x86_pmu.event_mask);
+	pr_info("... max period:             %016Lx\n", x86_pmu.max_period);
+	pr_info("... fixed-purpose events:   %d\n",     x86_pmu.num_events_fixed);
+	pr_info("... event mask:             %016Lx\n", perf_event_mask);
 }
 
-static inline void x86_pmu_read(struct perf_counter *counter)
+static inline void x86_pmu_read(struct perf_event *event)
 {
-	x86_perf_counter_update(counter, &counter->hw, counter->hw.idx);
+	x86_perf_event_update(event, &event->hw, event->hw.idx);
 }
 
 static const struct pmu pmu = {
@@ -2102,14 +2102,14 @@ static const struct pmu pmu = {
 	.unthrottle	= x86_pmu_unthrottle,
 };
 
-const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
+const struct pmu *hw_perf_event_init(struct perf_event *event)
 {
 	int err;
 
-	err = __hw_perf_counter_init(counter);
+	err = __hw_perf_event_init(event);
 	if (err) {
-		if (counter->destroy)
-			counter->destroy(counter);
+		if (event->destroy)
+			event->destroy(event);
 		return ERR_PTR(err);
 	}
 
@@ -2292,7 +2292,7 @@ struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
 	return entry;
 }
 
-void hw_perf_counter_setup_online(int cpu)
+void hw_perf_event_setup_online(int cpu)
 {
 	init_debug_store_on_cpu(cpu);
 }

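A note on the overflow test the renamed handlers share: each counter is armed with the negative of its sample period, so its top implemented bit (the sign bit) stays set until the period elapses and the value wraps past zero. That is why the handlers skip any counter whose `val & (1ULL << (x86_pmu.event_bits - 1))` is still set, and why the P6 comment above pins event_bits to 32 even though 40 bits are implemented (bits 32-39 merely mirror bit 31). A minimal user-space sketch of the arithmetic; the 48-bit width and the period are illustrative values, not taken from this patch:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		const int event_bits = 48;		/* e.g. the AMD PMU above */
		const uint64_t mask = (1ULL << event_bits) - 1;
		const uint64_t period = 100000;		/* illustrative sample period */

		/* arming: program the counter with -period, truncated to its width */
		uint64_t count = (0 - period) & mask;
		printf("armed:      sign bit = %d\n",
		       !!(count & (1ULL << (event_bits - 1))));

		/* after "period" events the counter wraps past zero */
		count = (count + period) & mask;
		printf("overflowed: sign bit = %d\n",
		       !!(count & (1ULL << (event_bits - 1))));
		return 0;
	}

Running it prints a set sign bit when armed and a clear one afterwards, which is exactly the condition the handlers treat as an overflow.
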
+ 1 - 1
arch/x86/kernel/cpu/perfctr-watchdog.c

@@ -20,7 +20,7 @@
 #include <linux/kprobes.h>
 
 #include <asm/apic.h>
-#include <asm/perf_counter.h>
+#include <asm/perf_event.h>
 
 struct nmi_watchdog_ctlblk {
 	unsigned int cccr_msr;

+ 1 - 1
arch/x86/kernel/entry_64.S

@@ -1021,7 +1021,7 @@ apicinterrupt ERROR_APIC_VECTOR \
 apicinterrupt SPURIOUS_APIC_VECTOR \
 	spurious_interrupt smp_spurious_interrupt
 
-#ifdef CONFIG_PERF_COUNTERS
+#ifdef CONFIG_PERF_EVENTS
 apicinterrupt LOCAL_PENDING_VECTOR \
 	perf_pending_interrupt smp_perf_pending_interrupt
 #endif

+ 1 - 1
arch/x86/kernel/irqinit.c

@@ -208,7 +208,7 @@ static void __init apic_intr_init(void)
 	alloc_intr_gate(ERROR_APIC_VECTOR, error_interrupt);
 
 	/* Performance monitoring interrupts: */
-# ifdef CONFIG_PERF_COUNTERS
+# ifdef CONFIG_PERF_EVENTS
 	alloc_intr_gate(LOCAL_PENDING_VECTOR, perf_pending_interrupt);
 # endif
 

+ 1 - 1
arch/x86/kernel/syscall_table_32.S

@@ -335,4 +335,4 @@ ENTRY(sys_call_table)
 	.long sys_preadv
 	.long sys_pwritev
 	.long sys_rt_tgsigqueueinfo	/* 335 */
-	.long sys_perf_counter_open
+	.long sys_perf_event_open

+ 4 - 4
arch/x86/mm/fault.c

@@ -10,7 +10,7 @@
 #include <linux/bootmem.h>		/* max_low_pfn			*/
 #include <linux/kprobes.h>		/* __kprobes, ...		*/
 #include <linux/mmiotrace.h>		/* kmmio_handler, ...		*/
-#include <linux/perf_counter.h>		/* perf_swcounter_event		*/
+#include <linux/perf_event.h>		/* perf_sw_event		*/
 
 #include <asm/traps.h>			/* dotraplinkage, ...		*/
 #include <asm/pgalloc.h>		/* pgd_*(), ...			*/
@@ -1017,7 +1017,7 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
 	if (unlikely(error_code & PF_RSVD))
 		pgtable_bad(regs, error_code, address);
 
-	perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
+	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
 
 	/*
 	 * If we're in an interrupt, have no user context or are running
@@ -1114,11 +1114,11 @@ good_area:
 
 	if (fault & VM_FAULT_MAJOR) {
 		tsk->maj_flt++;
-		perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
+		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
 				     regs, address);
 	} else {
 		tsk->min_flt++;
-		perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
+		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
 				     regs, address);
 	}
 

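The renamed perf_sw_event() hooks above are what feed PERF_COUNT_SW_PAGE_FAULTS_MIN/MAJ. A hedged user-space sketch that watches its own minor faults; the mapping size is arbitrary and the raw syscall(2) invocation is spelled out because glibc has no wrapper:

	#include <linux/perf_event.h>
	#include <sys/syscall.h>
	#include <sys/mman.h>
	#include <unistd.h>
	#include <string.h>
	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		struct perf_event_attr attr;
		uint64_t faults;
		size_t i, len = 64 * 4096;	/* arbitrary mapping size */
		char *p;
		int fd;

		memset(&attr, 0, sizeof(attr));
		attr.type = PERF_TYPE_SOFTWARE;
		attr.size = sizeof(attr);
		attr.config = PERF_COUNT_SW_PAGE_FAULTS_MIN;

		/* current task, any CPU, no group leader, no flags */
		fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);

		/* first touch of each anonymous page takes a minor fault */
		p = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		for (i = 0; i < len; i += 4096)
			p[i] = 1;

		read(fd, &faults, sizeof(faults));
		printf("minor faults: %llu\n", (unsigned long long)faults);
		return 0;
	}
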
+ 2 - 2
arch/x86/oprofile/op_model_ppro.c

@@ -234,11 +234,11 @@ static void arch_perfmon_setup_counters(void)
 	if (eax.split.version_id == 0 && current_cpu_data.x86 == 6 &&
 		current_cpu_data.x86_model == 15) {
 		eax.split.version_id = 2;
-		eax.split.num_counters = 2;
+		eax.split.num_events = 2;
 		eax.split.bit_width = 40;
 	}
 
-	num_counters = eax.split.num_counters;
+	num_counters = eax.split.num_events;
 
 	op_arch_perfmon_spec.num_counters = num_counters;
 	op_arch_perfmon_spec.num_controls = num_counters;

+ 1 - 1
arch/x86/oprofile/op_x86_model.h

@@ -13,7 +13,7 @@
 #define OP_X86_MODEL_H
 
 #include <asm/types.h>
-#include <asm/perf_counter.h>
+#include <asm/perf_event.h>
 
 struct op_msr {
 	unsigned long	addr;

+ 2 - 2
drivers/char/sysrq.c

@@ -26,7 +26,7 @@
 #include <linux/proc_fs.h>
 #include <linux/nmi.h>
 #include <linux/quotaops.h>
-#include <linux/perf_counter.h>
+#include <linux/perf_event.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/suspend.h>
@@ -252,7 +252,7 @@ static void sysrq_handle_showregs(int key, struct tty_struct *tty)
 	struct pt_regs *regs = get_irq_regs();
 	if (regs)
 		show_regs(regs);
-	perf_counter_print_debug();
+	perf_event_print_debug();
 }
 static struct sysrq_key_op sysrq_showregs_op = {
 	.handler	= sysrq_handle_showregs,

+ 3 - 3
fs/exec.c

@@ -33,7 +33,7 @@
 #include <linux/string.h>
 #include <linux/init.h>
 #include <linux/pagemap.h>
-#include <linux/perf_counter.h>
+#include <linux/perf_event.h>
 #include <linux/highmem.h>
 #include <linux/spinlock.h>
 #include <linux/key.h>
@@ -923,7 +923,7 @@ void set_task_comm(struct task_struct *tsk, char *buf)
 	task_lock(tsk);
 	strlcpy(tsk->comm, buf, sizeof(tsk->comm));
 	task_unlock(tsk);
-	perf_counter_comm(tsk);
+	perf_event_comm(tsk);
 }
 
 int flush_old_exec(struct linux_binprm * bprm)
@@ -997,7 +997,7 @@ int flush_old_exec(struct linux_binprm * bprm)
 	 * security domain:
 	 */
 	if (!get_dumpable(current->mm))
-		perf_counter_exit_task(current);
+		perf_event_exit_task(current);
 
 	/* An exec changes our domain. We are no longer part of the thread
 	   group */

+ 2 - 2
include/asm-generic/unistd.h

@@ -620,8 +620,8 @@ __SYSCALL(__NR_move_pages, sys_move_pages)
 
 #define __NR_rt_tgsigqueueinfo 240
 __SYSCALL(__NR_rt_tgsigqueueinfo, sys_rt_tgsigqueueinfo)
-#define __NR_perf_counter_open 241
-__SYSCALL(__NR_perf_counter_open, sys_perf_counter_open)
+#define __NR_perf_event_open 241
+__SYSCALL(__NR_perf_event_open, sys_perf_event_open)
 
 #undef __NR_syscalls
 #define __NR_syscalls 242

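Note that only the syscall's name changes here; slot 241 (and 336 on x86-32 above) stays put, so existing binaries keep working. User space reaches the renamed call through syscall(2), since glibc ships no wrapper. A minimal self-monitoring sketch against the new ABI; the attr fields and ioctls are the ones declared in include/linux/perf_event.h later in this patch, and error handling is pared down:

	#include <linux/perf_event.h>
	#include <sys/ioctl.h>
	#include <sys/syscall.h>
	#include <unistd.h>
	#include <string.h>
	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		struct perf_event_attr attr;
		uint64_t count;
		int fd;

		memset(&attr, 0, sizeof(attr));
		attr.type = PERF_TYPE_HARDWARE;
		attr.size = sizeof(attr);
		attr.config = PERF_COUNT_HW_INSTRUCTIONS;
		attr.disabled = 1;
		attr.exclude_kernel = 1;

		/* current task, any CPU, no group leader, no flags */
		fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
		if (fd < 0) {
			perror("perf_event_open");
			return 1;
		}

		ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
		/* ... workload under test ... */
		ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

		read(fd, &count, sizeof(count));
		printf("instructions: %llu\n", (unsigned long long)count);
		close(fd);
		return 0;
	}
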
+ 7 - 7
include/linux/init_task.h

@@ -106,13 +106,13 @@ extern struct group_info init_groups;
 
 extern struct cred init_cred;
 
-#ifdef CONFIG_PERF_COUNTERS
-# define INIT_PERF_COUNTERS(tsk)					\
-	.perf_counter_mutex = 						\
-		 __MUTEX_INITIALIZER(tsk.perf_counter_mutex),		\
-	.perf_counter_list = LIST_HEAD_INIT(tsk.perf_counter_list),
+#ifdef CONFIG_PERF_EVENTS
+# define INIT_PERF_EVENTS(tsk)					\
+	.perf_event_mutex = 						\
+		 __MUTEX_INITIALIZER(tsk.perf_event_mutex),		\
+	.perf_event_list = LIST_HEAD_INIT(tsk.perf_event_list),
 #else
-# define INIT_PERF_COUNTERS(tsk)
+# define INIT_PERF_EVENTS(tsk)
 #endif
 
 /*
@@ -178,7 +178,7 @@ extern struct cred init_cred;
 	},								\
 	.dirties = INIT_PROP_LOCAL_SINGLE(dirties),			\
 	INIT_IDS							\
-	INIT_PERF_COUNTERS(tsk)						\
+	INIT_PERF_EVENTS(tsk)						\
 	INIT_TRACE_IRQFLAGS						\
 	INIT_LOCKDEP							\
 	INIT_FTRACE_GRAPH						\

+ 40 - 457
include/linux/perf_counter.h

@@ -1,5 +1,9 @@
 /*
- *  Performance counters:
+ *  NOTE: this file will be removed in a future kernel release, it is
+ *  provided as a courtesy copy of user-space code that relies on the
+ *  old (pre-rename) symbols and constants.
+ *
+ *  Performance events:
  *
  *    Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de>
  *    Copyright (C) 2008-2009, Red Hat, Inc., Ingo Molnar
@@ -131,19 +135,19 @@ enum perf_counter_sample_format {
  * as specified by attr.read_format:
  *
  * struct read_format {
- * 	{ u64		value;
- * 	  { u64		time_enabled; } && PERF_FORMAT_ENABLED
- * 	  { u64		time_running; } && PERF_FORMAT_RUNNING
- * 	  { u64		id;           } && PERF_FORMAT_ID
- * 	} && !PERF_FORMAT_GROUP
+ *	{ u64		value;
+ *	  { u64		time_enabled; } && PERF_FORMAT_ENABLED
+ *	  { u64		time_running; } && PERF_FORMAT_RUNNING
+ *	  { u64		id;           } && PERF_FORMAT_ID
+ *	} && !PERF_FORMAT_GROUP
  *
- * 	{ u64		nr;
- * 	  { u64		time_enabled; } && PERF_FORMAT_ENABLED
- * 	  { u64		time_running; } && PERF_FORMAT_RUNNING
- * 	  { u64		value;
- * 	    { u64	id;           } && PERF_FORMAT_ID
- * 	  }		cntr[nr];
- * 	} && PERF_FORMAT_GROUP
+ *	{ u64		nr;
+ *	  { u64		time_enabled; } && PERF_FORMAT_ENABLED
+ *	  { u64		time_running; } && PERF_FORMAT_RUNNING
+ *	  { u64		value;
+ *	    { u64	id;           } && PERF_FORMAT_ID
+ *	  }		cntr[nr];
+ *	} && PERF_FORMAT_GROUP
  * };
  */
 enum perf_counter_read_format {
@@ -314,9 +318,9 @@ enum perf_event_type {
 
 	/*
 	 * struct {
-	 * 	struct perf_event_header	header;
-	 * 	u64				id;
-	 * 	u64				lost;
+	 *	struct perf_event_header	header;
+	 *	u64				id;
+	 *	u64				lost;
 	 * };
 	 */
 	PERF_EVENT_LOST			= 2,
@@ -364,10 +368,10 @@ enum perf_event_type {
 
 	/*
 	 * struct {
-	 * 	struct perf_event_header	header;
-	 * 	u32				pid, tid;
+	 *	struct perf_event_header	header;
+	 *	u32				pid, tid;
 	 *
-	 * 	struct read_format		values;
+	 *	struct read_format		values;
 	 * };
 	 */
 	PERF_EVENT_READ			= 8,
@@ -383,23 +387,23 @@ enum perf_event_type {
 	 *	{ u64			id;	  } && PERF_SAMPLE_ID
 	 *	{ u64			stream_id;} && PERF_SAMPLE_STREAM_ID
 	 *	{ u32			cpu, res; } && PERF_SAMPLE_CPU
-	 * 	{ u64			period;   } && PERF_SAMPLE_PERIOD
+	 *	{ u64			period;   } && PERF_SAMPLE_PERIOD
 	 *
 	 *	{ struct read_format	values;	  } && PERF_SAMPLE_READ
 	 *
 	 *	{ u64			nr,
 	 *	  u64			ips[nr];  } && PERF_SAMPLE_CALLCHAIN
 	 *
-	 * 	#
-	 * 	# The RAW record below is opaque data wrt the ABI
-	 * 	#
-	 * 	# That is, the ABI doesn't make any promises wrt to
-	 * 	# the stability of its content, it may vary depending
-	 * 	# on event, hardware, kernel version and phase of
-	 * 	# the moon.
-	 * 	#
-	 * 	# In other words, PERF_SAMPLE_RAW contents are not an ABI.
-	 * 	#
+	 *	#
+	 *	# The RAW record below is opaque data wrt the ABI
+	 *	#
+	 *	# That is, the ABI doesn't make any promises wrt to
+	 *	# the stability of its content, it may vary depending
+	 *	# on event, hardware, kernel version and phase of
+	 *	# the moon.
+	 *	#
+	 *	# In other words, PERF_SAMPLE_RAW contents are not an ABI.
+	 *	#
 	 *
 	 *	{ u32			size;
 	 *	  char                  data[size];}&& PERF_SAMPLE_RAW
@@ -422,437 +426,16 @@ enum perf_callchain_context {
 	PERF_CONTEXT_MAX		= (__u64)-4095,
 };
 
-#define PERF_FLAG_FD_NO_GROUP	(1U << 0)
-#define PERF_FLAG_FD_OUTPUT	(1U << 1)
+#define PERF_FLAG_FD_NO_GROUP		(1U << 0)
+#define PERF_FLAG_FD_OUTPUT		(1U << 1)
 
-#ifdef __KERNEL__
 /*
- * Kernel-internal data types and definitions:
- */
-
-#ifdef CONFIG_PERF_COUNTERS
-# include <asm/perf_counter.h>
-#endif
-
-#include <linux/list.h>
-#include <linux/mutex.h>
-#include <linux/rculist.h>
-#include <linux/rcupdate.h>
-#include <linux/spinlock.h>
-#include <linux/hrtimer.h>
-#include <linux/fs.h>
-#include <linux/pid_namespace.h>
-#include <asm/atomic.h>
-
-#define PERF_MAX_STACK_DEPTH		255
-
-struct perf_callchain_entry {
-	__u64				nr;
-	__u64				ip[PERF_MAX_STACK_DEPTH];
-};
-
-struct perf_raw_record {
-	u32				size;
-	void				*data;
-};
-
-struct task_struct;
-
-/**
- * struct hw_perf_counter - performance counter hardware details:
+ * In case some app still references the old symbols:
  */
  */
-#ifdef CONFIG_PERF_COUNTERS
-	union {
-		struct { /* hardware */
-			u64		config;
-			unsigned long	config_base;
-			unsigned long	counter_base;
-			int		idx;
-		};
-		union { /* software */
-			atomic64_t	count;
-			struct hrtimer	hrtimer;
-		};
-	};
-	atomic64_t			prev_count;
-	u64				sample_period;
-	u64				last_period;
-	atomic64_t			period_left;
-	u64				interrupts;
-
-	u64				freq_count;
-	u64				freq_interrupts;
-	u64				freq_stamp;
-#endif
-};
-
-struct perf_counter;
-
-/**
- * struct pmu - generic performance monitoring unit
- */
-struct pmu {
-	int (*enable)			(struct perf_counter *counter);
-	void (*disable)			(struct perf_counter *counter);
-	void (*read)			(struct perf_counter *counter);
-	void (*unthrottle)		(struct perf_counter *counter);
-};
-
-/**
- * enum perf_counter_active_state - the states of a counter
- */
-enum perf_counter_active_state {
-	PERF_COUNTER_STATE_ERROR	= -2,
-	PERF_COUNTER_STATE_OFF		= -1,
-	PERF_COUNTER_STATE_INACTIVE	=  0,
-	PERF_COUNTER_STATE_ACTIVE	=  1,
-};
-
-struct file;
 
-struct perf_mmap_data {
-	struct rcu_head			rcu_head;
-	int				nr_pages;	/* nr of data pages  */
-	int				writable;	/* are we writable   */
-	int				nr_locked;	/* nr pages mlocked  */
+#define __NR_perf_counter_open		__NR_perf_event_open
 
-	atomic_t			poll;		/* POLL_ for wakeups */
-	atomic_t			events;		/* event limit       */
+#define PR_TASK_PERF_COUNTERS_DISABLE	PR_TASK_PERF_EVENTS_DISABLE
+#define PR_TASK_PERF_COUNTERS_ENABLE	PR_TASK_PERF_EVENTS_ENABLE
 
-	atomic_long_t			head;		/* write position    */
-	atomic_long_t			done_head;	/* completed head    */
-
-	atomic_t			lock;		/* concurrent writes */
-	atomic_t			wakeup;		/* needs a wakeup    */
-	atomic_t			lost;		/* nr records lost   */
-
-	long				watermark;	/* wakeup watermark  */
-
-	struct perf_counter_mmap_page   *user_page;
-	void				*data_pages[0];
-};
-
-struct perf_pending_entry {
-	struct perf_pending_entry *next;
-	void (*func)(struct perf_pending_entry *);
-};
-
-/**
- * struct perf_counter - performance counter kernel representation:
- */
-struct perf_counter {
-#ifdef CONFIG_PERF_COUNTERS
-	struct list_head		list_entry;
-	struct list_head		event_entry;
-	struct list_head		sibling_list;
-	int				nr_siblings;
-	struct perf_counter		*group_leader;
-	struct perf_counter		*output;
-	const struct pmu		*pmu;
-
-	enum perf_counter_active_state	state;
-	atomic64_t			count;
-
-	/*
-	 * These are the total time in nanoseconds that the counter
-	 * has been enabled (i.e. eligible to run, and the task has
-	 * been scheduled in, if this is a per-task counter)
-	 * and running (scheduled onto the CPU), respectively.
-	 *
-	 * They are computed from tstamp_enabled, tstamp_running and
-	 * tstamp_stopped when the counter is in INACTIVE or ACTIVE state.
-	 */
-	u64				total_time_enabled;
-	u64				total_time_running;
-
-	/*
-	 * These are timestamps used for computing total_time_enabled
-	 * and total_time_running when the counter is in INACTIVE or
-	 * ACTIVE state, measured in nanoseconds from an arbitrary point
-	 * in time.
-	 * tstamp_enabled: the notional time when the counter was enabled
-	 * tstamp_running: the notional time when the counter was scheduled on
-	 * tstamp_stopped: in INACTIVE state, the notional time when the
-	 *	counter was scheduled off.
-	 */
-	u64				tstamp_enabled;
-	u64				tstamp_running;
-	u64				tstamp_stopped;
-
-	struct perf_counter_attr	attr;
-	struct hw_perf_counter		hw;
-
-	struct perf_counter_context	*ctx;
-	struct file			*filp;
-
-	/*
-	 * These accumulate total time (in nanoseconds) that children
-	 * counters have been enabled and running, respectively.
-	 */
-	atomic64_t			child_total_time_enabled;
-	atomic64_t			child_total_time_running;
-
-	/*
-	 * Protect attach/detach and child_list:
-	 */
-	struct mutex			child_mutex;
-	struct list_head		child_list;
-	struct perf_counter		*parent;
-
-	int				oncpu;
-	int				cpu;
-
-	struct list_head		owner_entry;
-	struct task_struct		*owner;
-
-	/* mmap bits */
-	struct mutex			mmap_mutex;
-	atomic_t			mmap_count;
-	struct perf_mmap_data		*data;
-
-	/* poll related */
-	wait_queue_head_t		waitq;
-	struct fasync_struct		*fasync;
-
-	/* delayed work for NMIs and such */
-	int				pending_wakeup;
-	int				pending_kill;
-	int				pending_disable;
-	struct perf_pending_entry	pending;
-
-	atomic_t			event_limit;
-
-	void (*destroy)(struct perf_counter *);
-	struct rcu_head			rcu_head;
-
-	struct pid_namespace		*ns;
-	u64				id;
-#endif
-};
-
-/**
- * struct perf_counter_context - counter context structure
- *
- * Used as a container for task counters and CPU counters as well:
- */
-struct perf_counter_context {
-	/*
-	 * Protect the states of the counters in the list,
-	 * nr_active, and the list:
-	 */
-	spinlock_t			lock;
-	/*
-	 * Protect the list of counters.  Locking either mutex or lock
-	 * is sufficient to ensure the list doesn't change; to change
-	 * the list you need to lock both the mutex and the spinlock.
-	 */
-	struct mutex			mutex;
-
-	struct list_head		counter_list;
-	struct list_head		event_list;
-	int				nr_counters;
-	int				nr_active;
-	int				is_active;
-	int				nr_stat;
-	atomic_t			refcount;
-	struct task_struct		*task;
-
-	/*
-	 * Context clock, runs when context enabled.
-	 */
-	u64				time;
-	u64				timestamp;
-
-	/*
-	 * These fields let us detect when two contexts have both
-	 * been cloned (inherited) from a common ancestor.
-	 */
-	struct perf_counter_context	*parent_ctx;
-	u64				parent_gen;
-	u64				generation;
-	int				pin_count;
-	struct rcu_head			rcu_head;
-};
-
-/**
- * struct perf_counter_cpu_context - per cpu counter context structure
- */
-struct perf_cpu_context {
-	struct perf_counter_context	ctx;
-	struct perf_counter_context	*task_ctx;
-	int				active_oncpu;
-	int				max_pertask;
-	int				exclusive;
-
-	/*
-	 * Recursion avoidance:
-	 *
-	 * task, softirq, irq, nmi context
-	 */
-	int				recursion[4];
-};
-
-struct perf_output_handle {
-	struct perf_counter	*counter;
-	struct perf_mmap_data	*data;
-	unsigned long		head;
-	unsigned long		offset;
-	int			nmi;
-	int			sample;
-	int			locked;
-	unsigned long		flags;
-};
-
-#ifdef CONFIG_PERF_COUNTERS
-
-/*
- * Set by architecture code:
- */
-extern int perf_max_counters;
-
-extern const struct pmu *hw_perf_counter_init(struct perf_counter *counter);
-
-extern void perf_counter_task_sched_in(struct task_struct *task, int cpu);
-extern void perf_counter_task_sched_out(struct task_struct *task,
-					struct task_struct *next, int cpu);
-extern void perf_counter_task_tick(struct task_struct *task, int cpu);
-extern int perf_counter_init_task(struct task_struct *child);
-extern void perf_counter_exit_task(struct task_struct *child);
-extern void perf_counter_free_task(struct task_struct *task);
-extern void set_perf_counter_pending(void);
-extern void perf_counter_do_pending(void);
-extern void perf_counter_print_debug(void);
-extern void __perf_disable(void);
-extern bool __perf_enable(void);
-extern void perf_disable(void);
-extern void perf_enable(void);
-extern int perf_counter_task_disable(void);
-extern int perf_counter_task_enable(void);
-extern int hw_perf_group_sched_in(struct perf_counter *group_leader,
-	       struct perf_cpu_context *cpuctx,
-	       struct perf_counter_context *ctx, int cpu);
-extern void perf_counter_update_userpage(struct perf_counter *counter);
-
-struct perf_sample_data {
-	u64				type;
-
-	u64				ip;
-	struct {
-		u32	pid;
-		u32	tid;
-	}				tid_entry;
-	u64				time;
-	u64				addr;
-	u64				id;
-	u64				stream_id;
-	struct {
-		u32	cpu;
-		u32	reserved;
-	}				cpu_entry;
-	u64				period;
-	struct perf_callchain_entry	*callchain;
-	struct perf_raw_record		*raw;
-};
-
-extern void perf_output_sample(struct perf_output_handle *handle,
-			       struct perf_event_header *header,
-			       struct perf_sample_data *data,
-			       struct perf_counter *counter);
-extern void perf_prepare_sample(struct perf_event_header *header,
-				struct perf_sample_data *data,
-				struct perf_counter *counter,
-				struct pt_regs *regs);
-
-extern int perf_counter_overflow(struct perf_counter *counter, int nmi,
-				 struct perf_sample_data *data,
-				 struct pt_regs *regs);
-
-/*
- * Return 1 for a software counter, 0 for a hardware counter
- */
-static inline int is_software_counter(struct perf_counter *counter)
-{
-	return (counter->attr.type != PERF_TYPE_RAW) &&
-		(counter->attr.type != PERF_TYPE_HARDWARE) &&
-		(counter->attr.type != PERF_TYPE_HW_CACHE);
-}
-
-extern atomic_t perf_swcounter_enabled[PERF_COUNT_SW_MAX];
-
-extern void __perf_swcounter_event(u32, u64, int, struct pt_regs *, u64);
-
-static inline void
-perf_swcounter_event(u32 event, u64 nr, int nmi, struct pt_regs *regs, u64 addr)
-{
-	if (atomic_read(&perf_swcounter_enabled[event]))
-		__perf_swcounter_event(event, nr, nmi, regs, addr);
-}
-
-extern void __perf_counter_mmap(struct vm_area_struct *vma);
-
-static inline void perf_counter_mmap(struct vm_area_struct *vma)
-{
-	if (vma->vm_flags & VM_EXEC)
-		__perf_counter_mmap(vma);
-}
-
-extern void perf_counter_comm(struct task_struct *tsk);
-extern void perf_counter_fork(struct task_struct *tsk);
-
-extern struct perf_callchain_entry *perf_callchain(struct pt_regs *regs);
-
-extern int sysctl_perf_counter_paranoid;
-extern int sysctl_perf_counter_mlock;
-extern int sysctl_perf_counter_sample_rate;
-
-extern void perf_counter_init(void);
-extern void perf_tpcounter_event(int event_id, u64 addr, u64 count,
-				 void *record, int entry_size);
-
-#ifndef perf_misc_flags
-#define perf_misc_flags(regs)	(user_mode(regs) ? PERF_EVENT_MISC_USER : \
-				 PERF_EVENT_MISC_KERNEL)
-#define perf_instruction_pointer(regs)	instruction_pointer(regs)
-#endif
-
-extern int perf_output_begin(struct perf_output_handle *handle,
-			     struct perf_counter *counter, unsigned int size,
-			     int nmi, int sample);
-extern void perf_output_end(struct perf_output_handle *handle);
-extern void perf_output_copy(struct perf_output_handle *handle,
-			     const void *buf, unsigned int len);
-#else
-static inline void
-perf_counter_task_sched_in(struct task_struct *task, int cpu)		{ }
-static inline void
-perf_counter_task_sched_out(struct task_struct *task,
-			    struct task_struct *next, int cpu)		{ }
-static inline void
-perf_counter_task_tick(struct task_struct *task, int cpu)		{ }
-static inline int perf_counter_init_task(struct task_struct *child)	{ return 0; }
-static inline void perf_counter_exit_task(struct task_struct *child)	{ }
-static inline void perf_counter_free_task(struct task_struct *task)	{ }
-static inline void perf_counter_do_pending(void)			{ }
-static inline void perf_counter_print_debug(void)			{ }
-static inline void perf_disable(void)					{ }
-static inline void perf_enable(void)					{ }
-static inline int perf_counter_task_disable(void)	{ return -EINVAL; }
-static inline int perf_counter_task_enable(void)	{ return -EINVAL; }
-
-static inline void
-perf_swcounter_event(u32 event, u64 nr, int nmi,
-		     struct pt_regs *regs, u64 addr)			{ }
-
-static inline void perf_counter_mmap(struct vm_area_struct *vma)	{ }
-static inline void perf_counter_comm(struct task_struct *tsk)		{ }
-static inline void perf_counter_fork(struct task_struct *tsk)		{ }
-static inline void perf_counter_init(void)				{ }
-
-#endif
-
-#define perf_output_put(handle, x) \
-	perf_output_copy((handle), &(x), sizeof(x))
-
-#endif /* __KERNEL__ */
 #endif /* _LINUX_PERF_COUNTER_H */

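The surviving tail of the courtesy header keeps pre-rename user-space source compiling: the old syscall name and prctl constants simply forward to the new ones. A hypothetical caller, unchanged from before the rename; perf_counter_open_compat() and the untyped attr pointer are illustrative only:

	#include <sys/syscall.h>
	#include <unistd.h>
	#include <linux/perf_counter.h>		/* the courtesy copy above */

	/* hypothetical pre-rename caller; attr is left untyped on purpose */
	static long perf_counter_open_compat(void *attr)
	{
		/* __NR_perf_counter_open now expands to __NR_perf_event_open */
		return syscall(__NR_perf_counter_open, attr, 0, -1, -1, 0);
	}
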
+ 858 - 0
include/linux/perf_event.h

@@ -0,0 +1,858 @@
+/*
+ * Performance events:
+ *
+ *    Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de>
+ *    Copyright (C) 2008-2009, Red Hat, Inc., Ingo Molnar
+ *    Copyright (C) 2008-2009, Red Hat, Inc., Peter Zijlstra
+ *
+ * Data type definitions, declarations, prototypes.
+ *
+ *    Started by: Thomas Gleixner and Ingo Molnar
+ *
+ * For licencing details see kernel-base/COPYING
+ */
+#ifndef _LINUX_PERF_EVENT_H
+#define _LINUX_PERF_EVENT_H
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+#include <asm/byteorder.h>
+
+/*
+ * User-space ABI bits:
+ */
+
+/*
+ * attr.type
+ */
+enum perf_type_id {
+	PERF_TYPE_HARDWARE			= 0,
+	PERF_TYPE_SOFTWARE			= 1,
+	PERF_TYPE_TRACEPOINT			= 2,
+	PERF_TYPE_HW_CACHE			= 3,
+	PERF_TYPE_RAW				= 4,
+
+	PERF_TYPE_MAX,				/* non-ABI */
+};
+
+/*
+ * Generalized performance event types, used by the
+ * attr.config parameter of the sys_perf_event_open()
+ * syscall:
+ */
+enum perf_hw_id {
+	/*
+	 * Common hardware events, generalized by the kernel:
+	 */
+	PERF_COUNT_HW_CPU_CYCLES		= 0,
+	PERF_COUNT_HW_INSTRUCTIONS		= 1,
+	PERF_COUNT_HW_CACHE_REFERENCES		= 2,
+	PERF_COUNT_HW_CACHE_MISSES		= 3,
+	PERF_COUNT_HW_BRANCH_INSTRUCTIONS	= 4,
+	PERF_COUNT_HW_BRANCH_MISSES		= 5,
+	PERF_COUNT_HW_BUS_CYCLES		= 6,
+
+	PERF_COUNT_HW_MAX,			/* non-ABI */
+};
+
+/*
+ * Generalized hardware cache events:
+ *
+ *       { L1-D, L1-I, LLC, ITLB, DTLB, BPU } x
+ *       { read, write, prefetch } x
+ *       { accesses, misses }
+ */
+enum perf_hw_cache_id {
+	PERF_COUNT_HW_CACHE_L1D			= 0,
+	PERF_COUNT_HW_CACHE_L1I			= 1,
+	PERF_COUNT_HW_CACHE_LL			= 2,
+	PERF_COUNT_HW_CACHE_DTLB		= 3,
+	PERF_COUNT_HW_CACHE_ITLB		= 4,
+	PERF_COUNT_HW_CACHE_BPU			= 5,
+
+	PERF_COUNT_HW_CACHE_MAX,		/* non-ABI */
+};
+
+enum perf_hw_cache_op_id {
+	PERF_COUNT_HW_CACHE_OP_READ		= 0,
+	PERF_COUNT_HW_CACHE_OP_WRITE		= 1,
+	PERF_COUNT_HW_CACHE_OP_PREFETCH		= 2,
+
+	PERF_COUNT_HW_CACHE_OP_MAX,		/* non-ABI */
+};
+
+enum perf_hw_cache_op_result_id {
+	PERF_COUNT_HW_CACHE_RESULT_ACCESS	= 0,
+	PERF_COUNT_HW_CACHE_RESULT_MISS		= 1,
+
+	PERF_COUNT_HW_CACHE_RESULT_MAX,		/* non-ABI */
+};
+
+/*
+ * Special "software" events provided by the kernel, even if the hardware
+ * does not support performance events. These events measure various
+ * physical and sw events of the kernel (and allow the profiling of them as
+ * well):
+ */
+enum perf_sw_ids {
+	PERF_COUNT_SW_CPU_CLOCK			= 0,
+	PERF_COUNT_SW_TASK_CLOCK		= 1,
+	PERF_COUNT_SW_PAGE_FAULTS		= 2,
+	PERF_COUNT_SW_CONTEXT_SWITCHES		= 3,
+	PERF_COUNT_SW_CPU_MIGRATIONS		= 4,
+	PERF_COUNT_SW_PAGE_FAULTS_MIN		= 5,
+	PERF_COUNT_SW_PAGE_FAULTS_MAJ		= 6,
+
+	PERF_COUNT_SW_MAX,			/* non-ABI */
+};
+
+/*
+ * Bits that can be set in attr.sample_type to request information
+ * in the overflow packets.
+ */
+enum perf_event_sample_format {
+	PERF_SAMPLE_IP				= 1U << 0,
+	PERF_SAMPLE_TID				= 1U << 1,
+	PERF_SAMPLE_TIME			= 1U << 2,
+	PERF_SAMPLE_ADDR			= 1U << 3,
+	PERF_SAMPLE_READ			= 1U << 4,
+	PERF_SAMPLE_CALLCHAIN			= 1U << 5,
+	PERF_SAMPLE_ID				= 1U << 6,
+	PERF_SAMPLE_CPU				= 1U << 7,
+	PERF_SAMPLE_PERIOD			= 1U << 8,
+	PERF_SAMPLE_STREAM_ID			= 1U << 9,
+	PERF_SAMPLE_RAW				= 1U << 10,
+
+	PERF_SAMPLE_MAX = 1U << 11,		/* non-ABI */
+};
+
+/*
+ * The format of the data returned by read() on a perf event fd,
+ * as specified by attr.read_format:
+ *
+ * struct read_format {
+ *	{ u64		value;
+ *	  { u64		time_enabled; } && PERF_FORMAT_ENABLED
+ *	  { u64		time_running; } && PERF_FORMAT_RUNNING
+ *	  { u64		id;           } && PERF_FORMAT_ID
+ *	} && !PERF_FORMAT_GROUP
+ *
+ *	{ u64		nr;
+ *	  { u64		time_enabled; } && PERF_FORMAT_ENABLED
+ *	  { u64		time_running; } && PERF_FORMAT_RUNNING
+ *	  { u64		value;
+ *	    { u64	id;           } && PERF_FORMAT_ID
+ *	  }		cntr[nr];
+ *	} && PERF_FORMAT_GROUP
+ * };
+ */
+enum perf_event_read_format {
+	PERF_FORMAT_TOTAL_TIME_ENABLED		= 1U << 0,
+	PERF_FORMAT_TOTAL_TIME_RUNNING		= 1U << 1,
+	PERF_FORMAT_ID				= 1U << 2,
+	PERF_FORMAT_GROUP			= 1U << 3,
+
+	PERF_FORMAT_MAX = 1U << 4,		/* non-ABI */
+};
+
+#define PERF_ATTR_SIZE_VER0	64	/* sizeof first published struct */
+
+/*
+ * Hardware event to monitor via a performance monitoring event:
+ */
+struct perf_event_attr {
+
+	/*
+	 * Major type: hardware/software/tracepoint/etc.
+	 */
+	__u32			type;
+
+	/*
+	 * Size of the attr structure, for fwd/bwd compat.
+	 */
+	__u32			size;
+
+	/*
+	 * Type specific configuration information.
+	 */
+	__u64			config;
+
+	union {
+		__u64		sample_period;
+		__u64		sample_freq;
+	};
+
+	__u64			sample_type;
+	__u64			read_format;
+
+	__u64			disabled       :  1, /* off by default        */
+				inherit	       :  1, /* children inherit it   */
+				pinned	       :  1, /* must always be on PMU */
+				exclusive      :  1, /* only group on PMU     */
+				exclude_user   :  1, /* don't count user      */
+				exclude_kernel :  1, /* ditto kernel          */
+				exclude_hv     :  1, /* ditto hypervisor      */
+				exclude_idle   :  1, /* don't count when idle */
+				mmap           :  1, /* include mmap data     */
+				comm	       :  1, /* include comm data     */
+				freq           :  1, /* use freq, not period  */
+				inherit_stat   :  1, /* per task counts       */
+				enable_on_exec :  1, /* next exec enables     */
+				task           :  1, /* trace fork/exit       */
+				watermark      :  1, /* wakeup_watermark      */
+
+				__reserved_1   : 49;
+
+	union {
+		__u32		wakeup_events;	  /* wakeup every n events */
+		__u32		wakeup_watermark; /* bytes before wakeup   */
+	};
+	__u32			__reserved_2;
+
+	__u64			__reserved_3;
+};
+
+/*
+ * Ioctls that can be done on a perf event fd:
+ */
+#define PERF_EVENT_IOC_ENABLE		_IO ('$', 0)
+#define PERF_EVENT_IOC_DISABLE		_IO ('$', 1)
+#define PERF_EVENT_IOC_REFRESH		_IO ('$', 2)
+#define PERF_EVENT_IOC_RESET		_IO ('$', 3)
+#define PERF_EVENT_IOC_PERIOD		_IOW('$', 4, u64)
+#define PERF_EVENT_IOC_SET_OUTPUT	_IO ('$', 5)
+
+enum perf_event_ioc_flags {
+	PERF_IOC_FLAG_GROUP		= 1U << 0,
+};
+
+/*
+ * Structure of the page that can be mapped via mmap
+ */
+struct perf_event_mmap_page {
+	__u32	version;		/* version number of this structure */
+	__u32	compat_version;		/* lowest version this is compat with */
+
+	/*
+	 * Bits needed to read the hw events in user-space.
+	 *
+	 *   u32 seq;
+	 *   s64 count;
+	 *
+	 *   do {
+	 *     seq = pc->lock;
+	 *
+	 *     barrier()
+	 *     if (pc->index) {
+	 *       count = pmc_read(pc->index - 1);
+	 *       count += pc->offset;
+	 *     } else
+	 *       goto regular_read;
+	 *
+	 *     barrier();
+	 *   } while (pc->lock != seq);
+	 *
+	 * NOTE: for obvious reason this only works on self-monitoring
+	 *       processes.
+	 */
+	__u32	lock;			/* seqlock for synchronization */
+	__u32	index;			/* hardware event identifier */
+	__s64	offset;			/* add to hardware event value */
+	__u64	time_enabled;		/* time event active */
+	__u64	time_running;		/* time event on cpu */
+
+		/*
+		 * Hole for extension of the self monitor capabilities
+		 */
+
+	__u64	__reserved[123];	/* align to 1k */
+
+	/*
+	 * Control data for the mmap() data buffer.
+	 *
+	 * User-space reading the @data_head value should issue an rmb(), on
+	 * SMP capable platforms, after reading this value -- see
+	 * perf_event_wakeup().
+	 *
+	 * When the mapping is PROT_WRITE the @data_tail value should be
+	 * written by userspace to reflect the last read data. In this case
+	 * the kernel will not over-write unread data.
+	 */
+	__u64   data_head;		/* head in the data section */
+	__u64	data_tail;		/* user-space written tail */
+};
+
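The lock/index/offset recipe documented in perf_event_mmap_page above is a seqlock read: retry whenever the kernel bumped pc->lock mid-read. A hedged x86 user-space rendering for a self-monitoring task; it assumes the kernel permits ring-3 rdpmc and that the event fd has been mmap()ed, and a zero index means the event is not currently on a hardware counter, so the caller must fall back to read(2):

	#include <stdint.h>
	#include <linux/perf_event.h>

	static inline uint64_t rdpmc(uint32_t counter)
	{
		uint32_t lo, hi;

		asm volatile("rdpmc" : "=a" (lo), "=d" (hi) : "c" (counter));
		return (uint64_t)hi << 32 | lo;
	}

	/* returns -1 when the event is not currently on a hardware counter */
	static int64_t mmap_read_self(volatile struct perf_event_mmap_page *pc)
	{
		uint32_t seq;
		int64_t count;

		do {
			seq = pc->lock;
			asm volatile("" ::: "memory");	/* the barrier() above */

			if (pc->index)
				count = pc->offset + rdpmc(pc->index - 1);
			else
				count = -1;		/* fall back to read(2) */

			asm volatile("" ::: "memory");
		} while (pc->lock != seq);

		return count;
	}
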
+#define PERF_RECORD_MISC_CPUMODE_MASK		(3 << 0)
+#define PERF_RECORD_MISC_CPUMODE_UNKNOWN		(0 << 0)
+#define PERF_RECORD_MISC_KERNEL			(1 << 0)
+#define PERF_RECORD_MISC_USER			(2 << 0)
+#define PERF_RECORD_MISC_HYPERVISOR		(3 << 0)
+
+struct perf_event_header {
+	__u32	type;
+	__u16	misc;
+	__u16	size;
+};
+
+enum perf_event_type {
+
+	/*
+	 * The MMAP events record the PROT_EXEC mappings so that we can
+	 * correlate userspace IPs to code. They have the following structure:
+	 *
+	 * struct {
+	 *	struct perf_event_header	header;
+	 *
+	 *	u32				pid, tid;
+	 *	u64				addr;
+	 *	u64				len;
+	 *	u64				pgoff;
+	 *	char				filename[];
+	 * };
+	 */
+	PERF_RECORD_MMAP			= 1,
+
+	/*
+	 * struct {
+	 *	struct perf_event_header	header;
+	 *	u64				id;
+	 *	u64				lost;
+	 * };
+	 */
+	PERF_RECORD_LOST			= 2,
+
+	/*
+	 * struct {
+	 *	struct perf_event_header	header;
+	 *
+	 *	u32				pid, tid;
+	 *	char				comm[];
+	 * };
+	 */
+	PERF_RECORD_COMM			= 3,
+
+	/*
+	 * struct {
+	 *	struct perf_event_header	header;
+	 *	u32				pid, ppid;
+	 *	u32				tid, ptid;
+	 *	u64				time;
+	 * };
+	 */
+	PERF_RECORD_EXIT			= 4,
+
+	/*
+	 * struct {
+	 *	struct perf_event_header	header;
+	 *	u64				time;
+	 *	u64				id;
+	 *	u64				stream_id;
+	 * };
+	 */
+	PERF_RECORD_THROTTLE		= 5,
+	PERF_RECORD_UNTHROTTLE		= 6,
+
+	/*
+	 * struct {
+	 *	struct perf_event_header	header;
+	 *	u32				pid, ppid;
+	 *	u32				tid, ptid;
+	 *	{ u64				time;     } && PERF_SAMPLE_TIME
+	 * };
+	 */
+	PERF_RECORD_FORK			= 7,
+
+	/*
+	 * struct {
+	 * 	struct perf_event_header	header;
+	 * 	u32				pid, tid;
+	 *
+	 * 	struct read_format		values;
+	 * };
+	 */
+	PERF_RECORD_READ			= 8,
+
+	/*
+	 * struct {
+	 *	struct perf_event_header	header;
+	 *
+	 *	{ u64			ip;	  } && PERF_SAMPLE_IP
+	 *	{ u32			pid, tid; } && PERF_SAMPLE_TID
+	 *	{ u64			time;     } && PERF_SAMPLE_TIME
+	 *	{ u64			addr;     } && PERF_SAMPLE_ADDR
+	 *	{ u64			id;	  } && PERF_SAMPLE_ID
+	 *	{ u64			stream_id;} && PERF_SAMPLE_STREAM_ID
+	 *	{ u32			cpu, res; } && PERF_SAMPLE_CPU
+	 *	{ u64			period;   } && PERF_SAMPLE_PERIOD
+	 *
+	 *	{ struct read_format	values;	  } && PERF_SAMPLE_READ
+	 *
+	 *	{ u64			nr,
+	 *	  u64			ips[nr];  } && PERF_SAMPLE_CALLCHAIN
+	 *
+	 *	#
+	 *	# The RAW record below is opaque data wrt the ABI
+	 *	#
+	 *	# That is, the ABI doesn't make any promises wrt
+	 *	# the stability of its content; it may vary depending
+	 *	# on event, hardware, kernel version and phase of
+	 *	# the moon.
+	 *	#
+	 *	# In other words, PERF_SAMPLE_RAW contents are not an ABI.
+	 *	#
+	 *
+	 *	{ u32			size;
+	 *	  char			data[size]; } && PERF_SAMPLE_RAW
+	 * };
+	 */
+	PERF_RECORD_SAMPLE		= 9,
+
+	PERF_RECORD_MAX,			/* non-ABI */
+};
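As an illustration of the PERF_RECORD_SAMPLE layout above, a sketch that decodes a sample from an event opened with sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID; any other PERF_SAMPLE_* bits would splice their fields in at the positions shown in the comment:

#include <stdint.h>
#include <stdio.h>
#include <linux/perf_event.h>

struct sample_ip_tid {             /* PERF_SAMPLE_IP | PERF_SAMPLE_TID only */
	struct perf_event_header header;
	uint64_t ip;               /* && PERF_SAMPLE_IP  */
	uint32_t pid, tid;         /* && PERF_SAMPLE_TID */
};

static void print_sample(struct perf_event_header *hdr)
{
	if (hdr->type != PERF_RECORD_SAMPLE)
		return;
	struct sample_ip_tid *s = (struct sample_ip_tid *)hdr;
	printf("pid %u tid %u ip %#llx\n", s->pid, s->tid,
	       (unsigned long long)s->ip);
}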
+
+enum perf_callchain_context {
+	PERF_CONTEXT_HV			= (__u64)-32,
+	PERF_CONTEXT_KERNEL		= (__u64)-128,
+	PERF_CONTEXT_USER		= (__u64)-512,
+
+	PERF_CONTEXT_GUEST		= (__u64)-2048,
+	PERF_CONTEXT_GUEST_KERNEL	= (__u64)-2176,
+	PERF_CONTEXT_GUEST_USER		= (__u64)-2560,
+
+	PERF_CONTEXT_MAX		= (__u64)-4095,
+};
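These are sentinels, not addresses: they appear inline in a callchain's ip[] stream, and any entry at or above PERF_CONTEXT_MAX (compared as a u64) switches the context for the frames that follow it. A hedged walking sketch:

#include <stdint.h>
#include <stdio.h>
#include <linux/perf_event.h>

static void walk_chain(uint64_t nr, const uint64_t *ip)
{
	const char *ctx = "unknown";

	for (uint64_t i = 0; i < nr; i++) {
		if (ip[i] >= (uint64_t)PERF_CONTEXT_MAX) {
			if (ip[i] == (uint64_t)PERF_CONTEXT_KERNEL)
				ctx = "kernel";
			else if (ip[i] == (uint64_t)PERF_CONTEXT_USER)
				ctx = "user";
			else if (ip[i] == (uint64_t)PERF_CONTEXT_HV)
				ctx = "hv";
			continue;  /* marker consumed, not a frame */
		}
		printf("[%s] %#llx\n", ctx, (unsigned long long)ip[i]);
	}
}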
+
+#define PERF_FLAG_FD_NO_GROUP	(1U << 0)
+#define PERF_FLAG_FD_OUTPUT	(1U << 1)
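Both flags go in the final argument of the perf_event_open syscall. A hedged userspace sketch (there is no libc wrapper at this point, so raw syscall(2) is used; per my reading of the rename series, FD_OUTPUT redirects this event's output into group_fd's buffer and FD_NO_GROUP keeps group_fd from also acting as group leader):

#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

static int perf_event_open(struct perf_event_attr *attr, pid_t pid,
			   int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

/* Open a cycles counter on the current task whose records land in the
 * ring buffer already mmap()ed on output_fd. */
static int open_counter_into(int output_fd)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;

	return perf_event_open(&attr, 0, -1, output_fd,
			       PERF_FLAG_FD_NO_GROUP | PERF_FLAG_FD_OUTPUT);
}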
+
+#ifdef __KERNEL__
+/*
+ * Kernel-internal data types and definitions:
+ */
+
+#ifdef CONFIG_PERF_EVENTS
+# include <asm/perf_event.h>
+#endif
+
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/rculist.h>
+#include <linux/rcupdate.h>
+#include <linux/spinlock.h>
+#include <linux/hrtimer.h>
+#include <linux/fs.h>
+#include <linux/pid_namespace.h>
+#include <asm/atomic.h>
+
+#define PERF_MAX_STACK_DEPTH		255
+
+struct perf_callchain_entry {
+	__u64				nr;
+	__u64				ip[PERF_MAX_STACK_DEPTH];
+};
+
+struct perf_raw_record {
+	u32				size;
+	void				*data;
+};
+
+struct task_struct;
+
+/**
+ * struct hw_perf_event - performance event hardware details:
+ */
+struct hw_perf_event {
+#ifdef CONFIG_PERF_EVENTS
+	union {
+		struct { /* hardware */
+			u64		config;
+			unsigned long	config_base;
+			unsigned long	event_base;
+			int		idx;
+		};
+		union { /* software */
+			atomic64_t	count;
+			struct hrtimer	hrtimer;
+		};
+	};
+	atomic64_t			prev_count;
+	u64				sample_period;
+	u64				last_period;
+	atomic64_t			period_left;
+	u64				interrupts;
+
+	u64				freq_count;
+	u64				freq_interrupts;
+	u64				freq_stamp;
+#endif
+};
+
+struct perf_event;
+
+/**
+ * struct pmu - generic performance monitoring unit
+ */
+struct pmu {
+	int (*enable)			(struct perf_event *event);
+	void (*disable)			(struct perf_event *event);
+	void (*read)			(struct perf_event *event);
+	void (*unthrottle)		(struct perf_event *event);
+};
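Architecture and generic code each back events with one of these; hw_perf_event_init(), declared further down, is where an event gets its pmu. A deliberately inert kernel-side sketch of the shape (everything here is illustrative, not a real pmu):

/* Illustrative no-op pmu: shows the callback shape only. */
static int nop_pmu_enable(struct perf_event *event)
{
	/* claim hardware and start counting; 0 on success */
	return 0;
}

static void nop_pmu_disable(struct perf_event *event)
{
	/* stop counting and release hardware */
}

static void nop_pmu_read(struct perf_event *event)
{
	/* fold the hardware delta into event->count,
	 * e.g. with atomic64_add() */
}

static void nop_pmu_unthrottle(struct perf_event *event)
{
	/* resume counting after an interrupt-rate throttle */
}

static const struct pmu nop_pmu = {
	.enable		= nop_pmu_enable,
	.disable	= nop_pmu_disable,
	.read		= nop_pmu_read,
	.unthrottle	= nop_pmu_unthrottle,
};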
+
+/**
+ * enum perf_event_active_state - the states of an event
+ */
+enum perf_event_active_state {
+	PERF_EVENT_STATE_ERROR		= -2,
+	PERF_EVENT_STATE_OFF		= -1,
+	PERF_EVENT_STATE_INACTIVE	=  0,
+	PERF_EVENT_STATE_ACTIVE		=  1,
+};
+
+struct file;
+
+struct perf_mmap_data {
+	struct rcu_head			rcu_head;
+	int				nr_pages;	/* nr of data pages  */
+	int				writable;	/* are we writable   */
+	int				nr_locked;	/* nr pages mlocked  */
+
+	atomic_t			poll;		/* POLL_ for wakeups */
+	atomic_t			events;		/* event limit       */
+
+	atomic_long_t			head;		/* write position    */
+	atomic_long_t			done_head;	/* completed head    */
+
+	atomic_t			lock;		/* concurrent writes */
+	atomic_t			wakeup;		/* needs a wakeup    */
+	atomic_t			lost;		/* nr records lost   */
+
+	long				watermark;	/* wakeup watermark  */
+
+	struct perf_event_mmap_page	*user_page;
+	void				*data_pages[0];
+};
+
+struct perf_pending_entry {
+	struct perf_pending_entry *next;
+	void (*func)(struct perf_pending_entry *);
+};
+
+/**
+ * struct perf_event - performance event kernel representation:
+ */
+struct perf_event {
+#ifdef CONFIG_PERF_EVENTS
+	struct list_head		group_entry;
+	struct list_head		event_entry;
+	struct list_head		sibling_list;
+	int				nr_siblings;
+	struct perf_event		*group_leader;
+	struct perf_event		*output;
+	const struct pmu		*pmu;
+
+	enum perf_event_active_state	state;
+	atomic64_t			count;
+
+	/*
+	 * These are the total time in nanoseconds that the event
+	 * has been enabled (i.e. eligible to run, and the task has
+	 * been scheduled in, if this is a per-task event)
+	 * and running (scheduled onto the CPU), respectively.
+	 *
+	 * They are computed from tstamp_enabled, tstamp_running and
+	 * tstamp_stopped when the event is in INACTIVE or ACTIVE state.
+	 */
+	u64				total_time_enabled;
+	u64				total_time_running;
+
+	/*
+	 * These are timestamps used for computing total_time_enabled
+	 * and total_time_running when the event is in INACTIVE or
+	 * ACTIVE state, measured in nanoseconds from an arbitrary point
+	 * in time.
+	 * tstamp_enabled: the notional time when the event was enabled
+	 * tstamp_running: the notional time when the event was scheduled on
+	 * tstamp_stopped: in INACTIVE state, the notional time when the
+	 *	event was scheduled off.
+	 */
+	u64				tstamp_enabled;
+	u64				tstamp_running;
+	u64				tstamp_stopped;
+
+	struct perf_event_attr	attr;
+	struct hw_perf_event		hw;
+
+	struct perf_event_context	*ctx;
+	struct file			*filp;
+
+	/*
+	 * These accumulate total time (in nanoseconds) that children
+	 * events have been enabled and running, respectively.
+	 */
+	atomic64_t			child_total_time_enabled;
+	atomic64_t			child_total_time_running;
+
+	/*
+	 * Protect attach/detach and child_list:
+	 */
+	struct mutex			child_mutex;
+	struct list_head		child_list;
+	struct perf_event		*parent;
+
+	int				oncpu;
+	int				cpu;
+
+	struct list_head		owner_entry;
+	struct task_struct		*owner;
+
+	/* mmap bits */
+	struct mutex			mmap_mutex;
+	atomic_t			mmap_count;
+	struct perf_mmap_data		*data;
+
+	/* poll related */
+	wait_queue_head_t		waitq;
+	struct fasync_struct		*fasync;
+
+	/* delayed work for NMIs and such */
+	int				pending_wakeup;
+	int				pending_kill;
+	int				pending_disable;
+	struct perf_pending_entry	pending;
+
+	atomic_t			event_limit;
+
+	void (*destroy)(struct perf_event *);
+	struct rcu_head			rcu_head;
+
+	struct pid_namespace		*ns;
+	u64				id;
+#endif
+};
+
+/**
+ * struct perf_event_context - event context structure
+ *
+ * Used as a container for task events and CPU events alike:
+ */
+struct perf_event_context {
+	/*
+	 * Protect the states of the events in the list,
+	 * nr_active, and the list:
+	 */
+	spinlock_t			lock;
+	/*
+	 * Protect the list of events.  Locking either the mutex or the
+	 * spinlock is sufficient to ensure the list doesn't change; to
+	 * change the list you need to hold both.
+	 */
+	struct mutex			mutex;
+
+	struct list_head		group_list;
+	struct list_head		event_list;
+	int				nr_events;
+	int				nr_active;
+	int				is_active;
+	int				nr_stat;
+	atomic_t			refcount;
+	struct task_struct		*task;
+
+	/*
+	 * Context clock, runs when context enabled.
+	 */
+	u64				time;
+	u64				timestamp;
+
+	/*
+	 * These fields let us detect when two contexts have both
+	 * been cloned (inherited) from a common ancestor.
+	 */
+	struct perf_event_context	*parent_ctx;
+	u64				parent_gen;
+	u64				generation;
+	int				pin_count;
+	struct rcu_head			rcu_head;
+};
+
+/**
+ * struct perf_cpu_context - per cpu event context structure
+ */
+struct perf_cpu_context {
+	struct perf_event_context	ctx;
+	struct perf_event_context	*task_ctx;
+	int				active_oncpu;
+	int				max_pertask;
+	int				exclusive;
+
+	/*
+	 * Recursion avoidance:
+	 *
+	 * task, softirq, irq, nmi context
+	 */
+	int				recursion[4];
+};
+
+struct perf_output_handle {
+	struct perf_event		*event;
+	struct perf_mmap_data		*data;
+	unsigned long			head;
+	unsigned long			offset;
+	int				nmi;
+	int				sample;
+	int				locked;
+	unsigned long			flags;
+};
+
+#ifdef CONFIG_PERF_EVENTS
+
+/*
+ * Set by architecture code:
+ */
+extern int perf_max_events;
+
+extern const struct pmu *hw_perf_event_init(struct perf_event *event);
+
+extern void perf_event_task_sched_in(struct task_struct *task, int cpu);
+extern void perf_event_task_sched_out(struct task_struct *task,
+					struct task_struct *next, int cpu);
+extern void perf_event_task_tick(struct task_struct *task, int cpu);
+extern int perf_event_init_task(struct task_struct *child);
+extern void perf_event_exit_task(struct task_struct *child);
+extern void perf_event_free_task(struct task_struct *task);
+extern void set_perf_event_pending(void);
+extern void perf_event_do_pending(void);
+extern void perf_event_print_debug(void);
+extern void __perf_disable(void);
+extern bool __perf_enable(void);
+extern void perf_disable(void);
+extern void perf_enable(void);
+extern int perf_event_task_disable(void);
+extern int perf_event_task_enable(void);
+extern int hw_perf_group_sched_in(struct perf_event *group_leader,
+	       struct perf_cpu_context *cpuctx,
+	       struct perf_event_context *ctx, int cpu);
+extern void perf_event_update_userpage(struct perf_event *event);
+
+struct perf_sample_data {
+	u64				type;
+
+	u64				ip;
+	struct {
+		u32	pid;
+		u32	tid;
+	}				tid_entry;
+	u64				time;
+	u64				addr;
+	u64				id;
+	u64				stream_id;
+	struct {
+		u32	cpu;
+		u32	reserved;
+	}				cpu_entry;
+	u64				period;
+	struct perf_callchain_entry	*callchain;
+	struct perf_raw_record		*raw;
+};
+
+extern void perf_output_sample(struct perf_output_handle *handle,
+			       struct perf_event_header *header,
+			       struct perf_sample_data *data,
+			       struct perf_event *event);
+extern void perf_prepare_sample(struct perf_event_header *header,
+				struct perf_sample_data *data,
+				struct perf_event *event,
+				struct pt_regs *regs);
+
+extern int perf_event_overflow(struct perf_event *event, int nmi,
+				 struct perf_sample_data *data,
+				 struct pt_regs *regs);
+
+/*
+ * Return 1 for a software event, 0 for a hardware event
+ */
+static inline int is_software_event(struct perf_event *event)
+{
+	return (event->attr.type != PERF_TYPE_RAW) &&
+		(event->attr.type != PERF_TYPE_HARDWARE) &&
+		(event->attr.type != PERF_TYPE_HW_CACHE);
+}
+
+extern atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX];
+
+extern void __perf_sw_event(u32, u64, int, struct pt_regs *, u64);
+
+static inline void
+perf_sw_event(u32 event_id, u64 nr, int nmi, struct pt_regs *regs, u64 addr)
+{
+	if (atomic_read(&perf_swevent_enabled[event_id]))
+		__perf_sw_event(event_id, nr, nmi, regs, addr);
+}
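The atomic_read() guard makes the disabled case a single load and branch. A sketch of the call shape, modelled on how the fault paths account software events (hypothetical wrapper, not a quote of any arch's code):

/* Count one page fault at 'address' against any active
 * PERF_COUNT_SW_PAGE_FAULTS events; nearly free when none exist. */
static inline void count_page_fault(struct pt_regs *regs, unsigned long address)
{
	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
}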
+
+extern void __perf_event_mmap(struct vm_area_struct *vma);
+
+static inline void perf_event_mmap(struct vm_area_struct *vma)
+{
+	if (vma->vm_flags & VM_EXEC)
+		__perf_event_mmap(vma);
+}
+
+extern void perf_event_comm(struct task_struct *tsk);
+extern void perf_event_fork(struct task_struct *tsk);
+
+extern struct perf_callchain_entry *perf_callchain(struct pt_regs *regs);
+
+extern int sysctl_perf_event_paranoid;
+extern int sysctl_perf_event_mlock;
+extern int sysctl_perf_event_sample_rate;
+
+extern void perf_event_init(void);
+extern void perf_tp_event(int event_id, u64 addr, u64 count,
+				 void *record, int entry_size);
+
+#ifndef perf_misc_flags
+#define perf_misc_flags(regs)	(user_mode(regs) ? PERF_RECORD_MISC_USER : \
+				 PERF_RECORD_MISC_KERNEL)
+#define perf_instruction_pointer(regs)	instruction_pointer(regs)
+#endif
+
+extern int perf_output_begin(struct perf_output_handle *handle,
+			     struct perf_event *event, unsigned int size,
+			     int nmi, int sample);
+extern void perf_output_end(struct perf_output_handle *handle);
+extern void perf_output_copy(struct perf_output_handle *handle,
+			     const void *buf, unsigned int len);
+#else
+static inline void
+perf_event_task_sched_in(struct task_struct *task, int cpu)		{ }
+static inline void
+perf_event_task_sched_out(struct task_struct *task,
+			    struct task_struct *next, int cpu)		{ }
+static inline void
+perf_event_task_tick(struct task_struct *task, int cpu)			{ }
+static inline int perf_event_init_task(struct task_struct *child)	{ return 0; }
+static inline void perf_event_exit_task(struct task_struct *child)	{ }
+static inline void perf_event_free_task(struct task_struct *task)	{ }
+static inline void perf_event_do_pending(void)				{ }
+static inline void perf_event_print_debug(void)				{ }
+static inline void perf_disable(void)					{ }
+static inline void perf_enable(void)					{ }
+static inline int perf_event_task_disable(void)				{ return -EINVAL; }
+static inline int perf_event_task_enable(void)				{ return -EINVAL; }
+
+static inline void
+perf_sw_event(u32 event_id, u64 nr, int nmi,
+		     struct pt_regs *regs, u64 addr)			{ }
+
+static inline void perf_event_mmap(struct vm_area_struct *vma)		{ }
+static inline void perf_event_comm(struct task_struct *tsk)		{ }
+static inline void perf_event_fork(struct task_struct *tsk)		{ }
+static inline void perf_event_init(void)				{ }
+
+#endif
+
+#define perf_output_put(handle, x) \
+	perf_output_copy((handle), &(x), sizeof(x))
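Taken together, perf_output_begin()/perf_output_put()/perf_output_end() are the kernel-side emit path: reserve space, copy, publish. A hedged sketch that emits a PERF_RECORD_THROTTLE-shaped record per the layout comment above (field choices are illustrative; the real throttle logging lives in kernel/perf_event.c):

/* Illustrative: emit one throttle record for 'event' at time 'now'. */
static void emit_throttle(struct perf_event *event, u64 now)
{
	struct perf_output_handle handle;
	struct {
		struct perf_event_header	header;
		u64				time;
		u64				id;
		u64				stream_id;
	} rec = {
		.header	= {
			.type = PERF_RECORD_THROTTLE,
			.misc = 0,
			.size = sizeof(rec),
		},
		.time		= now,
		.id		= event->id,
		.stream_id	= event->id,	/* illustrative choice */
	};

	if (perf_output_begin(&handle, event, sizeof(rec), 1, 0))
		return;				/* no buffer or no room: drop */

	perf_output_put(&handle, rec);
	perf_output_end(&handle);
}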
+
+#endif /* __KERNEL__ */
+#endif /* _LINUX_PERF_EVENT_H */

+ 2 - 2
include/linux/prctl.h

@@ -85,7 +85,7 @@
 #define PR_SET_TIMERSLACK 29
 #define PR_GET_TIMERSLACK 30
 
-#define PR_TASK_PERF_COUNTERS_DISABLE		31
-#define PR_TASK_PERF_COUNTERS_ENABLE		32
+#define PR_TASK_PERF_EVENTS_DISABLE		31
+#define PR_TASK_PERF_EVENTS_ENABLE		32
 
 #endif /* _LINUX_PRCTL_H */

Some files were not shown because too many files changed in this diff