
Merge remote branch 'tip/perf/core' into oprofile/core

Conflicts:
	arch/arm/oprofile/common.c
	kernel/perf_event.c
Robert Richter 15 years ago
parent
commit
6268464b37
100 changed files with 1086 additions and 759 deletions
  1. CREDITS  +4 -4
  2. Documentation/kprobes.txt  +5 -3
  3. MAINTAINERS  +27 -11
  4. Makefile  +12 -1
  5. arch/Kconfig  +3 -0
  6. arch/alpha/kernel/entry.S  +7 -13
  7. arch/alpha/kernel/perf_event.c  +86 -42
  8. arch/alpha/kernel/process.c  +1 -1
  9. arch/alpha/kernel/signal.c  +15 -41
  10. arch/alpha/kernel/systbls.S  +1 -1
  11. arch/arm/Kconfig  +26 -1
  12. arch/arm/boot/compressed/Makefile  +1 -1
  13. arch/arm/common/it8152.c  +8 -0
  14. arch/arm/include/asm/pgtable.h  +4 -0
  15. arch/arm/kernel/entry-common.S  +2 -0
  16. arch/arm/kernel/perf_event.c  +92 -106
  17. arch/arm/mach-at91/at91sam9g45_devices.c  +2 -2
  18. arch/arm/mach-davinci/dm355.c  +1 -2
  19. arch/arm/mach-davinci/dm365.c  +1 -2
  20. arch/arm/mach-davinci/dm644x.c  +1 -2
  21. arch/arm/mach-davinci/dm646x.c  +1 -2
  22. arch/arm/mach-dove/include/mach/io.h  +3 -3
  23. arch/arm/mach-ixp4xx/common-pci.c  +8 -0
  24. arch/arm/mach-ixp4xx/include/mach/hardware.h  +2 -0
  25. arch/arm/mach-kirkwood/include/mach/kirkwood.h  +1 -1
  26. arch/arm/mach-kirkwood/pcie.c  +2 -2
  27. arch/arm/mach-mmp/include/mach/system.h  +6 -1
  28. arch/arm/mach-pxa/cpufreq-pxa2xx.c  +1 -2
  29. arch/arm/mach-pxa/include/mach/hardware.h  +13 -1
  30. arch/arm/mach-pxa/include/mach/io.h  +2 -0
  31. arch/arm/mach-pxa/palm27x.c  +5 -1
  32. arch/arm/mach-pxa/vpac270.c  +1 -0
  33. arch/arm/mach-u300/include/mach/gpio.h  +3 -0
  34. arch/arm/mach-vexpress/ct-ca9x4.c  +7 -1
  35. arch/arm/mm/alignment.c  +17 -2
  36. arch/arm/mm/mmu.c  +29 -2
  37. arch/arm/mm/proc-v7.S  +56 -6
  38. arch/arm/plat-nomadik/timer.c  +12 -21
  39. arch/arm/plat-omap/Kconfig  +1 -1
  40. arch/arm/plat-omap/mcbsp.c  +1 -1
  41. arch/arm/plat-omap/sram.c  +5 -20
  42. arch/avr32/kernel/module.c  +1 -2
  43. arch/h8300/kernel/module.c  +1 -2
  44. arch/m32r/include/asm/signal.h  +0 -1
  45. arch/m32r/include/asm/unistd.h  +1 -0
  46. arch/m32r/kernel/entry.S  +2 -3
  47. arch/m32r/kernel/ptrace.c  +4 -3
  48. arch/m32r/kernel/signal.c  +41 -64
  49. arch/m68k/mac/macboing.c  +3 -3
  50. arch/mips/Kconfig  +19 -2
  51. arch/mips/alchemy/common/prom.c  +2 -3
  52. arch/mips/boot/compressed/Makefile  +1 -1
  53. arch/mips/cavium-octeon/Kconfig  +4 -0
  54. arch/mips/cavium-octeon/cpu.c  +1 -1
  55. arch/mips/cavium-octeon/executive/Makefile  +1 -1
  56. arch/mips/include/asm/atomic.h  +4 -0
  57. arch/mips/include/asm/cop2.h  +1 -1
  58. arch/mips/include/asm/gic.h  +1 -0
  59. arch/mips/include/asm/mach-tx49xx/kmalloc.h  +1 -1
  60. arch/mips/include/asm/mips-boards/maltaint.h  +0 -3
  61. arch/mips/include/asm/page.h  +14 -0
  62. arch/mips/include/asm/thread_info.h  +2 -1
  63. arch/mips/include/asm/unistd.h  +15 -6
  64. arch/mips/kernel/irq-gic.c  +2 -3
  65. arch/mips/kernel/kgdb.c  +1 -1
  66. arch/mips/kernel/kspd.c  +1 -1
  67. arch/mips/kernel/linux32.c  +7 -0
  68. arch/mips/kernel/scall32-o32.S  +4 -1
  69. arch/mips/kernel/scall64-64.S  +5 -2
  70. arch/mips/kernel/scall64-n32.S  +4 -1
  71. arch/mips/kernel/scall64-o32.S  +4 -1
  72. arch/mips/mm/dma-default.c  +20 -8
  73. arch/mips/mm/sc-rm7k.c  +1 -1
  74. arch/mips/mti-malta/malta-int.c  +3 -0
  75. arch/mips/pci/pci-rc32434.c  +1 -1
  76. arch/mips/pnx8550/common/reset.c  +5 -15
  77. arch/mips/pnx8550/common/setup.c  +1 -2
  78. arch/mn10300/Kconfig  +0 -1
  79. arch/mn10300/Kconfig.debug  +1 -1
  80. arch/mn10300/include/asm/bitops.h  +2 -2
  81. arch/mn10300/include/asm/signal.h  +1 -1
  82. arch/mn10300/kernel/module.c  +1 -2
  83. arch/mn10300/kernel/signal.c  +20 -15
  84. arch/mn10300/mm/Makefile  +6 -8
  85. arch/mn10300/mm/cache-disabled.c  +21 -0
  86. arch/mn10300/mm/cache.c  +19 -1
  87. arch/parisc/kernel/module.c  +1 -2
  88. arch/powerpc/kernel/module.c  +0 -6
  89. arch/powerpc/kernel/perf_callchain.c  +25 -61
  90. arch/powerpc/kernel/perf_event.c  +101 -63
  91. arch/powerpc/kernel/perf_event_fsl_emb.c  +90 -58
  92. arch/powerpc/platforms/512x/clock.c  +1 -1
  93. arch/powerpc/platforms/52xx/efika.c  +6 -3
  94. arch/powerpc/platforms/52xx/mpc52xx_common.c  +6 -2
  95. arch/s390/kernel/module.c  +1 -2
  96. arch/sh/kernel/module.c  +0 -2
  97. arch/sh/kernel/perf_callchain.c  +4 -46
  98. arch/sh/kernel/perf_event.c  +94 -47
  99. arch/sparc/Kconfig  +1 -0
  100. arch/sparc/include/asm/jump_label.h  +32 -0

+ 4 - 4
CREDITS

@@ -3554,12 +3554,12 @@ E: cvance@nai.com
 D: portions of the Linux Security Module (LSM) framework and security modules
 
 N: Petr Vandrovec
-E: vandrove@vc.cvut.cz
+E: petr@vandrovec.name
 D: Small contributions to ncpfs
 D: Matrox framebuffer driver
-S: Chudenicka 8
-S: 10200 Prague 10, Hostivar
-S: Czech Republic
+S: 21513 Conradia Ct
+S: Cupertino, CA 95014
+S: USA
 
 N: Thibaut Varene
 E: T-Bone@parisc-linux.org

+ 5 - 3
Documentation/kprobes.txt

@@ -542,9 +542,11 @@ Kprobes does not use mutexes or allocate memory except during
 registration and unregistration.
 
 Probe handlers are run with preemption disabled.  Depending on the
-architecture, handlers may also run with interrupts disabled.  In any
-case, your handler should not yield the CPU (e.g., by attempting to
-acquire a semaphore).
+architecture and optimization state, handlers may also run with
+interrupts disabled (e.g., kretprobe handlers and optimized kprobe
+handlers run without interrupt disabled on x86/x86-64).  In any case,
+your handler should not yield the CPU (e.g., by attempting to acquire
+a semaphore).
 
 Since a return probe is implemented by replacing the return
 address with the trampoline's address, stack backtraces and calls

+ 27 - 11
MAINTAINERS

@@ -962,6 +962,13 @@ W:	http://www.fluff.org/ben/linux/
 S:	Maintained
 F:	arch/arm/mach-s3c6410/
 
+ARM/S5P ARM ARCHITECTURES
+M:	Kukjin Kim <kgene.kim@samsung.com>
+L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+L:	linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
+S:	Maintained
+F:	arch/arm/mach-s5p*/
+
 ARM/SHMOBILE ARM ARCHITECTURE
 M:	Paul Mundt <lethal@linux-sh.org>
 M:	Magnus Damm <magnus.damm@gmail.com>
@@ -1220,7 +1227,7 @@ F:	drivers/auxdisplay/
 F:	include/linux/cfag12864b.h
 
 AVR32 ARCHITECTURE
-M:	Haavard Skinnemoen <hskinnemoen@atmel.com>
+M:	Hans-Christian Egtvedt <hans-christian.egtvedt@atmel.com>
 W:	http://www.atmel.com/products/AVR32/
 W:	http://avr32linux.org/
 W:	http://avrfreaks.net/
@@ -1228,7 +1235,7 @@ S:	Supported
 F:	arch/avr32/
 
 AVR32/AT32AP MACHINE SUPPORT
-M:	Haavard Skinnemoen <hskinnemoen@atmel.com>
+M:	Hans-Christian Egtvedt <hans-christian.egtvedt@atmel.com>
 S:	Supported
 F:	arch/avr32/mach-at32ap/
 
@@ -2199,6 +2206,12 @@ W:	http://acpi4asus.sf.net
 S:	Maintained
 F:	drivers/platform/x86/eeepc-laptop.c
 
+EFIFB FRAMEBUFFER DRIVER
+L:	linux-fbdev@vger.kernel.org
+M:	Peter Jones <pjones@redhat.com>
+S:	Maintained
+F:	drivers/video/efifb.c
+
 EFS FILESYSTEM
 W:	http://aeschi.ch.eu.org/efs/
 S:	Orphan
@@ -2662,6 +2675,8 @@ M:	Guenter Roeck <guenter.roeck@ericsson.com>
 L:	lm-sensors@lm-sensors.org
 W:	http://www.lm-sensors.org/
 T:	quilt kernel.org/pub/linux/kernel/people/jdelvare/linux-2.6/jdelvare-hwmon/
+T:	quilt kernel.org/pub/linux/kernel/people/groeck/linux-staging/
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/groeck/linux-staging.git
 S:	Maintained
 F:	Documentation/hwmon/
 F:	drivers/hwmon/
@@ -3773,9 +3788,8 @@ W:	http://www.syskonnect.com
 S:	Supported
 
 MATROX FRAMEBUFFER DRIVER
-M:	Petr Vandrovec <vandrove@vc.cvut.cz>
 L:	linux-fbdev@vger.kernel.org
-S:	Maintained
+S:	Orphan
 F:	drivers/video/matrox/matroxfb_*
 F:	include/linux/matroxfb.h
 
@@ -3899,10 +3913,8 @@ F:	Documentation/serial/moxa-smartio
 F:	drivers/char/mxser.*
 
 MSI LAPTOP SUPPORT
-M:	Lennart Poettering <mzxreary@0pointer.de>
+M:	Lee, Chun-Yi <jlee@novell.com>
 L:	platform-driver-x86@vger.kernel.org
-W:	https://tango.0pointer.de/mailman/listinfo/s270-linux
-W:	http://0pointer.de/lennart/tchibo.html
 S:	Maintained
 F:	drivers/platform/x86/msi-laptop.c
 
@@ -3919,8 +3931,10 @@ S:	Supported
 F:	drivers/mfd/
 
 MULTIMEDIA CARD (MMC), SECURE DIGITAL (SD) AND SDIO SUBSYSTEM
-S:	Orphan
+M:	Chris Ball <cjb@laptop.org>
 L:	linux-mmc@vger.kernel.org
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/cjb/mmc.git
+S:	Maintained
 F:	drivers/mmc/
 F:	include/linux/mmc/
 
@@ -3962,8 +3976,8 @@ S:	Maintained
 F:	drivers/net/natsemi.c
 
 NCP FILESYSTEM
-M:	Petr Vandrovec <vandrove@vc.cvut.cz>
-S:	Maintained
+M:	Petr Vandrovec <petr@vandrovec.name>
+S:	Odd Fixes
 F:	fs/ncpfs/
 
 NCR DUAL 700 SCSI DRIVER (MICROCHANNEL)
@@ -5091,8 +5105,10 @@ S:	Maintained
 F:	drivers/mmc/host/sdricoh_cs.c
 
 SECURE DIGITAL HOST CONTROLLER INTERFACE (SDHCI) DRIVER
-S:	Orphan
+M:	Chris Ball <cjb@laptop.org>
 L:	linux-mmc@vger.kernel.org
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/cjb/mmc.git
+S:	Maintained
 F:	drivers/mmc/host/sdhci.*
 
 SECURE DIGITAL HOST CONTROLLER INTERFACE, OPEN FIRMWARE BINDINGS (SDHCI-OF)

+ 12 - 1
Makefile

@@ -1,7 +1,7 @@
 VERSION = 2
 PATCHLEVEL = 6
 SUBLEVEL = 36
-EXTRAVERSION = -rc5
+EXTRAVERSION = -rc7
 NAME = Sheep on Meth
 
 # *DOCUMENTATION*
@@ -568,6 +568,12 @@ endif
 
 ifdef CONFIG_FUNCTION_TRACER
 KBUILD_CFLAGS	+= -pg
+ifdef CONFIG_DYNAMIC_FTRACE
+	ifdef CONFIG_HAVE_C_RECORDMCOUNT
+		BUILD_C_RECORDMCOUNT := y
+		export BUILD_C_RECORDMCOUNT
+	endif
+endif
 endif
 
 # We trigger additional mismatches with less inlining
@@ -591,6 +597,11 @@ KBUILD_CFLAGS	+= $(call cc-option,-fno-strict-overflow)
 # conserve stack if available
 KBUILD_CFLAGS   += $(call cc-option,-fconserve-stack)
 
+# check for 'asm goto'
+ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC)), y)
+	KBUILD_CFLAGS += -DCC_HAVE_ASM_GOTO
+endif
+
 # Add user supplied CPPFLAGS, AFLAGS and CFLAGS as the last assignments
 # But warn user when we do so
 warn-assign = \

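The 'asm goto' probe added above defines CC_HAVE_ASM_GOTO when the compiler supports GCC's asm-goto extension, which the jump-label work elsewhere in this merge (see arch/sparc/include/asm/jump_label.h in the file list) builds on. As a rough standalone sketch of what the flag gates (the x86 jump instruction and all names here are illustrative assumptions, not kernel code):

/* Standalone sketch: branch from inline assembly to a C label.
 * Requires GCC >= 4.5 on x86; build with: gcc -DCC_HAVE_ASM_GOTO demo.c */
#include <stdio.h>

static int asm_goto_taken(void)
{
#ifdef CC_HAVE_ASM_GOTO
	/* 'asm goto' may list C labels after the fourth colon. */
	asm goto("jmp %l[taken]" : : : : taken);
	return 0;
taken:
	return 1;
#else
	return -1;	/* compiler lacks asm goto */
#endif
}

int main(void)
{
	printf("asm goto branch taken: %d\n", asm_goto_taken());
	return 0;
}

Built with the define it prints 1; without it, -1. This zero-cost branch-patching primitive is what the kernel's jump labels are layered on.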
+ 3 - 0
arch/Kconfig

@@ -158,4 +158,7 @@ config HAVE_PERF_EVENTS_NMI
 	  subsystem.  Also has support for calculating CPU cycle events
 	  to determine how many clock cycles in a given period.
 
+config HAVE_ARCH_JUMP_LABEL
+	bool
+
 source "kernel/gcov/Kconfig"

+ 7 - 13
arch/alpha/kernel/entry.S

@@ -73,8 +73,6 @@
 	ldq	$20, HAE_REG($19);	\
 	stq	$21, HAE_CACHE($19);	\
 	stq	$21, 0($20);		\
-	ldq	$0, 0($sp);		\
-	ldq	$1, 8($sp);		\
 99:;					\
 	ldq	$19, 72($sp);		\
 	ldq	$20, 80($sp);		\
@@ -316,7 +314,7 @@ ret_from_sys_call:
 	cmovne	$26, 0, $19		/* $19 = 0 => non-restartable */
 	ldq	$0, SP_OFF($sp)
 	and	$0, 8, $0
-	beq	$0, restore_all
+	beq	$0, ret_to_kernel
 ret_to_user:
 	/* Make sure need_resched and sigpending don't change between
 		sampling and the rti.  */
@@ -329,6 +327,11 @@ restore_all:
 	RESTORE_ALL
 	call_pal PAL_rti
 
+ret_to_kernel:
+	lda	$16, 7
+	call_pal PAL_swpipl
+	br restore_all
+
 	.align 3
 $syscall_error:
 	/*
@@ -657,7 +660,7 @@ kernel_thread:
 	/* We don't actually care for a3 success widgetry in the kernel.
 	   Not for positive errno values.  */
 	stq	$0, 0($sp)		/* $0 */
-	br	restore_all
+	br	ret_to_kernel
 .end kernel_thread
 
 /*
@@ -911,15 +914,6 @@ sys_execve:
 	jmp	$31, do_sys_execve
 .end sys_execve
 
-	.align	4
-	.globl	osf_sigprocmask
-	.ent	osf_sigprocmask
-osf_sigprocmask:
-	.prologue 0
-	mov	$sp, $18
-	jmp	$31, sys_osf_sigprocmask
-.end osf_sigprocmask
-
 	.align	4
 	.globl	alpha_ni_syscall
 	.ent	alpha_ni_syscall

+ 86 - 42
arch/alpha/kernel/perf_event.c

@@ -307,7 +307,7 @@ again:
 			     new_raw_count) != prev_raw_count)
 		goto again;
 
-	delta = (new_raw_count  - (prev_raw_count & alpha_pmu->pmc_count_mask[idx])) + ovf;
+	delta = (new_raw_count - (prev_raw_count & alpha_pmu->pmc_count_mask[idx])) + ovf;
 
 	/* It is possible on very rare occasions that the PMC has overflowed
 	 * but the interrupt is yet to come.  Detect and fix this situation.
@@ -402,14 +402,13 @@ static void maybe_change_configuration(struct cpu_hw_events *cpuc)
 		struct hw_perf_event *hwc = &pe->hw;
 		int idx = hwc->idx;
 
-		if (cpuc->current_idx[j] != PMC_NO_INDEX) {
-			cpuc->idx_mask |= (1<<cpuc->current_idx[j]);
-			continue;
+		if (cpuc->current_idx[j] == PMC_NO_INDEX) {
+			alpha_perf_event_set_period(pe, hwc, idx);
+			cpuc->current_idx[j] = idx;
 		}
 
-		alpha_perf_event_set_period(pe, hwc, idx);
-		cpuc->current_idx[j] = idx;
-		cpuc->idx_mask |= (1<<cpuc->current_idx[j]);
+		if (!(hwc->state & PERF_HES_STOPPED))
+			cpuc->idx_mask |= (1<<cpuc->current_idx[j]);
 	}
 	cpuc->config = cpuc->event[0]->hw.config_base;
 }
@@ -420,12 +419,13 @@ static void maybe_change_configuration(struct cpu_hw_events *cpuc)
  *  - this function is called from outside this module via the pmu struct
  *    returned from perf event initialisation.
  */
-static int alpha_pmu_enable(struct perf_event *event)
+static int alpha_pmu_add(struct perf_event *event, int flags)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct hw_perf_event *hwc = &event->hw;
 	int n0;
 	int ret;
-	unsigned long flags;
+	unsigned long irq_flags;
 
 	/*
 	 * The Sparc code has the IRQ disable first followed by the perf
@@ -435,8 +435,8 @@ static int alpha_pmu_enable(struct perf_event *event)
 	 * nevertheless we disable the PMCs first to enable a potential
 	 * final PMI to occur before we disable interrupts.
 	 */
-	perf_disable();
-	local_irq_save(flags);
+	perf_pmu_disable(event->pmu);
+	local_irq_save(irq_flags);
 
 	/* Default to error to be returned */
 	ret = -EAGAIN;
@@ -455,8 +455,12 @@ static int alpha_pmu_enable(struct perf_event *event)
 		}
 	}
 
-	local_irq_restore(flags);
-	perf_enable();
+	hwc->state = PERF_HES_UPTODATE;
+	if (!(flags & PERF_EF_START))
+		hwc->state |= PERF_HES_STOPPED;
+
+	local_irq_restore(irq_flags);
+	perf_pmu_enable(event->pmu);
 
 	return ret;
 }
@@ -467,15 +471,15 @@ static int alpha_pmu_enable(struct perf_event *event)
  *  - this function is called from outside this module via the pmu struct
  *    returned from perf event initialisation.
  */
-static void alpha_pmu_disable(struct perf_event *event)
+static void alpha_pmu_del(struct perf_event *event, int flags)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 	struct hw_perf_event *hwc = &event->hw;
-	unsigned long flags;
+	unsigned long irq_flags;
 	int j;
 
-	perf_disable();
-	local_irq_save(flags);
+	perf_pmu_disable(event->pmu);
+	local_irq_save(irq_flags);
 
 	for (j = 0; j < cpuc->n_events; j++) {
 		if (event == cpuc->event[j]) {
@@ -501,8 +505,8 @@ static void alpha_pmu_disable(struct perf_event *event)
 		}
 	}
 
-	local_irq_restore(flags);
-	perf_enable();
+	local_irq_restore(irq_flags);
+	perf_pmu_enable(event->pmu);
 }
 
 
@@ -514,13 +518,44 @@ static void alpha_pmu_read(struct perf_event *event)
 }
 
 
-static void alpha_pmu_unthrottle(struct perf_event *event)
+static void alpha_pmu_stop(struct perf_event *event, int flags)
+{
+	struct hw_perf_event *hwc = &event->hw;
+	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+
+	if (!(hwc->state & PERF_HES_STOPPED)) {
+		cpuc->idx_mask &= ~(1UL<<hwc->idx);
+		hwc->state |= PERF_HES_STOPPED;
+	}
+
+	if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
+		alpha_perf_event_update(event, hwc, hwc->idx, 0);
+		hwc->state |= PERF_HES_UPTODATE;
+	}
+
+	if (cpuc->enabled)
+		wrperfmon(PERFMON_CMD_DISABLE, (1UL<<hwc->idx));
+}
+
+
+static void alpha_pmu_start(struct perf_event *event, int flags)
 {
 	struct hw_perf_event *hwc = &event->hw;
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 
+	if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
+		return;
+
+	if (flags & PERF_EF_RELOAD) {
+		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
+		alpha_perf_event_set_period(event, hwc, hwc->idx);
+	}
+
+	hwc->state = 0;
+
 	cpuc->idx_mask |= 1UL<<hwc->idx;
-	wrperfmon(PERFMON_CMD_ENABLE, (1UL<<hwc->idx));
+	if (cpuc->enabled)
+		wrperfmon(PERFMON_CMD_ENABLE, (1UL<<hwc->idx));
 }
 
 
@@ -642,39 +677,36 @@ static int __hw_perf_event_init(struct perf_event *event)
 	return 0;
 }
 
-static const struct pmu pmu = {
-	.enable		= alpha_pmu_enable,
-	.disable	= alpha_pmu_disable,
-	.read		= alpha_pmu_read,
-	.unthrottle	= alpha_pmu_unthrottle,
-};
-
-
 /*
  * Main entry point to initialise a HW performance event.
  */
-const struct pmu *hw_perf_event_init(struct perf_event *event)
+static int alpha_pmu_event_init(struct perf_event *event)
 {
 	int err;
 
+	switch (event->attr.type) {
+	case PERF_TYPE_RAW:
+	case PERF_TYPE_HARDWARE:
+	case PERF_TYPE_HW_CACHE:
+		break;
+
+	default:
+		return -ENOENT;
+	}
+
 	if (!alpha_pmu)
-		return ERR_PTR(-ENODEV);
+		return -ENODEV;
 
 	/* Do the real initialisation work. */
 	err = __hw_perf_event_init(event);
 
-	if (err)
-		return ERR_PTR(err);
-
-	return &pmu;
+	return err;
 }
 
-
-
 /*
  * Main entry point - enable HW performance counters.
  */
-void hw_perf_enable(void)
+static void alpha_pmu_enable(struct pmu *pmu)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 
@@ -700,7 +732,7 @@ void hw_perf_enable(void)
  * Main entry point - disable HW performance counters.
  */
 
-void hw_perf_disable(void)
+static void alpha_pmu_disable(struct pmu *pmu)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 
@@ -713,6 +745,17 @@ void hw_perf_disable(void)
 	wrperfmon(PERFMON_CMD_DISABLE, cpuc->idx_mask);
 }
 
+static struct pmu pmu = {
+	.pmu_enable	= alpha_pmu_enable,
+	.pmu_disable	= alpha_pmu_disable,
+	.event_init	= alpha_pmu_event_init,
+	.add		= alpha_pmu_add,
+	.del		= alpha_pmu_del,
+	.start		= alpha_pmu_start,
+	.stop		= alpha_pmu_stop,
+	.read		= alpha_pmu_read,
+};
+
 
 /*
  * Main entry point - don't know when this is called but it
@@ -766,7 +809,7 @@ static void alpha_perf_event_irq_handler(unsigned long la_ptr,
 	wrperfmon(PERFMON_CMD_DISABLE, cpuc->idx_mask);
 
 	/* la_ptr is the counter that overflowed. */
-	if (unlikely(la_ptr >= perf_max_events)) {
+	if (unlikely(la_ptr >= alpha_pmu->num_pmcs)) {
 		/* This should never occur! */
 		irq_err_count++;
 		pr_warning("PMI: silly index %ld\n", la_ptr);
@@ -807,7 +850,7 @@ static void alpha_perf_event_irq_handler(unsigned long la_ptr,
 			/* Interrupts coming too quickly; "throttle" the
 			 * counter, i.e., disable it for a little while.
 			 */
-			cpuc->idx_mask &= ~(1UL<<idx);
+			alpha_pmu_stop(event, 0);
 		}
 	}
 	wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask);
@@ -837,6 +880,7 @@ void __init init_hw_perf_events(void)
 
 	/* And set up PMU specification */
 	alpha_pmu = &ev67_pmu;
-	perf_max_events = alpha_pmu->num_pmcs;
+
+	perf_pmu_register(&pmu);
 }

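This alpha conversion and the ARM one later in the commit follow the same new perf core contract: add()/del() attach and detach an event, start()/stop() run it, and the PERF_HES_STOPPED/PERF_HES_UPTODATE bits track counter state. A minimal userspace model of that state machine (the flag values, types, and helpers below are invented for illustration; only the protocol mirrors the patch):

/* Toy model of the new pmu add/start/stop protocol. Compile with
 * any C compiler and run; nothing here is real kernel code. */
#include <stdio.h>

#define PERF_HES_STOPPED	0x01	/* counter is not running */
#define PERF_HES_UPTODATE	0x02	/* event->count is current */
#define PERF_EF_START		0x01	/* add() should also start() */
#define PERF_EF_RELOAD		0x02	/* start() must reprogram the period */
#define PERF_EF_UPDATE		0x04	/* stop() must fold the hw count */

struct toy_event { int state; long count; long hw; };

static void toy_start(struct toy_event *e, int flags)
{
	if (flags & PERF_EF_RELOAD)
		e->hw = 0;		/* reprogram the sampling period */
	e->state = 0;			/* running; count may go stale */
}

static void toy_stop(struct toy_event *e, int flags)
{
	if (!(e->state & PERF_HES_STOPPED))
		e->state |= PERF_HES_STOPPED;
	if ((flags & PERF_EF_UPDATE) && !(e->state & PERF_HES_UPTODATE)) {
		e->count += e->hw;	/* fold hardware value into count */
		e->state |= PERF_HES_UPTODATE;
	}
}

static void toy_add(struct toy_event *e, int flags)
{
	e->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
	if (flags & PERF_EF_START)
		toy_start(e, PERF_EF_RELOAD);
}

int main(void)
{
	struct toy_event e = { 0, 0, 0 };

	toy_add(&e, PERF_EF_START);	/* schedule in and start counting */
	e.hw = 42;			/* pretend the counter ticked */
	toy_stop(&e, PERF_EF_UPDATE);	/* throttle path: stop and fold */
	printf("count=%ld state=%#x\n", e.count, e.state);
	return 0;
}

The split lets the core cheaply stop/start an event (throttling) without tearing down its counter assignment, which previously required the heavier enable/disable pair.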
+ 1 - 1
arch/alpha/kernel/process.c

@@ -356,7 +356,7 @@ dump_elf_thread(elf_greg_t *dest, struct pt_regs *pt, struct thread_info *ti)
 	dest[27] = pt->r27;
 	dest[28] = pt->r28;
 	dest[29] = pt->gp;
-	dest[30] = rdusp();
+	dest[30] = ti == current_thread_info() ? rdusp() : ti->pcb.usp;
 	dest[31] = pt->pc;
 
 	/* Once upon a time this was the PS value.  Which is stupid

+ 15 - 41
arch/alpha/kernel/signal.c

@@ -41,46 +41,20 @@ static void do_signal(struct pt_regs *, struct switch_stack *,
 /*
  * The OSF/1 sigprocmask calling sequence is different from the
  * C sigprocmask() sequence..
- *
- * how:
- * 1 - SIG_BLOCK
- * 2 - SIG_UNBLOCK
- * 3 - SIG_SETMASK
- *
- * We change the range to -1 .. 1 in order to let gcc easily
- * use the conditional move instructions.
- *
- * Note that we don't need to acquire the kernel lock for SMP
- * operation, as all of this is local to this thread.
  */
-SYSCALL_DEFINE3(osf_sigprocmask, int, how, unsigned long, newmask,
-		struct pt_regs *, regs)
+SYSCALL_DEFINE2(osf_sigprocmask, int, how, unsigned long, newmask)
 {
-	unsigned long oldmask = -EINVAL;
-
-	if ((unsigned long)how-1 <= 2) {
-		long sign = how-2;		/* -1 .. 1 */
-		unsigned long block, unblock;
-
-		newmask &= _BLOCKABLE;
-		spin_lock_irq(&current->sighand->siglock);
-		oldmask = current->blocked.sig[0];
-
-		unblock = oldmask & ~newmask;
-		block = oldmask | newmask;
-		if (!sign)
-			block = unblock;
-		if (sign <= 0)
-			newmask = block;
-		if (_NSIG_WORDS > 1 && sign > 0)
-			sigemptyset(&current->blocked);
-		current->blocked.sig[0] = newmask;
-		recalc_sigpending();
-		spin_unlock_irq(&current->sighand->siglock);
-
-		regs->r0 = 0;		/* special no error return */
+	sigset_t oldmask;
+	sigset_t mask;
+	unsigned long res;
+
+	siginitset(&mask, newmask & _BLOCKABLE);
+	res = sigprocmask(how, &mask, &oldmask);
+	if (!res) {
+		force_successful_syscall_return();
+		res = oldmask.sig[0];
 	}
-	return oldmask;
+	return res;
 }
 
 SYSCALL_DEFINE3(osf_sigaction, int, sig,
@@ -94,9 +68,9 @@ SYSCALL_DEFINE3(osf_sigaction, int, sig,
 		old_sigset_t mask;
 		if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
 		    __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
-		    __get_user(new_ka.sa.sa_flags, &act->sa_flags))
+		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
+		    __get_user(mask, &act->sa_mask))
 			return -EFAULT;
-		__get_user(mask, &act->sa_mask);
 		siginitset(&new_ka.sa.sa_mask, mask);
 		new_ka.ka_restorer = NULL;
 	}
@@ -106,9 +80,9 @@ SYSCALL_DEFINE3(osf_sigaction, int, sig,
 	if (!ret && oact) {
 		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
 		    __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
-		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags))
+		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
+		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
 			return -EFAULT;
-		__put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
 	}
 
 	return ret;

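The rewrite above replaces the open-coded mask juggling with the kernel-internal sigprocmask() helper and returns the old mask on success. The observable semantics match the familiar POSIX call; a small userspace demo of that behaviour (my own example, not from the patch):

/* Userspace demo of the sigprocmask() semantics the rewritten
 * osf_sigprocmask now delegates to: block a signal, read back the
 * previous mask. Purely illustrative. */
#include <signal.h>
#include <stdio.h>

int main(void)
{
	sigset_t mask, oldmask;

	sigemptyset(&mask);
	sigaddset(&mask, SIGUSR1);

	if (sigprocmask(SIG_BLOCK, &mask, &oldmask) != 0) {
		perror("sigprocmask");
		return 1;
	}
	printf("SIGUSR1 was %sblocked before\n",
	       sigismember(&oldmask, SIGUSR1) ? "" : "not ");
	return 0;
}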
+ 1 - 1
arch/alpha/kernel/systbls.S

@@ -58,7 +58,7 @@ sys_call_table:
 	.quad sys_open				/* 45 */
 	.quad alpha_ni_syscall
 	.quad sys_getxgid
-	.quad osf_sigprocmask
+	.quad sys_osf_sigprocmask
 	.quad alpha_ni_syscall
 	.quad alpha_ni_syscall			/* 50 */
 	.quad sys_acct

+ 26 - 1
arch/arm/Kconfig

@@ -271,7 +271,6 @@ config ARCH_AT91
 	bool "Atmel AT91"
 	select ARCH_REQUIRE_GPIOLIB
 	select HAVE_CLK
-	select ARCH_USES_GETTIMEOFFSET
 	help
 	  This enables support for systems based on the Atmel AT91RM9200,
 	  AT91SAM9 and AT91CAP9 processors.
@@ -1051,6 +1050,32 @@ config ARM_ERRATA_460075
 	  ACTLR register. Note that setting specific bits in the ACTLR register
 	  may not be available in non-secure mode.
 
+config ARM_ERRATA_742230
+	bool "ARM errata: DMB operation may be faulty"
+	depends on CPU_V7 && SMP
+	help
+	  This option enables the workaround for the 742230 Cortex-A9
+	  (r1p0..r2p2) erratum. Under rare circumstances, a DMB instruction
+	  between two write operations may not ensure the correct visibility
+	  ordering of the two writes. This workaround sets a specific bit in
+	  the diagnostic register of the Cortex-A9 which causes the DMB
+	  instruction to behave as a DSB, ensuring the correct behaviour of
+	  the two writes.
+
+config ARM_ERRATA_742231
+	bool "ARM errata: Incorrect hazard handling in the SCU may lead to data corruption"
+	depends on CPU_V7 && SMP
+	help
+	  This option enables the workaround for the 742231 Cortex-A9
+	  (r2p0..r2p2) erratum. Under certain conditions, specific to the
+	  Cortex-A9 MPCore micro-architecture, two CPUs working in SMP mode,
+	  accessing some data located in the same cache line, may get corrupted
+	  data due to bad handling of the address hazard when the line gets
+	  replaced from one of the CPUs at the same time as another CPU is
+	  accessing it. This workaround sets specific bits in the diagnostic
+	  register of the Cortex-A9 which reduces the linefill issuing
+	  capabilities of the processor.
+
 config PL310_ERRATA_588369
 	bool "Clean & Invalidate maintenance operations do not invalidate clean lines"
 	depends on CACHE_L2X0 && ARCH_OMAP4

+ 1 - 1
arch/arm/boot/compressed/Makefile

@@ -116,5 +116,5 @@ CFLAGS_font.o := -Dstatic=
 $(obj)/font.c: $(FONTC)
 	$(call cmd,shipped)
 
-$(obj)/vmlinux.lds: $(obj)/vmlinux.lds.in arch/arm/boot/Makefile .config
+$(obj)/vmlinux.lds: $(obj)/vmlinux.lds.in arch/arm/boot/Makefile $(KCONFIG_CONFIG)
 	@sed "$(SEDFLAGS)" < $< > $@

+ 8 - 0
arch/arm/common/it8152.c

@@ -271,6 +271,14 @@ int dma_needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size)
 		((dma_addr + size - PHYS_OFFSET) >= SZ_64M);
 }
 
+int dma_set_coherent_mask(struct device *dev, u64 mask)
+{
+	if (mask >= PHYS_OFFSET + SZ_64M - 1)
+		return 0;
+
+	return -EIO;
+}
+
 int __init it8152_pci_setup(int nr, struct pci_sys_data *sys)
 {
 	it8152_io.start = IT8152_IO_BASE + 0x12000;

+ 4 - 0
arch/arm/include/asm/pgtable.h

@@ -317,6 +317,10 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
 #ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
 #define pgprot_dmacoherent(prot) \
 	__pgprot_modify(prot, L_PTE_MT_MASK|L_PTE_EXEC, L_PTE_MT_BUFFERABLE)
+#define __HAVE_PHYS_MEM_ACCESS_PROT
+struct file;
+extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
+				     unsigned long size, pgprot_t vma_prot);
 #else
 #define pgprot_dmacoherent(prot) \
 	__pgprot_modify(prot, L_PTE_MT_MASK|L_PTE_EXEC, L_PTE_MT_UNCACHED)

+ 2 - 0
arch/arm/kernel/entry-common.S

@@ -48,6 +48,8 @@ work_pending:
 	beq	no_work_pending
 	mov	r0, sp				@ 'regs'
 	mov	r2, why				@ 'syscall'
+	tst	r1, #_TIF_SIGPENDING		@ delivering a signal?
+	movne	why, #0				@ prevent further restarts
 	bl	do_notify_resume
 	b	ret_slow_syscall		@ Check work again
 

+ 92 - 106
arch/arm/kernel/perf_event.c

@@ -227,46 +227,56 @@ again:
 }
 
 static void
-armpmu_disable(struct perf_event *event)
+armpmu_read(struct perf_event *event)
 {
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 	struct hw_perf_event *hwc = &event->hw;
-	int idx = hwc->idx;
-
-	WARN_ON(idx < 0);
-
-	clear_bit(idx, cpuc->active_mask);
-	armpmu->disable(hwc, idx);
-
-	barrier();
 
-	armpmu_event_update(event, hwc, idx);
-	cpuc->events[idx] = NULL;
-	clear_bit(idx, cpuc->used_mask);
+	/* Don't read disabled counters! */
+	if (hwc->idx < 0)
+		return;
 
-	perf_event_update_userpage(event);
+	armpmu_event_update(event, hwc, hwc->idx);
 }
 
 static void
-armpmu_read(struct perf_event *event)
+armpmu_stop(struct perf_event *event, int flags)
 {
 	struct hw_perf_event *hwc = &event->hw;
 
-	/* Don't read disabled counters! */
-	if (hwc->idx < 0)
+	if (!armpmu)
 		return;
 
-	armpmu_event_update(event, hwc, hwc->idx);
+	/*
+	 * ARM pmu always has to update the counter, so ignore
+	 * PERF_EF_UPDATE, see comments in armpmu_start().
+	 */
+	if (!(hwc->state & PERF_HES_STOPPED)) {
+		armpmu->disable(hwc, hwc->idx);
+		barrier(); /* why? */
+		armpmu_event_update(event, hwc, hwc->idx);
+		hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
+	}
 }
 
 static void
-armpmu_unthrottle(struct perf_event *event)
+armpmu_start(struct perf_event *event, int flags)
 {
 	struct hw_perf_event *hwc = &event->hw;
 
+	if (!armpmu)
+		return;
+
+	/*
+	 * ARM pmu always has to reprogram the period, so ignore
+	 * PERF_EF_RELOAD, see the comment below.
+	 */
+	if (flags & PERF_EF_RELOAD)
+		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
+
+	hwc->state = 0;
 	/*
 	 * Set the period again. Some counters can't be stopped, so when we
-	 * were throttled we simply disabled the IRQ source and the counter
+	 * were stopped we simply disabled the IRQ source and the counter
 	 * may have been left counting. If we don't do this step then we may
 	 * get an interrupt too soon or *way* too late if the overflow has
 	 * happened since disabling.
@@ -275,14 +285,33 @@ armpmu_unthrottle(struct perf_event *event)
 	armpmu->enable(hwc, hwc->idx);
 }
 
+static void
+armpmu_del(struct perf_event *event, int flags)
+{
+	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct hw_perf_event *hwc = &event->hw;
+	int idx = hwc->idx;
+
+	WARN_ON(idx < 0);
+
+	clear_bit(idx, cpuc->active_mask);
+	armpmu_stop(event, PERF_EF_UPDATE);
+	cpuc->events[idx] = NULL;
+	clear_bit(idx, cpuc->used_mask);
+
+	perf_event_update_userpage(event);
+}
+
 static int
-armpmu_enable(struct perf_event *event)
+armpmu_add(struct perf_event *event, int flags)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 	struct hw_perf_event *hwc = &event->hw;
 	int idx;
 	int err = 0;
 
+	perf_pmu_disable(event->pmu);
+
 	/* If we don't have a space for the counter then finish early. */
 	idx = armpmu->get_event_idx(cpuc, hwc);
 	if (idx < 0) {
@@ -299,25 +328,19 @@ armpmu_enable(struct perf_event *event)
 	cpuc->events[idx] = event;
 	set_bit(idx, cpuc->active_mask);
 
-	/* Set the period for the event. */
-	armpmu_event_set_period(event, hwc, idx);
-
-	/* Enable the event. */
-	armpmu->enable(hwc, idx);
+	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
+	if (flags & PERF_EF_START)
+		armpmu_start(event, PERF_EF_RELOAD);
 
 	/* Propagate our changes to the userspace mapping. */
 	perf_event_update_userpage(event);
 
 out:
+	perf_pmu_enable(event->pmu);
 	return err;
 }
 
-static struct pmu pmu = {
-	.enable	    = armpmu_enable,
-	.disable    = armpmu_disable,
-	.unthrottle = armpmu_unthrottle,
-	.read	    = armpmu_read,
-};
+static struct pmu pmu;
 
 static int
 validate_event(struct cpu_hw_events *cpuc,
@@ -497,20 +520,29 @@ __hw_perf_event_init(struct perf_event *event)
 	return err;
 }
 
-const struct pmu *
-hw_perf_event_init(struct perf_event *event)
+static int armpmu_event_init(struct perf_event *event)
 {
 	int err = 0;
 
+	switch (event->attr.type) {
+	case PERF_TYPE_RAW:
+	case PERF_TYPE_HARDWARE:
+	case PERF_TYPE_HW_CACHE:
+		break;
+
+	default:
+		return -ENOENT;
+	}
+
 	if (!armpmu)
-		return ERR_PTR(-ENODEV);
+		return -ENODEV;
 
 	event->destroy = hw_perf_event_destroy;
 
 	if (!atomic_inc_not_zero(&active_events)) {
-		if (atomic_read(&active_events) > perf_max_events) {
+		if (atomic_read(&active_events) > armpmu->num_events) {
 			atomic_dec(&active_events);
-			return ERR_PTR(-ENOSPC);
+			return -ENOSPC;
 		}
 
 		mutex_lock(&pmu_reserve_mutex);
@@ -524,17 +556,16 @@ hw_perf_event_init(struct perf_event *event)
 	}
 
 	if (err)
-		return ERR_PTR(err);
+		return err;
 
 	err = __hw_perf_event_init(event);
 	if (err)
 		hw_perf_event_destroy(event);
 
-	return err ? ERR_PTR(err) : &pmu;
+	return err;
 }
 
-void
-hw_perf_enable(void)
+static void armpmu_enable(struct pmu *pmu)
 {
 	/* Enable all of the perf events on hardware. */
 	int idx;
@@ -555,13 +586,23 @@ hw_perf_enable(void)
 	armpmu->start();
 }
 
-void
-hw_perf_disable(void)
+static void armpmu_disable(struct pmu *pmu)
 {
 	if (armpmu)
 		armpmu->stop();
 }
 
+static struct pmu pmu = {
+	.pmu_enable	= armpmu_enable,
+	.pmu_disable	= armpmu_disable,
+	.event_init	= armpmu_event_init,
+	.add		= armpmu_add,
+	.del		= armpmu_del,
+	.start		= armpmu_start,
+	.stop		= armpmu_stop,
+	.read		= armpmu_read,
+};
+
 /*
  * ARMv6 Performance counter handling code.
  *
@@ -2939,14 +2980,12 @@ init_hw_perf_events(void)
 			armpmu = &armv6pmu;
 			memcpy(armpmu_perf_cache_map, armv6_perf_cache_map,
 					sizeof(armv6_perf_cache_map));
-			perf_max_events	= armv6pmu.num_events;
 			break;
 		case 0xB020:	/* ARM11mpcore */
 			armpmu = &armv6mpcore_pmu;
 			memcpy(armpmu_perf_cache_map,
 			       armv6mpcore_perf_cache_map,
 			       sizeof(armv6mpcore_perf_cache_map));
-			perf_max_events = armv6mpcore_pmu.num_events;
 			break;
 		case 0xC080:	/* Cortex-A8 */
 			armv7pmu.id = ARM_PERF_PMU_ID_CA8;
@@ -2958,7 +2997,6 @@ init_hw_perf_events(void)
 			/* Reset PMNC and read the nb of CNTx counters
 			    supported */
 			armv7pmu.num_events = armv7_reset_read_pmnc();
-			perf_max_events = armv7pmu.num_events;
 			break;
 		case 0xC090:	/* Cortex-A9 */
 			armv7pmu.id = ARM_PERF_PMU_ID_CA9;
@@ -2970,7 +3008,6 @@ init_hw_perf_events(void)
 			/* Reset PMNC and read the nb of CNTx counters
 			    supported */
 			armv7pmu.num_events = armv7_reset_read_pmnc();
-			perf_max_events = armv7pmu.num_events;
 			break;
 		}
 	/* Intel CPUs [xscale]. */
@@ -2981,13 +3018,11 @@ init_hw_perf_events(void)
 			armpmu = &xscale1pmu;
 			memcpy(armpmu_perf_cache_map, xscale_perf_cache_map,
 					sizeof(xscale_perf_cache_map));
-			perf_max_events	= xscale1pmu.num_events;
 			break;
 		case 2:
 			armpmu = &xscale2pmu;
 			memcpy(armpmu_perf_cache_map, xscale_perf_cache_map,
 					sizeof(xscale_perf_cache_map));
-			perf_max_events	= xscale2pmu.num_events;
 			break;
 		}
 	}
@@ -2997,9 +3032,10 @@ init_hw_perf_events(void)
 				arm_pmu_names[armpmu->id], armpmu->num_events);
 	} else {
 		pr_info("no hardware support available\n");
-		perf_max_events = -1;
 	}
 
+	perf_pmu_register(&pmu);
+
 	return 0;
 }
 arch_initcall(init_hw_perf_events);
@@ -3007,13 +3043,6 @@ arch_initcall(init_hw_perf_events);
 /*
  * Callchain handling code.
  */
-static inline void
-callchain_store(struct perf_callchain_entry *entry,
-		u64 ip)
-{
-	if (entry->nr < PERF_MAX_STACK_DEPTH)
-		entry->ip[entry->nr++] = ip;
-}
 
 /*
  * The registers we're interested in are at the end of the variable
@@ -3045,7 +3074,7 @@ user_backtrace(struct frame_tail *tail,
 	if (__copy_from_user_inatomic(&buftail, tail, sizeof(buftail)))
 		return NULL;
 
-	callchain_store(entry, buftail.lr);
+	perf_callchain_store(entry, buftail.lr);
 
 	/*
 	 * Frame pointers should strictly progress back up the stack
@@ -3057,16 +3086,11 @@ user_backtrace(struct frame_tail *tail,
 	return buftail.fp - 1;
 }
 
-static void
-perf_callchain_user(struct pt_regs *regs,
-		    struct perf_callchain_entry *entry)
+void
+perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
 {
 	struct frame_tail *tail;
 
-	callchain_store(entry, PERF_CONTEXT_USER);
-
-	if (!user_mode(regs))
-		regs = task_pt_regs(current);
 
 	tail = (struct frame_tail *)regs->ARM_fp - 1;
 
@@ -3084,56 +3108,18 @@ callchain_trace(struct stackframe *fr,
 		void *data)
 {
 	struct perf_callchain_entry *entry = data;
-	callchain_store(entry, fr->pc);
+	perf_callchain_store(entry, fr->pc);
 	return 0;
 }
 
-static void
-perf_callchain_kernel(struct pt_regs *regs,
-		      struct perf_callchain_entry *entry)
+void
+perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
 {
 	struct stackframe fr;
 
-	callchain_store(entry, PERF_CONTEXT_KERNEL);
 	fr.fp = regs->ARM_fp;
 	fr.sp = regs->ARM_sp;
 	fr.lr = regs->ARM_lr;
 	fr.pc = regs->ARM_pc;
 	walk_stackframe(&fr, callchain_trace, entry);
 }
-
-static void
-perf_do_callchain(struct pt_regs *regs,
-		  struct perf_callchain_entry *entry)
-{
-	int is_user;
-
-	if (!regs)
-		return;
-
-	is_user = user_mode(regs);
-
-	if (!current || !current->pid)
-		return;
-
-	if (is_user && current->state != TASK_RUNNING)
-		return;
-
-	if (!is_user)
-		perf_callchain_kernel(regs, entry);
-
-	if (current->mm)
-		perf_callchain_user(regs, entry);
-}
-
-static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_irq_entry);
-
-struct perf_callchain_entry *
-perf_callchain(struct pt_regs *regs)
-{
-	struct perf_callchain_entry *entry = &__get_cpu_var(pmc_irq_entry);
-
-	entry->nr = 0;
-	perf_do_callchain(regs, entry);
-	return entry;
-}
+ 2 - 2
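The callchain rework above drops the driver-local callchain_store()/perf_callchain() plumbing in favour of the core's perf_callchain_store() and the new perf_callchain_user/kernel entry points. The deleted helper was just a bounded append; a runnable toy version of that idea (the depth constant and types here are assumptions, not the kernel's):

/* Toy bounded callchain append, mirroring the removed helper. */
#include <stdio.h>

#define MAX_STACK_DEPTH 8

struct callchain { unsigned int nr; unsigned long ip[MAX_STACK_DEPTH]; };

static void callchain_store(struct callchain *entry, unsigned long ip)
{
	if (entry->nr < MAX_STACK_DEPTH)
		entry->ip[entry->nr++] = ip;	/* silently drop past the cap */
}

int main(void)
{
	struct callchain c = { 0 };
	unsigned long i;

	for (i = 0; i < 20; i++)	/* only the first 8 frames survive */
		callchain_store(&c, 0x1000 + i);
	printf("stored %u frames, last ip=%#lx\n", c.nr, c.ip[c.nr - 1]);
	return 0;
}

Centralising this in the perf core removes one copy of the same loop from every architecture's PMU driver.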
arch/arm/mach-at91/at91sam9g45_devices.c

@@ -426,7 +426,7 @@ static struct i2c_gpio_platform_data pdata_i2c0 = {
 	.sda_is_open_drain	= 1,
 	.scl_pin		= AT91_PIN_PA21,
 	.scl_is_open_drain	= 1,
-	.udelay			= 2,		/* ~100 kHz */
+	.udelay			= 5,		/* ~100 kHz */
 };
 
 static struct platform_device at91sam9g45_twi0_device = {
@@ -440,7 +440,7 @@ static struct i2c_gpio_platform_data pdata_i2c1 = {
 	.sda_is_open_drain	= 1,
 	.scl_pin		= AT91_PIN_PB11,
 	.scl_is_open_drain	= 1,
-	.udelay			= 2,		/* ~100 kHz */
+	.udelay			= 5,		/* ~100 kHz */
 };
 
 static struct platform_device at91sam9g45_twi1_device = {

+ 1 - 2
arch/arm/mach-davinci/dm355.c

@@ -769,8 +769,7 @@ static struct map_desc dm355_io_desc[] = {
 		.virtual	= SRAM_VIRT,
 		.pfn		= __phys_to_pfn(0x00010000),
 		.length		= SZ_32K,
-		/* MT_MEMORY_NONCACHED requires supersection alignment */
-		.type		= MT_DEVICE,
+		.type		= MT_MEMORY_NONCACHED,
 	},
 };
 

+ 1 - 2
arch/arm/mach-davinci/dm365.c

@@ -969,8 +969,7 @@ static struct map_desc dm365_io_desc[] = {
 		.virtual	= SRAM_VIRT,
 		.pfn		= __phys_to_pfn(0x00010000),
 		.length		= SZ_32K,
-		/* MT_MEMORY_NONCACHED requires supersection alignment */
-		.type		= MT_DEVICE,
+		.type		= MT_MEMORY_NONCACHED,
 	},
 };
 

+ 1 - 2
arch/arm/mach-davinci/dm644x.c

@@ -653,8 +653,7 @@ static struct map_desc dm644x_io_desc[] = {
 		.virtual	= SRAM_VIRT,
 		.pfn		= __phys_to_pfn(0x00008000),
 		.length		= SZ_16K,
-		/* MT_MEMORY_NONCACHED requires supersection alignment */
-		.type		= MT_DEVICE,
+		.type		= MT_MEMORY_NONCACHED,
 	},
 };
 

+ 1 - 2
arch/arm/mach-davinci/dm646x.c

@@ -737,8 +737,7 @@ static struct map_desc dm646x_io_desc[] = {
 		.virtual	= SRAM_VIRT,
 		.pfn		= __phys_to_pfn(0x00010000),
 		.length		= SZ_32K,
-		/* MT_MEMORY_NONCACHED requires supersection alignment */
-		.type		= MT_DEVICE,
+		.type		= MT_MEMORY_NONCACHED,
 	},
 };
 

+ 3 - 3
arch/arm/mach-dove/include/mach/io.h

@@ -13,8 +13,8 @@
 
 #define IO_SPACE_LIMIT		0xffffffff
 
-#define __io(a)  ((void __iomem *)(((a) - DOVE_PCIE0_IO_PHYS_BASE) +\
-				   DOVE_PCIE0_IO_VIRT_BASE))
-#define __mem_pci(a)		(a)
+#define __io(a)  	((void __iomem *)(((a) - DOVE_PCIE0_IO_BUS_BASE) + \
+						 DOVE_PCIE0_IO_VIRT_BASE))
+#define __mem_pci(a)	(a)
 
 #endif

+ 8 - 0
arch/arm/mach-ixp4xx/common-pci.c

@@ -503,6 +503,14 @@ struct pci_bus * __devinit ixp4xx_scan_bus(int nr, struct pci_sys_data *sys)
 	return pci_scan_bus(sys->busnr, &ixp4xx_ops, sys);
 }
 
+int dma_set_coherent_mask(struct device *dev, u64 mask)
+{
+	if (mask >= SZ_64M - 1)
+		return 0;
+
+	return -EIO;
+}
+
 EXPORT_SYMBOL(ixp4xx_pci_read);
 EXPORT_SYMBOL(ixp4xx_pci_write);
 

+ 2 - 0
arch/arm/mach-ixp4xx/include/mach/hardware.h

@@ -26,6 +26,8 @@
 #define PCIBIOS_MAX_MEM		0x4BFFFFFF
 #endif
 
+#define ARCH_HAS_DMA_SET_COHERENT_MASK
+
 #define pcibios_assign_all_busses()	1
 
 /* Register locations and bits */

+ 1 - 1
arch/arm/mach-kirkwood/include/mach/kirkwood.h

@@ -38,7 +38,7 @@
 
 #define KIRKWOOD_PCIE1_IO_PHYS_BASE	0xf3000000
 #define KIRKWOOD_PCIE1_IO_VIRT_BASE	0xfef00000
-#define KIRKWOOD_PCIE1_IO_BUS_BASE	0x00000000
+#define KIRKWOOD_PCIE1_IO_BUS_BASE	0x00100000
 #define KIRKWOOD_PCIE1_IO_SIZE		SZ_1M
 
 #define KIRKWOOD_PCIE_IO_PHYS_BASE	0xf2000000

+ 2 - 2
arch/arm/mach-kirkwood/pcie.c

@@ -117,7 +117,7 @@ static void __init pcie0_ioresources_init(struct pcie_port *pp)
 	 * IORESOURCE_IO
 	 */
 	pp->res[0].name = "PCIe 0 I/O Space";
-	pp->res[0].start = KIRKWOOD_PCIE_IO_PHYS_BASE;
+	pp->res[0].start = KIRKWOOD_PCIE_IO_BUS_BASE;
 	pp->res[0].end = pp->res[0].start + KIRKWOOD_PCIE_IO_SIZE - 1;
 	pp->res[0].flags = IORESOURCE_IO;
 
@@ -139,7 +139,7 @@ static void __init pcie1_ioresources_init(struct pcie_port *pp)
 	 * IORESOURCE_IO
 	 */
 	pp->res[0].name = "PCIe 1 I/O Space";
-	pp->res[0].start = KIRKWOOD_PCIE1_IO_PHYS_BASE;
+	pp->res[0].start = KIRKWOOD_PCIE1_IO_BUS_BASE;
 	pp->res[0].end = pp->res[0].start + KIRKWOOD_PCIE1_IO_SIZE - 1;
 	pp->res[0].flags = IORESOURCE_IO;
 

+ 6 - 1
arch/arm/mach-mmp/include/mach/system.h

@@ -9,6 +9,8 @@
 #ifndef __ASM_MACH_SYSTEM_H
 #define __ASM_MACH_SYSTEM_H
 
+#include <mach/cputype.h>
+
 static inline void arch_idle(void)
 {
 	cpu_do_idle();
@@ -16,6 +18,9 @@ static inline void arch_idle(void)
 
 static inline void arch_reset(char mode, const char *cmd)
 {
-	cpu_reset(0);
+	if (cpu_is_pxa168())
+		cpu_reset(0xffff0000);
+	else
+		cpu_reset(0);
 }
 #endif /* __ASM_MACH_SYSTEM_H */

+ 1 - 2
arch/arm/mach-pxa/cpufreq-pxa2xx.c

@@ -312,8 +312,7 @@ static int pxa_set_target(struct cpufreq_policy *policy,
 	freqs.cpu = policy->cpu;
 
 	if (freq_debug)
-		pr_debug(KERN_INFO "Changing CPU frequency to %d Mhz, "
-			 "(SDRAM %d Mhz)\n",
+		pr_debug("Changing CPU frequency to %d Mhz, (SDRAM %d Mhz)\n",
 			 freqs.new / 1000, (pxa_freq_settings[idx].div2) ?
 			 (new_freq_mem / 2000) : (new_freq_mem / 1000));
 

+ 13 - 1
arch/arm/mach-pxa/include/mach/hardware.h

@@ -264,23 +264,35 @@
  * <= 0x2 for pxa21x/pxa25x/pxa26x/pxa27x
  * == 0x3 for pxa300/pxa310/pxa320
  */
+#if defined(CONFIG_PXA25x) || defined(CONFIG_PXA27x)
 #define __cpu_is_pxa2xx(id)				\
 	({						\
 		unsigned int _id = (id) >> 13 & 0x7;	\
 		_id <= 0x2;				\
 	 })
+#else
+#define __cpu_is_pxa2xx(id)	(0)
+#endif
 
+#ifdef CONFIG_PXA3xx
 #define __cpu_is_pxa3xx(id)				\
 	({						\
 		unsigned int _id = (id) >> 13 & 0x7;	\
 		_id == 0x3;				\
 	 })
+#else
+#define __cpu_is_pxa3xx(id)	(0)
+#endif
 
+#if defined(CONFIG_CPU_PXA930) || defined(CONFIG_CPU_PXA935)
 #define __cpu_is_pxa93x(id)				\
 	({						\
 		unsigned int _id = (id) >> 4 & 0xfff;	\
 		_id == 0x683 || _id == 0x693;		\
 	 })
+#else
+#define __cpu_is_pxa93x(id)	(0)
+#endif
 
 #define cpu_is_pxa2xx()					\
 	({						\
@@ -309,7 +321,7 @@ extern unsigned long get_clock_tick_rate(void);
 #define PCIBIOS_MIN_IO		0
 #define PCIBIOS_MIN_MEM		0
 #define pcibios_assign_all_busses()	1
+#define ARCH_HAS_DMA_SET_COHERENT_MASK
 #endif
 
-
 #endif  /* _ASM_ARCH_HARDWARE_H */

+ 2 - 0
arch/arm/mach-pxa/include/mach/io.h

@@ -6,6 +6,8 @@
 #ifndef __ASM_ARM_ARCH_IO_H
 #define __ASM_ARM_ARCH_IO_H
 
+#include <mach/hardware.h>
+
 #define IO_SPACE_LIMIT 0xffffffff
 
 /*

+ 5 - 1
arch/arm/mach-pxa/palm27x.c

@@ -469,9 +469,13 @@ static struct i2c_board_info __initdata palm27x_pi2c_board_info[] = {
 	},
 };
 
+static struct i2c_pxa_platform_data palm27x_i2c_power_info = {
+	.use_pio	= 1,
+};
+
 void __init palm27x_pmic_init(void)
 {
 	i2c_register_board_info(1, ARRAY_AND_SIZE(palm27x_pi2c_board_info));
-	pxa27x_set_i2c_power_info(NULL);
+	pxa27x_set_i2c_power_info(&palm27x_i2c_power_info);
 }
 #endif

+ 1 - 0
arch/arm/mach-pxa/vpac270.c

@@ -240,6 +240,7 @@ static void __init vpac270_onenand_init(void) {}
 #if defined(CONFIG_MMC_PXA) || defined(CONFIG_MMC_PXA_MODULE)
 static struct pxamci_platform_data vpac270_mci_platform_data = {
 	.ocr_mask		= MMC_VDD_32_33 | MMC_VDD_33_34,
+	.gpio_power		= -1,
 	.gpio_card_detect	= GPIO53_VPAC270_SD_DETECT_N,
 	.gpio_card_ro		= GPIO52_VPAC270_SD_READONLY,
 	.detect_delay_ms	= 200,

+ 3 - 0
arch/arm/mach-u300/include/mach/gpio.h

@@ -273,6 +273,9 @@ extern void gpio_pullup(unsigned gpio, int value);
 extern int gpio_get_value(unsigned gpio);
 extern void gpio_set_value(unsigned gpio, int value);
 
+#define gpio_get_value_cansleep gpio_get_value
+#define gpio_set_value_cansleep gpio_set_value
+
 /* wrappers to sleep-enable the previous two functions */
 static inline unsigned gpio_to_irq(unsigned gpio)
 {

+ 7 - 1
arch/arm/mach-vexpress/ct-ca9x4.c

@@ -227,7 +227,13 @@ static void ct_ca9x4_init(void)
 	int i;
 
 #ifdef CONFIG_CACHE_L2X0
-	l2x0_init(MMIO_P2V(CT_CA9X4_L2CC), 0x00000000, 0xfe0fffff);
+	void __iomem *l2x0_base = MMIO_P2V(CT_CA9X4_L2CC);
+
+	/* set RAM latencies to 1 cycle for this core tile. */
+	writel(0, l2x0_base + L2X0_TAG_LATENCY_CTRL);
+	writel(0, l2x0_base + L2X0_DATA_LATENCY_CTRL);
+
+	l2x0_init(l2x0_base, 0x00400000, 0xfe0fffff);
 #endif
 
 	clkdev_add_table(lookups, ARRAY_SIZE(lookups));
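For reference, l2x0_init(base, aux_val, aux_mask) in this kernel masks then ORs the auxiliary control register before enabling the cache, so the new arguments keep most bits and force on one. A sketch of the effective update, assuming the l2x0 driver's semantics:

	u32 aux = readl(l2x0_base + L2X0_AUX_CTRL);
	aux &= 0xfe0fffff;		/* aux_mask: bits to preserve */
	aux |= 0x00400000;		/* aux_val:  the bit this hunk forces on */
	writel(aux, l2x0_base + L2X0_AUX_CTRL);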

+ 17 - 2
arch/arm/mm/alignment.c

@@ -885,8 +885,23 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 
 	if (ai_usermode & UM_SIGNAL)
 		force_sig(SIGBUS, current);
-	else
-		set_cr(cr_no_alignment);
+	else {
+		/*
+		 * We're about to disable the alignment trap and return to
+		 * user space.  But if an interrupt occurs before actually
+		 * reaching user space, then the IRQ vector entry code will
+		 * notice that we were still in kernel space and therefore
+		 * the alignment trap won't be re-enabled in that case as it
+		 * is presumed to be always on from kernel space.
+		 * Let's prevent that race by disabling interrupts here (they
+		 * are disabled on the way back to user space anyway in
+		 * entry-common.S) and disable the alignment trap only if
+		 * there is no work pending for this thread.
+		 */
+		raw_local_irq_disable();
+		if (!(current_thread_info()->flags & _TIF_WORK_MASK))
+			set_cr(cr_no_alignment);
+	}
 
 	return 0;
 }

+ 29 - 2
arch/arm/mm/mmu.c

@@ -15,6 +15,7 @@
 #include <linux/nodemask.h>
 #include <linux/memblock.h>
 #include <linux/sort.h>
+#include <linux/fs.h>
 
 #include <asm/cputype.h>
 #include <asm/sections.h>
@@ -246,6 +247,9 @@ static struct mem_type mem_types[] = {
 		.domain    = DOMAIN_USER,
 	},
 	[MT_MEMORY] = {
+		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
+				L_PTE_USER | L_PTE_EXEC,
+		.prot_l1   = PMD_TYPE_TABLE,
 		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
 		.domain    = DOMAIN_KERNEL,
 	},
@@ -254,6 +258,9 @@ static struct mem_type mem_types[] = {
 		.domain    = DOMAIN_KERNEL,
 	},
 	[MT_MEMORY_NONCACHED] = {
+		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
+				L_PTE_USER | L_PTE_EXEC | L_PTE_MT_BUFFERABLE,
+		.prot_l1   = PMD_TYPE_TABLE,
 		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
 		.domain    = DOMAIN_KERNEL,
 	},
@@ -411,9 +418,12 @@ static void __init build_mem_type_table(void)
 	 * Enable CPU-specific coherency if supported.
 	 * (Only available on XSC3 at the moment.)
 	 */
-	if (arch_is_coherent() && cpu_is_xsc3())
+	if (arch_is_coherent() && cpu_is_xsc3()) {
 		mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
-
+		mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
+		mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
+		mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
+	}
 	/*
 	 * ARMv6 and above have extended page tables.
 	 */
@@ -438,7 +448,9 @@ static void __init build_mem_type_table(void)
 		mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
 		mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
 		mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
+		mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
 		mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
+		mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
 #endif
 	}
 
@@ -475,6 +487,8 @@ static void __init build_mem_type_table(void)
 	mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
 	mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
 	mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
+	mem_types[MT_MEMORY].prot_pte |= kern_pgprot;
+	mem_types[MT_MEMORY_NONCACHED].prot_sect |= ecc_mask;
 	mem_types[MT_ROM].prot_sect |= cp->pmd;
 
 	switch (cp->pmd) {
@@ -498,6 +512,19 @@ static void __init build_mem_type_table(void)
 	}
 }
 
+#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
+pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
+			      unsigned long size, pgprot_t vma_prot)
+{
+	if (!pfn_valid(pfn))
+		return pgprot_noncached(vma_prot);
+	else if (file->f_flags & O_SYNC)
+		return pgprot_writecombine(vma_prot);
+	return vma_prot;
+}
+EXPORT_SYMBOL(phys_mem_access_prot);
+#endif
+
 #define vectors_base()	(vectors_high() ? 0xffff0000 : 0)
 
 static void __init *early_alloc(unsigned long sz)
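This hook is what the /dev/mem mmap path consults; a sketch of how a mapping picks the policy up (call shape as in the generic drivers/char/mem.c of this era):

	vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
						 vma->vm_end - vma->vm_start,
						 vma->vm_page_prot);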

+ 56 - 6
arch/arm/mm/proc-v7.S

@@ -186,13 +186,14 @@ cpu_v7_name:
  *	It is assumed that:
  *	- cache type register is implemented
  */
-__v7_setup:
+__v7_ca9mp_setup:
 #ifdef CONFIG_SMP
 	mrc	p15, 0, r0, c1, c0, 1
 	tst	r0, #(1 << 6)			@ SMP/nAMP mode enabled?
 	orreq	r0, r0, #(1 << 6) | (1 << 0)	@ Enable SMP/nAMP mode and
 	mcreq	p15, 0, r0, c1, c0, 1		@ TLB ops broadcasting
 #endif
+__v7_setup:
 	adr	r12, __v7_setup_stack		@ the local stack
 	stmia	r12, {r0-r5, r7, r9, r11, lr}
 	bl	v7_flush_dcache_all
@@ -201,11 +202,16 @@ __v7_setup:
 	mrc	p15, 0, r0, c0, c0, 0		@ read main ID register
 	and	r10, r0, #0xff000000		@ ARM?
 	teq	r10, #0x41000000
-	bne	2f
+	bne	3f
 	and	r5, r0, #0x00f00000		@ variant
 	and	r6, r0, #0x0000000f		@ revision
-	orr	r0, r6, r5, lsr #20-4		@ combine variant and revision
+	orr	r6, r6, r5, lsr #20-4		@ combine variant and revision
+	ubfx	r0, r0, #4, #12			@ primary part number
 
+	/* Cortex-A8 Errata */
+	ldr	r10, =0x00000c08		@ Cortex-A8 primary part number
+	teq	r0, r10
+	bne	2f
 #ifdef CONFIG_ARM_ERRATA_430973
 	teq	r5, #0x00100000			@ only present in r1p*
 	mrceq	p15, 0, r10, c1, c0, 1		@ read aux control register
@@ -213,21 +219,42 @@ __v7_setup:
 	mcreq	p15, 0, r10, c1, c0, 1		@ write aux control register
 #endif
 #ifdef CONFIG_ARM_ERRATA_458693
-	teq	r0, #0x20			@ only present in r2p0
+	teq	r6, #0x20			@ only present in r2p0
 	mrceq	p15, 0, r10, c1, c0, 1		@ read aux control register
 	orreq	r10, r10, #(1 << 5)		@ set L1NEON to 1
 	orreq	r10, r10, #(1 << 9)		@ set PLDNOP to 1
 	mcreq	p15, 0, r10, c1, c0, 1		@ write aux control register
 #endif
 #ifdef CONFIG_ARM_ERRATA_460075
-	teq	r0, #0x20			@ only present in r2p0
+	teq	r6, #0x20			@ only present in r2p0
 	mrceq	p15, 1, r10, c9, c0, 2		@ read L2 cache aux ctrl register
 	tsteq	r10, #1 << 22
 	orreq	r10, r10, #(1 << 22)		@ set the Write Allocate disable bit
 	mcreq	p15, 1, r10, c9, c0, 2		@ write the L2 cache aux ctrl register
 #endif
+	b	3f
+
+	/* Cortex-A9 Errata */
+2:	ldr	r10, =0x00000c09		@ Cortex-A9 primary part number
+	teq	r0, r10
+	bne	3f
+#ifdef CONFIG_ARM_ERRATA_742230
+	cmp	r6, #0x22			@ only present up to r2p2
+	mrcle	p15, 0, r10, c15, c0, 1		@ read diagnostic register
+	orrle	r10, r10, #1 << 4		@ set bit #4
+	mcrle	p15, 0, r10, c15, c0, 1		@ write diagnostic register
+#endif
+#ifdef CONFIG_ARM_ERRATA_742231
+	teq	r6, #0x20			@ present in r2p0
+	teqne	r6, #0x21			@ present in r2p1
+	teqne	r6, #0x22			@ present in r2p2
+	mrceq	p15, 0, r10, c15, c0, 1		@ read diagnostic register
+	orreq	r10, r10, #1 << 12		@ set bit #12
+	orreq	r10, r10, #1 << 22		@ set bit #22
+	mcreq	p15, 0, r10, c15, c0, 1		@ write diagnostic register
+#endif
 
-2:	mov	r10, #0
+3:	mov	r10, #0
 #ifdef HARVARD_CACHE
 	mcr	p15, 0, r10, c7, c5, 0		@ I+BTB cache invalidate
 #endif
@@ -323,6 +350,29 @@ cpu_elf_name:
 
 	.section ".proc.info.init", #alloc, #execinstr
 
+	.type   __v7_ca9mp_proc_info, #object
+__v7_ca9mp_proc_info:
+	.long	0x410fc090		@ Required ID value
+	.long	0xff0ffff0		@ Mask for ID
+	.long   PMD_TYPE_SECT | \
+		PMD_SECT_AP_WRITE | \
+		PMD_SECT_AP_READ | \
+		PMD_FLAGS
+	.long   PMD_TYPE_SECT | \
+		PMD_SECT_XN | \
+		PMD_SECT_AP_WRITE | \
+		PMD_SECT_AP_READ
+	b	__v7_ca9mp_setup
+	.long	cpu_arch_name
+	.long	cpu_elf_name
+	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
+	.long	cpu_v7_name
+	.long	v7_processor_functions
+	.long	v7wbi_tlb_fns
+	.long	v6_user_fns
+	.long	v7_cache_fns
+	.size	__v7_ca9mp_proc_info, . - __v7_ca9mp_proc_info
+
 	/*
 	 * Match any ARMv7 processor core.
 	 */
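In C terms, the register work at the top of __v7_setup decodes the main ID register (MIDR) like this (a sketch; read_cpuid_id() wraps the same mrc):

	unsigned int midr = read_cpuid_id();	  /* mrc p15, 0, rX, c0, c0, 0 */
	unsigned int part = (midr >> 4) & 0xfff;  /* 0xc08 = Cortex-A8, 0xc09 = Cortex-A9 */
	unsigned int rNpM = ((midr >> 16) & 0xf0) | (midr & 0xf);
						  /* combined variant+revision; 0x20 == r2p0 */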

+ 12 - 21
arch/arm/plat-nomadik/timer.c

@@ -1,5 +1,5 @@
 /*
- *  linux/arch/arm/mach-nomadik/timer.c
+ *  linux/arch/arm/plat-nomadik/timer.c
  *
  * Copyright (C) 2008 STMicroelectronics
  * Copyright (C) 2010 Alessandro Rubini
@@ -75,7 +75,7 @@ static void nmdk_clkevt_mode(enum clock_event_mode mode,
 		cr = readl(mtu_base + MTU_CR(1));
 		writel(0, mtu_base + MTU_LR(1));
 		writel(cr | MTU_CRn_ENA, mtu_base + MTU_CR(1));
-		writel(0x2, mtu_base + MTU_IMSC);
+		writel(1 << 1, mtu_base + MTU_IMSC);
 		break;
 	case CLOCK_EVT_MODE_SHUTDOWN:
 	case CLOCK_EVT_MODE_UNUSED:
@@ -131,25 +131,23 @@ void __init nmdk_timer_init(void)
 {
 	unsigned long rate;
 	struct clk *clk0;
-	struct clk *clk1;
-	u32 cr;
+	u32 cr = MTU_CRn_32BITS;
 
 	clk0 = clk_get_sys("mtu0", NULL);
 	BUG_ON(IS_ERR(clk0));
 
-	clk1 = clk_get_sys("mtu1", NULL);
-	BUG_ON(IS_ERR(clk1));
-
 	clk_enable(clk0);
-	clk_enable(clk1);
 
 	/*
-	 * Tick rate is 2.4MHz for Nomadik and 110MHz for ux500:
-	 * use a divide-by-16 counter if it's more than 16MHz
+	 * Tick rate is 2.4MHz for Nomadik and 2.4Mhz, 100MHz or 133 MHz
+	 * for ux500.
+	 * Use a divide-by-16 counter if the tick rate is more than 32MHz.
+	 * At 32 MHz, the timer (with 32 bit counter) can be programmed
+	 * to wake-up at a max 127s a head in time. Dividing a 2.4 MHz timer
+	 * with 16 gives too low timer resolution.
 	 */
-	cr = MTU_CRn_32BITS;;
 	rate = clk_get_rate(clk0);
-	if (rate > 16 << 20) {
+	if (rate > 32000000) {
 		rate /= 16;
 		cr |= MTU_CRn_PRESCALE_16;
 	} else {
@@ -170,15 +168,8 @@ void __init nmdk_timer_init(void)
 		pr_err("timer: failed to initialize clock source %s\n",
 		       nmdk_clksrc.name);
 
-	/* Timer 1 is used for events, fix according to rate */
-	cr = MTU_CRn_32BITS;
-	rate = clk_get_rate(clk1);
-	if (rate > 16 << 20) {
-		rate /= 16;
-		cr |= MTU_CRn_PRESCALE_16;
-	} else {
-		cr |= MTU_CRn_PRESCALE_1;
-	}
+	/* Timer 1 is used for events */
+
 	clockevents_calc_mult_shift(&nmdk_clkevt, rate, MTU_MIN_RANGE);
 
 	writel(cr | MTU_CRn_ONESHOT, mtu_base + MTU_CR(1)); /* off, currently */
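Worked numbers behind the new 32 MHz threshold, using the figures from the comment above (a sketch, not from the source):

	unsigned long max_ahead = 0xffffffffUL / rate;	/* seconds a 32-bit counter can
							 * be programmed ahead: ~134 at
							 * 32 MHz, so >2 minutes */
	/* dividing a 2.4 MHz clock by 16 leaves 150 kHz, i.e. ~6.7 us per tick,
	 * which is why slow clocks keep MTU_CRn_PRESCALE_1 */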

+ 1 - 1
arch/arm/plat-omap/Kconfig

@@ -33,7 +33,7 @@ config OMAP_DEBUG_DEVICES
 config OMAP_DEBUG_LEDS
 	bool
 	depends on OMAP_DEBUG_DEVICES
-	default y if LEDS
+	default y if LEDS_CLASS
 
 config OMAP_RESET_CLOCKS
 	bool "Reset unused clocks during boot"

+ 1 - 1
arch/arm/plat-omap/mcbsp.c

@@ -156,7 +156,7 @@ static irqreturn_t omap_mcbsp_rx_irq_handler(int irq, void *dev_id)
 		/* Writing zero to RSYNC_ERR clears the IRQ */
 		MCBSP_WRITE(mcbsp_rx, SPCR1, MCBSP_READ_CACHE(mcbsp_rx, SPCR1));
 	} else {
-		complete(&mcbsp_rx->tx_irq_completion);
+		complete(&mcbsp_rx->rx_irq_completion);
 	}
 
 	return IRQ_HANDLED;

+ 5 - 20
arch/arm/plat-omap/sram.c

@@ -220,20 +220,7 @@ void __init omap_map_sram(void)
 	if (omap_sram_size == 0)
 		return;
 
-	if (cpu_is_omap24xx()) {
-		omap_sram_io_desc[0].virtual = OMAP2_SRAM_VA;
-
-		base = OMAP2_SRAM_PA;
-		base = ROUND_DOWN(base, PAGE_SIZE);
-		omap_sram_io_desc[0].pfn = __phys_to_pfn(base);
-	}
-
 	if (cpu_is_omap34xx()) {
-		omap_sram_io_desc[0].virtual = OMAP3_SRAM_VA;
-		base = OMAP3_SRAM_PA;
-		base = ROUND_DOWN(base, PAGE_SIZE);
-		omap_sram_io_desc[0].pfn = __phys_to_pfn(base);
-
 		/*
 		 * SRAM must be marked as non-cached on OMAP3 since the
 		 * CORE DPLL M2 divider change code (in SRAM) runs with the
@@ -244,13 +231,11 @@ void __init omap_map_sram(void)
 		omap_sram_io_desc[0].type = MT_MEMORY_NONCACHED;
 	}
 
-	if (cpu_is_omap44xx()) {
-		omap_sram_io_desc[0].virtual = OMAP4_SRAM_VA;
-		base = OMAP4_SRAM_PA;
-		base = ROUND_DOWN(base, PAGE_SIZE);
-		omap_sram_io_desc[0].pfn = __phys_to_pfn(base);
-	}
-	omap_sram_io_desc[0].length = 1024 * 1024;	/* Use section desc */
+	omap_sram_io_desc[0].virtual = omap_sram_base;
+	base = omap_sram_start;
+	base = ROUND_DOWN(base, PAGE_SIZE);
+	omap_sram_io_desc[0].pfn = __phys_to_pfn(base);
+	omap_sram_io_desc[0].length = ROUND_DOWN(omap_sram_size, PAGE_SIZE);
 	iotable_init(omap_sram_io_desc, ARRAY_SIZE(omap_sram_io_desc));
 
 	printk(KERN_INFO "SRAM: Mapped pa 0x%08lx to va 0x%08lx size: 0x%lx\n",

+ 1 - 2
arch/avr32/kernel/module.c

@@ -314,10 +314,9 @@ int module_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
 	vfree(module->arch.syminfo);
 	module->arch.syminfo = NULL;
 
-	return module_bug_finalize(hdr, sechdrs, module);
+	return 0;
 }
 
 void module_arch_cleanup(struct module *module)
 {
-	module_bug_cleanup(module);
 }

+ 1 - 2
arch/h8300/kernel/module.c

@@ -112,10 +112,9 @@ int module_finalize(const Elf_Ehdr *hdr,
 		    const Elf_Shdr *sechdrs,
 		    struct module *me)
 {
-	return module_bug_finalize(hdr, sechdrs, me);
+	return 0;
 }
 
 void module_arch_cleanup(struct module *mod)
 {
-	module_bug_cleanup(mod);
 }

+ 0 - 1
arch/m32r/include/asm/signal.h

@@ -157,7 +157,6 @@ typedef struct sigaltstack {
 #undef __HAVE_ARCH_SIG_BITOPS
 
 struct pt_regs;
-extern int do_signal(struct pt_regs *regs, sigset_t *oldset);
 
 #define ptrace_signal_deliver(regs, cookie)	do { } while (0)
 

+ 1 - 0
arch/m32r/include/asm/unistd.h

@@ -351,6 +351,7 @@
 #define __ARCH_WANT_SYS_OLD_GETRLIMIT /*will be unused*/
 #define __ARCH_WANT_SYS_OLDUMOUNT
 #define __ARCH_WANT_SYS_RT_SIGACTION
+#define __ARCH_WANT_SYS_RT_SIGSUSPEND
 
 #define __IGNORE_lchown
 #define __IGNORE_setuid

+ 2 - 3
arch/m32r/kernel/entry.S

@@ -235,10 +235,9 @@ work_resched:
 work_notifysig:				; deal with pending signals and
 					; notify-resume requests
 	mv	r0, sp			; arg1 : struct pt_regs *regs
-	ldi	r1, #0			; arg2 : sigset_t *oldset
-	mv	r2, r9			; arg3 : __u32 thread_info_flags
+	mv	r1, r9			; arg2 : __u32 thread_info_flags
 	bl	do_notify_resume
-	bra	restore_all
+	bra	resume_userspace
 
 	; perform syscall exit tracing
 	ALIGN

+ 4 - 3
arch/m32r/kernel/ptrace.c

@@ -592,16 +592,17 @@ void user_enable_single_step(struct task_struct *child)
 
 	if (access_process_vm(child, pc&~3, &insn, sizeof(insn), 0)
 	    != sizeof(insn))
-		break;
+		return -EIO;
 
 	compute_next_pc(insn, pc, &next_pc, child);
 	if (next_pc & 0x80000000)
-		break;
+		return -EIO;
 
 	if (embed_debug_trap(child, next_pc))
-		break;
+		return -EIO;
 
 	invalidate_cache();
+	return 0;
 }
 
 void user_disable_single_step(struct task_struct *child)

+ 41 - 64
arch/m32r/kernel/signal.c

@@ -28,37 +28,6 @@
 
 #define DEBUG_SIG 0
 
-#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-
-int do_signal(struct pt_regs *, sigset_t *);
-
-asmlinkage int
-sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize,
-		  unsigned long r2, unsigned long r3, unsigned long r4,
-		  unsigned long r5, unsigned long r6, struct pt_regs *regs)
-{
-	sigset_t newset;
-
-	/* XXX: Don't preclude handling different sized sigset_t's.  */
-	if (sigsetsize != sizeof(sigset_t))
-		return -EINVAL;
-
-	if (copy_from_user(&newset, unewset, sizeof(newset)))
-		return -EFAULT;
-	sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP));
-
-	spin_lock_irq(&current->sighand->siglock);
-	current->saved_sigmask = current->blocked;
-	current->blocked = newset;
-	recalc_sigpending();
-	spin_unlock_irq(&current->sighand->siglock);
-
-	current->state = TASK_INTERRUPTIBLE;
-	schedule();
-	set_thread_flag(TIF_RESTORE_SIGMASK);
-	return -ERESTARTNOHAND;
-}
-
 asmlinkage int
 sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss,
 		unsigned long r2, unsigned long r3, unsigned long r4,
@@ -218,7 +187,7 @@ get_sigframe(struct k_sigaction *ka, unsigned long sp, size_t frame_size)
 	return (void __user *)((sp - frame_size) & -8ul);
 }
 
-static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
+static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
 			   sigset_t *set, struct pt_regs *regs)
 {
 	struct rt_sigframe __user *frame;
@@ -275,22 +244,34 @@ static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
 		current->comm, current->pid, frame, regs->pc);
 #endif
 
-	return;
+	return 0;
 
 give_sigsegv:
 	force_sigsegv(sig, current);
+	return -EFAULT;
+}
+
+static int prev_insn(struct pt_regs *regs)
+{
+	u16 inst;
+	if (get_user(&inst, (u16 __user *)(regs->bpc - 2)))
+		return -EFAULT;
+	if ((inst & 0xfff0) == 0x10f0)	/* trap ? */
+		regs->bpc -= 2;
+	else
+		regs->bpc -= 4;
+	regs->syscall_nr = -1;
+	return 0;
 }
 
 /*
  * OK, we're invoking a handler
  */
 
-static void
+static int
 handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info,
 	      sigset_t *oldset, struct pt_regs *regs)
 {
-	unsigned short inst;
-
 	/* Are we from a system call? */
 	if (regs->syscall_nr >= 0) {
 		/* If so, check system call restarting.. */
@@ -308,16 +289,14 @@ handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info,
 			/* fallthrough */
 			case -ERESTARTNOINTR:
 				regs->r0 = regs->orig_r0;
-				inst = *(unsigned short *)(regs->bpc - 2);
-				if ((inst & 0xfff0) == 0x10f0)	/* trap ? */
-					regs->bpc -= 2;
-				else
-					regs->bpc -= 4;
+				if (prev_insn(regs) < 0)
+					return -EFAULT;
 		}
 	}
 
 	/* Set up the stack frame */
-	setup_rt_frame(sig, ka, info, oldset, regs);
+	if (setup_rt_frame(sig, ka, info, oldset, regs))
+		return -EFAULT;
 
 	spin_lock_irq(&current->sighand->siglock);
 	sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
@@ -325,6 +304,7 @@ handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info,
 		sigaddset(&current->blocked,sig);
 	recalc_sigpending();
 	spin_unlock_irq(&current->sighand->siglock);
+	return 0;
 }
 
 /*
@@ -332,12 +312,12 @@ handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info,
 * want to handle. Thus you cannot kill init even with a SIGKILL even by
 * mistake.
 */
-int do_signal(struct pt_regs *regs, sigset_t *oldset)
+static void do_signal(struct pt_regs *regs)
 {
 	siginfo_t info;
 	int signr;
 	struct k_sigaction ka;
-	unsigned short inst;
+	sigset_t *oldset;
 
 	/*
 	 * We want the common case to go fast, which
@@ -346,12 +326,14 @@ int do_signal(struct pt_regs *regs, sigset_t *oldset)
 	 * if so.
 	 */
 	if (!user_mode(regs))
-		return 1;
+		return;
 
 	if (try_to_freeze())
 		goto no_signal;
 
-	if (!oldset)
+	if (test_thread_flag(TIF_RESTORE_SIGMASK))
+		oldset = &current->saved_sigmask;
+	else
 		oldset = &current->blocked;
 
 	signr = get_signal_to_deliver(&info, &ka, regs, NULL);
@@ -363,8 +345,10 @@ int do_signal(struct pt_regs *regs, sigset_t *oldset)
 		 */
 
 		/* Whee!  Actually deliver the signal.  */
-		handle_signal(signr, &ka, &info, oldset, regs);
-		return 1;
+		if (handle_signal(signr, &ka, &info, oldset, regs) == 0)
+			clear_thread_flag(TIF_RESTORE_SIGMASK);
+
+		return;
 	}
 
  no_signal:
@@ -375,31 +359,24 @@ int do_signal(struct pt_regs *regs, sigset_t *oldset)
 		    regs->r0 == -ERESTARTSYS ||
 		    regs->r0 == -ERESTARTNOINTR) {
 			regs->r0 = regs->orig_r0;
-			inst = *(unsigned short *)(regs->bpc - 2);
-			if ((inst & 0xfff0) == 0x10f0)	/* trap ? */
-				regs->bpc -= 2;
-			else
-				regs->bpc -= 4;
-		}
-		if (regs->r0 == -ERESTART_RESTARTBLOCK){
+			prev_insn(regs);
+		} else if (regs->r0 == -ERESTART_RESTARTBLOCK){
 			regs->r0 = regs->orig_r0;
 			regs->r7 = __NR_restart_syscall;
-			inst = *(unsigned short *)(regs->bpc - 2);
-			if ((inst & 0xfff0) == 0x10f0)	/* trap ? */
-				regs->bpc -= 2;
-			else
-				regs->bpc -= 4;
+			prev_insn(regs);
 		}
 	}
-	return 0;
+	if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
+		clear_thread_flag(TIF_RESTORE_SIGMASK);
+		sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
+	}
 }
 
 /*
  * notification of userspace execution resumption
  * - triggered by current->work.notify_resume
 */
-void do_notify_resume(struct pt_regs *regs, sigset_t *oldset,
-		      __u32 thread_info_flags)
+void do_notify_resume(struct pt_regs *regs, __u32 thread_info_flags)
 {
 	/* Pending single-step? */
 	if (thread_info_flags & _TIF_SINGLESTEP)
@@ -407,7 +384,7 @@ void do_notify_resume(struct pt_regs *regs, sigset_t *oldset,
 
 	/* deal with pending signal delivery */
 	if (thread_info_flags & _TIF_SIGPENDING)
-		do_signal(regs,oldset);
+		do_signal(regs);
 
 	if (thread_info_flags & _TIF_NOTIFY_RESUME) {
 		clear_thread_flag(TIF_NOTIFY_RESUME);

+ 3 - 3
arch/m68k/mac/macboing.c

@@ -162,7 +162,7 @@ static void mac_init_asc( void )
 void mac_mksound( unsigned int freq, unsigned int length )
 {
 	__u32 cfreq = ( freq << 5 ) / 468;
-	__u32 flags;
+	unsigned long flags;
 	int i;
 
 	if ( mac_special_bell == NULL )
@@ -224,7 +224,7 @@ static void mac_nosound( unsigned long ignored )
  */
 static void mac_quadra_start_bell( unsigned int freq, unsigned int length, unsigned int volume )
 {
-	__u32 flags;
+	unsigned long flags;
 
 	/* if the bell is already ringing, ring longer */
 	if ( mac_bell_duration > 0 )
@@ -271,7 +271,7 @@ static void mac_quadra_start_bell( unsigned int freq, unsigned int length, unsig
 static void mac_quadra_ring_bell( unsigned long ignored )
 {
 	int	i, count = mac_asc_samplespersec / HZ;
-	__u32 flags;
+	unsigned long flags;
 
 	/*
 	 * we neither want a sound buffer overflow nor underflow, so we need to match

+ 19 - 2
arch/mips/Kconfig

@@ -13,6 +13,7 @@ config MIPS
 	select HAVE_KPROBES
 	select HAVE_KRETPROBES
 	select RTC_LIB if !MACH_LOONGSON
+	select GENERIC_ATOMIC64 if !64BIT
 
 mainmenu "Linux/MIPS Kernel Configuration"
 
@@ -1646,8 +1647,16 @@ config MIPS_MT_SMP
 	select SYS_SUPPORTS_SMP
 	select SMP_UP
 	help
-	  This is a kernel model which is also known a VSMP or lately
-	  has been marketesed into SMVP.
+	  This is a kernel model which is known a VSMP but lately has been
+	  marketesed into SMVP.
+	  Virtual SMP uses the processor's VPEs  to implement virtual
+	  processors. In currently available configuration of the 34K processor
+	  this allows for a dual processor. Both processors will share the same
+	  primary caches; each will obtain the half of the TLB for it's own
+	  exclusive use. For a layman this model can be described as similar to
+	  what Intel calls Hyperthreading.
+
+	  For further information see http://www.linux-mips.org/wiki/34K#VSMP
 
 config MIPS_MT_SMTC
 	bool "SMTC: Use all TCs on all VPEs for SMP"
@@ -1664,6 +1673,14 @@ config MIPS_MT_SMTC
 	help
 	  This is a kernel model which is known a SMTC or lately has been
 	  marketesed into SMVP.
+	  is presenting the available TC's of the core as processors to Linux.
+	  On currently available 34K processors this means a Linux system will
+	  see up to 5 processors. The implementation of the SMTC kernel differs
+	  significantly from VSMP and cannot efficiently coexist in the same
+	  kernel binary so the choice between VSMP and SMTC is a compile time
+	  decision.
+
+	  For further information see http://www.linux-mips.org/wiki/34K#SMTC
 
 endchoice
 

+ 2 - 3
arch/mips/alchemy/common/prom.c

@@ -43,7 +43,7 @@ int prom_argc;
 char **prom_argv;
 char **prom_envp;
 
-void prom_init_cmdline(void)
+void __init prom_init_cmdline(void)
 {
 	int i;
 
@@ -104,7 +104,7 @@ static inline void str2eaddr(unsigned char *ea, unsigned char *str)
 	}
 }
 
-int prom_get_ethernet_addr(char *ethernet_addr)
+int __init prom_get_ethernet_addr(char *ethernet_addr)
 {
 	char *ethaddr_str;
 
@@ -123,7 +123,6 @@ int prom_get_ethernet_addr(char *ethernet_addr)
 
 	return 0;
 }
-EXPORT_SYMBOL(prom_get_ethernet_addr);
 
 void __init prom_free_prom_memory(void)
 {

+ 1 - 1
arch/mips/boot/compressed/Makefile

@@ -59,7 +59,7 @@ $(obj)/piggy.o: $(obj)/dummy.o $(obj)/vmlinux.bin.z FORCE
 hostprogs-y := calc_vmlinuz_load_addr
 
 VMLINUZ_LOAD_ADDRESS = $(shell $(obj)/calc_vmlinuz_load_addr \
-		$(objtree)/$(KBUILD_IMAGE) $(VMLINUX_LOAD_ADDRESS))
+		$(obj)/vmlinux.bin $(VMLINUX_LOAD_ADDRESS))
 
 vmlinuzobjs-y += $(obj)/piggy.o
 

+ 4 - 0
arch/mips/cavium-octeon/Kconfig

@@ -83,3 +83,7 @@ config ARCH_SPARSEMEM_ENABLE
 	def_bool y
 	select SPARSEMEM_STATIC
 	depends on CPU_CAVIUM_OCTEON
+
+config CAVIUM_OCTEON_HELPER
+	def_bool y
+	depends on OCTEON_ETHERNET || PCI

+ 1 - 1
arch/mips/cavium-octeon/cpu.c

@@ -41,7 +41,7 @@ static int cnmips_cu2_call(struct notifier_block *nfb, unsigned long action,
 	return NOTIFY_OK;		/* Let default notifier send signals */
 }
 
-static int cnmips_cu2_setup(void)
+static int __init cnmips_cu2_setup(void)
 {
 	return cu2_notifier(cnmips_cu2_call, 0);
 }

+ 1 - 1
arch/mips/cavium-octeon/executive/Makefile

@@ -11,4 +11,4 @@
 
 obj-y += cvmx-bootmem.o cvmx-l2c.o cvmx-sysinfo.o octeon-model.o
 
-obj-$(CONFIG_PCI) += cvmx-helper-errata.o cvmx-helper-jtag.o
+obj-$(CONFIG_CAVIUM_OCTEON_HELPER) += cvmx-helper-errata.o cvmx-helper-jtag.o

+ 4 - 0
arch/mips/include/asm/atomic.h

@@ -782,6 +782,10 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
  */
 #define atomic64_add_negative(i, v) (atomic64_add_return(i, (v)) < 0)
 
+#else /* !CONFIG_64BIT */
+
+#include <asm-generic/atomic64.h>
+
 #endif /* CONFIG_64BIT */
 
 /*
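With GENERIC_ATOMIC64 now selected for 32-bit MIPS (see the Kconfig hunk earlier in this commit), the asm-generic fallback provides these operations via hashed spinlocks; callers are unchanged. A small sketch:

	static atomic64_t total = ATOMIC64_INIT(0);

	atomic64_add(4096, &total);		/* now available on !64BIT too */
	long long snapshot = atomic64_read(&total);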

+ 1 - 1
arch/mips/include/asm/cop2.h

@@ -24,7 +24,7 @@ extern int cu2_notifier_call_chain(unsigned long val, void *v);
 
 #define cu2_notifier(fn, pri)						\
 ({									\
-	static struct notifier_block fn##_nb __cpuinitdata = {		\
+	static struct notifier_block fn##_nb = {			\
 		.notifier_call = fn,					\
 		.priority = pri						\
 	};								\

+ 1 - 0
arch/mips/include/asm/gic.h

@@ -321,6 +321,7 @@ struct gic_intrmask_regs {
  */
 struct gic_intr_map {
 	unsigned int cpunum;	/* Directed to this CPU */
+#define GIC_UNUSED		0xdead			/* Dummy data */
 	unsigned int pin;	/* Directed to this Pin */
 	unsigned int polarity;	/* Polarity : +/-	*/
 	unsigned int trigtype;	/* Trigger  : Edge/Levl */

+ 1 - 1
arch/mips/include/asm/mach-tx49xx/kmalloc.h

@@ -1,6 +1,6 @@
 #ifndef __ASM_MACH_TX49XX_KMALLOC_H
 #define __ASM_MACH_TX49XX_KMALLOC_H
 
-#define ARCH_KMALLOC_MINALIGN	L1_CACHE_BYTES
+#define ARCH_DMA_MINALIGN L1_CACHE_BYTES
 
 #endif /* __ASM_MACH_TX49XX_KMALLOC_H */

+ 0 - 3
arch/mips/include/asm/mips-boards/maltaint.h

@@ -88,9 +88,6 @@
 
 #define GIC_EXT_INTR(x)		x
 
-/* Dummy data */
-#define X			0xdead
-
 /* External Interrupts used for IPI */
 #define GIC_IPI_EXT_INTR_RESCHED_VPE0	16
 #define GIC_IPI_EXT_INTR_CALLFNC_VPE0	17

+ 14 - 0
arch/mips/include/asm/page.h

@@ -150,6 +150,20 @@ typedef struct { unsigned long pgprot; } pgprot_t;
     ((unsigned long)(x) - PAGE_OFFSET + PHYS_OFFSET)
 #endif
 #define __va(x)		((void *)((unsigned long)(x) + PAGE_OFFSET - PHYS_OFFSET))
+
+/*
+ * RELOC_HIDE was originally added by 6007b903dfe5f1d13e0c711ac2894bdd4a61b1ad
+ * (lmo) rsp. 8431fd094d625b94d364fe393076ccef88e6ce18 (kernel.org).  The
+ * discussion can be found in lkml posting
+ * <a2ebde260608230500o3407b108hc03debb9da6e62c@mail.gmail.com> which is
+ * archived at http://lists.linuxcoding.com/kernel/2006-q3/msg17360.html
+ *
+ * It is unclear if the misscompilations mentioned in
+ * http://lkml.org/lkml/2010/8/8/138 also affect MIPS so we keep this one
+ * until GCC 3.x has been retired before we can apply
+ * https://patchwork.linux-mips.org/patch/1541/
+ */
+
 #define __pa_symbol(x)	__pa(RELOC_HIDE((unsigned long)(x), 0))
 
 #define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)

+ 2 - 1
arch/mips/include/asm/thread_info.h

@@ -146,7 +146,8 @@ register struct thread_info *__current_thread_info __asm__("$28");
 #define _TIF_LOAD_WATCH		(1<<TIF_LOAD_WATCH)
 
 /* work to do on interrupt/exception return */
-#define _TIF_WORK_MASK		(0x0000ffef & ~_TIF_SECCOMP)
+#define _TIF_WORK_MASK		(0x0000ffef &				\
+					~(_TIF_SECCOMP | _TIF_SYSCALL_AUDIT))
 /* work to do on any return to u-space */
 #define _TIF_ALLWORK_MASK	(0x8000ffff & ~_TIF_SECCOMP)
 

+ 15 - 6
arch/mips/include/asm/unistd.h

@@ -356,16 +356,19 @@
 #define __NR_perf_event_open		(__NR_Linux + 333)
 #define __NR_accept4			(__NR_Linux + 334)
 #define __NR_recvmmsg			(__NR_Linux + 335)
+#define __NR_fanotify_init		(__NR_Linux + 336)
+#define __NR_fanotify_mark		(__NR_Linux + 337)
+#define __NR_prlimit64			(__NR_Linux + 338)
 
 /*
  * Offset of the last Linux o32 flavoured syscall
 */
-#define __NR_Linux_syscalls		335
+#define __NR_Linux_syscalls		338
 
 #endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */
 
 #define __NR_O32_Linux			4000
-#define __NR_O32_Linux_syscalls		335
+#define __NR_O32_Linux_syscalls		338
 
 #if _MIPS_SIM == _MIPS_SIM_ABI64
 
@@ -668,16 +671,19 @@
 #define __NR_perf_event_open		(__NR_Linux + 292)
 #define __NR_accept4			(__NR_Linux + 293)
 #define __NR_recvmmsg			(__NR_Linux + 294)
+#define __NR_fanotify_init		(__NR_Linux + 295)
+#define __NR_fanotify_mark		(__NR_Linux + 296)
+#define __NR_prlimit64			(__NR_Linux + 297)
 
 /*
  * Offset of the last Linux 64-bit flavoured syscall
 */
-#define __NR_Linux_syscalls		294
+#define __NR_Linux_syscalls		297
 
 #endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */
 
 #define __NR_64_Linux			5000
-#define __NR_64_Linux_syscalls		294
+#define __NR_64_Linux_syscalls		297
 
 #if _MIPS_SIM == _MIPS_SIM_NABI32
 
@@ -985,16 +991,19 @@
 #define __NR_accept4			(__NR_Linux + 297)
 #define __NR_recvmmsg			(__NR_Linux + 298)
 #define __NR_getdents64			(__NR_Linux + 299)
+#define __NR_fanotify_init		(__NR_Linux + 300)
+#define __NR_fanotify_mark		(__NR_Linux + 301)
+#define __NR_prlimit64			(__NR_Linux + 302)
 
 /*
  * Offset of the last N32 flavoured syscall
 */
-#define __NR_Linux_syscalls		299
+#define __NR_Linux_syscalls		302
 
 #endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */
 
 #define __NR_N32_Linux			6000
-#define __NR_N32_Linux_syscalls		299
+#define __NR_N32_Linux_syscalls		302
 
 #ifdef __KERNEL__
 

+ 2 - 3
arch/mips/kernel/irq-gic.c

@@ -7,7 +7,6 @@
 #include <asm/io.h>
 #include <asm/gic.h>
 #include <asm/gcmpregs.h>
-#include <asm/mips-boards/maltaint.h>
 #include <asm/irq.h>
 #include <linux/hardirq.h>
 #include <asm-generic/bitops/find.h>
@@ -131,7 +130,7 @@ static int gic_set_affinity(unsigned int irq, const struct cpumask *cpumask)
 	int		i;
 
 	irq -= _irqbase;
-	pr_debug(KERN_DEBUG "%s(%d) called\n", __func__, irq);
+	pr_debug("%s(%d) called\n", __func__, irq);
 	cpumask_and(&tmp, cpumask, cpu_online_mask);
 	if (cpus_empty(tmp))
 		return -1;
@@ -222,7 +221,7 @@ static void __init gic_basic_init(int numintrs, int numvpes,
 	/* Setup specifics */
 	for (i = 0; i < mapsize; i++) {
 		cpu = intrmap[i].cpunum;
-		if (cpu == X)
+		if (cpu == GIC_UNUSED)
 			continue;
 		if (cpu == 0 && i != 0 && intrmap[i].flags == 0)
 			continue;

+ 1 - 1
arch/mips/kernel/kgdb.c

@@ -283,7 +283,7 @@ static int kgdb_mips_notify(struct notifier_block *self, unsigned long cmd,
 	struct pt_regs *regs = args->regs;
 	int trap = (regs->cp0_cause & 0x7c) >> 2;
 
-	/* Userpace events, ignore. */
+	/* Userspace events, ignore. */
 	if (user_mode(regs))
 		return NOTIFY_DONE;
 

+ 1 - 1
arch/mips/kernel/kspd.c

@@ -251,7 +251,7 @@ void sp_work_handle_request(void)
  		memset(&tz, 0, sizeof(tz));
  		if ((ret.retval = sp_syscall(__NR_gettimeofday, (int)&tv,
 					     (int)&tz, 0, 0)) == 0)
-		ret.retval = tv.tv_sec;
+			ret.retval = tv.tv_sec;
 		break;
 
  	case MTSP_SYSCALL_EXIT:

+ 7 - 0
arch/mips/kernel/linux32.c

@@ -341,3 +341,10 @@ asmlinkage long sys32_lookup_dcookie(u32 a0, u32 a1, char __user *buf,
 {
 	return sys_lookup_dcookie(merge_64(a0, a1), buf, len);
 }
+
+SYSCALL_DEFINE6(32_fanotify_mark, int, fanotify_fd, unsigned int, flags,
+		u64, a3, u64, a4, int, dfd, const char  __user *, pathname)
+{
+	return sys_fanotify_mark(fanotify_fd, flags, merge_64(a3, a4),
+				 dfd, pathname);
+}
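merge_64() is the MIPS compat helper that rebuilds one 64-bit value from the two 32-bit registers an o32 caller had to split it into; conceptually (a sketch, ignoring the endian-dependent register order the real macro handles):

	static inline u64 merge_64_sketch(u32 hi, u32 lo)
	{
		return ((u64)hi << 32) | lo;	/* two ABI registers -> one u64 */
	}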

+ 4 - 1
arch/mips/kernel/scall32-o32.S

@@ -583,7 +583,10 @@ einval:	li	v0, -ENOSYS
 	sys	sys_rt_tgsigqueueinfo	4
 	sys	sys_perf_event_open	5
 	sys	sys_accept4		4
-	sys     sys_recvmmsg            5
+	sys	sys_recvmmsg		5	/* 4335 */
+	sys	sys_fanotify_init	2
+	sys	sys_fanotify_mark	6
+	sys	sys_prlimit64		4
 	.endm
 
 	/* We pre-compute the number of _instruction_ bytes needed to
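The table position is the ABI: with the o32 base of 4000 (see the unistd.h hunk above), the new entries land at 4336-4338. A hedged userspace sketch, calling by number until libc headers catch up:

	#include <unistd.h>

	int fd = syscall(4000 + 336 /* __NR_fanotify_init */, 0, 0);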

+ 5 - 2
arch/mips/kernel/scall64-64.S

@@ -416,9 +416,12 @@ sys_call_table:
 	PTR	sys_pipe2
 	PTR	sys_inotify_init1
 	PTR	sys_preadv
-	PTR	sys_pwritev			/* 5390 */
+	PTR	sys_pwritev			/* 5290 */
 	PTR	sys_rt_tgsigqueueinfo
 	PTR	sys_perf_event_open
 	PTR	sys_accept4
-	PTR     sys_recvmmsg
+	PTR	sys_recvmmsg
+	PTR	sys_fanotify_init		/* 5295 */
+	PTR	sys_fanotify_mark
+	PTR	sys_prlimit64
 	.size	sys_call_table,.-sys_call_table

+ 4 - 1
arch/mips/kernel/scall64-n32.S

@@ -419,5 +419,8 @@ EXPORT(sysn32_call_table)
 	PTR	sys_perf_event_open
 	PTR	sys_accept4
 	PTR     compat_sys_recvmmsg
-	PTR     sys_getdents
+	PTR     sys_getdents64
+	PTR	sys_fanotify_init		/* 6300 */
+	PTR	sys_fanotify_mark
+	PTR	sys_prlimit64
 	.size	sysn32_call_table,.-sysn32_call_table

+ 4 - 1
arch/mips/kernel/scall64-o32.S

@@ -538,5 +538,8 @@ sys_call_table:
 	PTR	compat_sys_rt_tgsigqueueinfo
 	PTR	sys_perf_event_open
 	PTR	sys_accept4
-	PTR     compat_sys_recvmmsg
+	PTR	compat_sys_recvmmsg		/* 4335 */
+	PTR	sys_fanotify_init
+	PTR	sys_32_fanotify_mark
+	PTR	sys_prlimit64
 	.size	sys_call_table,.-sys_call_table

+ 20 - 8
arch/mips/mm/dma-default.c

@@ -44,27 +44,39 @@ static inline int cpu_is_noncoherent_r10000(struct device *dev)
 
 static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp)
 {
+	gfp_t dma_flag;
+
 	/* ignore region specifiers */
 	gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);
 
-#ifdef CONFIG_ZONE_DMA
+#ifdef CONFIG_ISA
 	if (dev == NULL)
-		gfp |= __GFP_DMA;
-	else if (dev->coherent_dma_mask < DMA_BIT_MASK(24))
-		gfp |= __GFP_DMA;
+		dma_flag = __GFP_DMA;
 	else
 #endif
-#ifdef CONFIG_ZONE_DMA32
+#if defined(CONFIG_ZONE_DMA32) && defined(CONFIG_ZONE_DMA)
 	     if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
-		gfp |= __GFP_DMA32;
+			dma_flag = __GFP_DMA;
+	else if (dev->coherent_dma_mask < DMA_BIT_MASK(64))
+			dma_flag = __GFP_DMA32;
+	else
+#endif
+#if defined(CONFIG_ZONE_DMA32) && !defined(CONFIG_ZONE_DMA)
+	     if (dev->coherent_dma_mask < DMA_BIT_MASK(64))
+		dma_flag = __GFP_DMA32;
+	else
+#endif
+#if defined(CONFIG_ZONE_DMA) && !defined(CONFIG_ZONE_DMA32)
+	     if (dev->coherent_dma_mask < DMA_BIT_MASK(64))
+		dma_flag = __GFP_DMA;
 	else
 #endif
-		;
+		dma_flag = 0;
 
 	/* Don't invoke OOM killer */
 	gfp |= __GFP_NORETRY;
 
-	return gfp;
+	return gfp | dma_flag;
 }
 
 void *dma_alloc_noncoherent(struct device *dev, size_t size,
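Collapsed into plain C, the #ifdef ladder above implements this decision for the case where both DMA zones are configured in (sketch):

	if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
		dma_flag = __GFP_DMA;		/* only ZONE_DMA is low enough */
	else if (dev->coherent_dma_mask < DMA_BIT_MASK(64))
		dma_flag = __GFP_DMA32;		/* 32-bit addressable memory */
	else
		dma_flag = 0;			/* any zone will do */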

+ 1 - 1
arch/mips/mm/sc-rm7k.c

@@ -30,7 +30,7 @@
 #define tc_lsize	32
 
 extern unsigned long icache_way_size, dcache_way_size;
-unsigned long tcache_size;
+static unsigned long tcache_size;
 
 #include <asm/r4kcache.h>
 

+ 3 - 0
arch/mips/mti-malta/malta-int.c

@@ -385,6 +385,8 @@ static int __initdata msc_nr_eicirqs = ARRAY_SIZE(msc_eicirqmap);
  */
 
 #define GIC_CPU_NMI GIC_MAP_TO_NMI_MSK
+#define X GIC_UNUSED
+
 static struct gic_intr_map gic_intr_map[GIC_NUM_INTRS] = {
 	{ X, X,		   X,		X,		0 },
 	{ X, X,		   X,	 	X,		0 },
@@ -404,6 +406,7 @@ static struct gic_intr_map gic_intr_map[GIC_NUM_INTRS] = {
 	{ X, X,		   X,		X,	        0 },
 	/* The remainder of this table is initialised by fill_ipi_map */
 };
+#undef X
 
 /*
  * GCMP needs to be detected before any SMP initialisation
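The local X alias only keeps the table rows narrow; the consumer side (from the irq-gic.c hunk above) skips sentinel rows like this:

	for (i = 0; i < mapsize; i++) {
		if (intrmap[i].cpunum == GIC_UNUSED)	/* hole in the map */
			continue;
		/* ... program the GIC for this entry ... */
	}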

+ 1 - 1
arch/mips/pci/pci-rc32434.c

@@ -118,7 +118,7 @@ static int __init rc32434_pcibridge_init(void)
 	if (!((pcicvalue == PCIM_H_EA) ||
 	      (pcicvalue == PCIM_H_IA_FIX) ||
 	      (pcicvalue == PCIM_H_IA_RR))) {
-		pr_err(KERN_ERR "PCI init error!!!\n");
+		pr_err("PCI init error!!!\n");
 		/* Not in Host Mode, return ERROR */
 		return -1;
 	}

+ 5 - 15
arch/mips/pnx8550/common/reset.c

@@ -22,29 +22,19 @@
  */
 #include <linux/kernel.h>
 
+#include <asm/processor.h>
 #include <asm/reboot.h>
 #include <glb.h>
 
 void pnx8550_machine_restart(char *command)
 {
-	char head[] = "************* Machine restart *************";
-	char foot[] = "*******************************************";
-
-	printk("\n\n");
-	printk("%s\n", head);
-	if (command != NULL)
-		printk("* %s\n", command);
-	printk("%s\n", foot);
-
 	PNX8550_RST_CTL = PNX8550_RST_DO_SW_RST;
 }
 
 void pnx8550_machine_halt(void)
 {
-	printk("*** Machine halt. (Not implemented) ***\n");
-}
-
-void pnx8550_machine_power_off(void)
-{
-	printk("*** Machine power off.  (Not implemented) ***\n");
+	while (1) {
+		if (cpu_wait)
+			cpu_wait();
+	}
 }

+ 1 - 2
arch/mips/pnx8550/common/setup.c

@@ -44,7 +44,6 @@
 extern void __init board_setup(void);
 extern void pnx8550_machine_restart(char *);
 extern void pnx8550_machine_halt(void);
-extern void pnx8550_machine_power_off(void);
 extern struct resource ioport_resource;
 extern struct resource iomem_resource;
 extern char *prom_getcmdline(void);
@@ -100,7 +99,7 @@ void __init plat_mem_setup(void)
 
         _machine_restart = pnx8550_machine_restart;
         _machine_halt = pnx8550_machine_halt;
-        pm_power_off = pnx8550_machine_power_off;
+        pm_power_off = pnx8550_machine_halt;
 
 	/* Clear the Global 2 Register, PCI Inta Output Enable Registers
 	   Bit 1:Enable DAC Powerdown

+ 0 - 1
arch/mn10300/Kconfig

@@ -8,7 +8,6 @@ mainmenu "Linux Kernel Configuration"
 config MN10300
 	def_bool y
 	select HAVE_OPROFILE
-	select HAVE_ARCH_TRACEHOOK
 
 config AM33
 	def_bool y

+ 1 - 1
arch/mn10300/Kconfig.debug

@@ -101,7 +101,7 @@ config GDBSTUB_DEBUG_BREAKPOINT
 
 choice
 	prompt "GDB stub port"
-	default GDBSTUB_TTYSM0
+	default GDBSTUB_ON_TTYSM0
 	depends on GDBSTUB
 	help
 	  Select the serial port used for GDB-stub.

+ 2 - 2
arch/mn10300/include/asm/bitops.h

@@ -229,9 +229,9 @@ int ffs(int x)
 #include <asm-generic/bitops/hweight.h>
 
 #define ext2_set_bit_atomic(lock, nr, addr) \
-	test_and_set_bit((nr) ^ 0x18, (addr))
+	test_and_set_bit((nr), (addr))
 #define ext2_clear_bit_atomic(lock, nr, addr) \
-	test_and_clear_bit((nr) ^ 0x18, (addr))
+	test_and_clear_bit((nr), (addr))
 
 #include <asm-generic/bitops/ext2-non-atomic.h>
 #include <asm-generic/bitops/minix-le.h>
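For the record, the dropped "^ 0x18" XOR flipped both byte-select bits of a bit index (0x18 == 0b11000), byte-reversing bit numbering within each 32-bit word; the new code uses native numbering. A worked illustration of the old mapping:

	/* old: nr ^ 0x18  ->  bit 0 -> 24,  bit 7 -> 31,  bit 8 -> 16,  bit 31 -> 7 */
	#define OLD_EXT2_BIT(nr)	((nr) ^ 0x18)	/* illustration only */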

+ 1 - 1
arch/mn10300/include/asm/signal.h

@@ -78,7 +78,7 @@ typedef unsigned long sigset_t;
 
 /* These should not be considered constants from userland.  */
 #define SIGRTMIN	32
-#define SIGRTMAX	(_NSIG-1)
+#define SIGRTMAX	_NSIG
 
 /*
  * SA_FLAGS values:

+ 1 - 2
arch/mn10300/kernel/module.c

@@ -206,7 +206,7 @@ int module_finalize(const Elf_Ehdr *hdr,
 		    const Elf_Shdr *sechdrs,
 		    struct module *me)
 {
-	return module_bug_finalize(hdr, sechdrs, me);
+	return 0;
 }
 
 /*
@@ -214,5 +214,4 @@ int module_finalize(const Elf_Ehdr *hdr,
  */
 void module_arch_cleanup(struct module *mod)
 {
-	module_bug_cleanup(mod);
 }

+ 20 - 15
arch/mn10300/kernel/signal.c

@@ -65,10 +65,10 @@ asmlinkage long sys_sigaction(int sig,
 		old_sigset_t mask;
 		old_sigset_t mask;
 		if (verify_area(VERIFY_READ, act, sizeof(*act)) ||
 		if (verify_area(VERIFY_READ, act, sizeof(*act)) ||
 		    __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
 		    __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
-		    __get_user(new_ka.sa.sa_restorer, &act->sa_restorer))
+		    __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
+		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
+		    __get_user(mask, &act->sa_mask))
 			return -EFAULT;
 			return -EFAULT;
-		__get_user(new_ka.sa.sa_flags, &act->sa_flags);
-		__get_user(mask, &act->sa_mask);
 		siginitset(&new_ka.sa.sa_mask, mask);
 	}
 
@@ -77,10 +77,10 @@ asmlinkage long sys_sigaction(int sig,
 	if (!ret && oact) {
 		if (verify_area(VERIFY_WRITE, oact, sizeof(*oact)) ||
 		    __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
-		    __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer))
+		    __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
+		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
+		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
 			return -EFAULT;
-		__put_user(old_ka.sa.sa_flags, &oact->sa_flags);
-		__put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
 	}
 
 	return ret;
@@ -102,6 +102,9 @@ static int restore_sigcontext(struct pt_regs *regs,
 {
 	unsigned int err = 0;
 
+	/* Always make any pending restarted system calls return -EINTR */
+	current_thread_info()->restart_block.fn = do_no_restart_syscall;
+
 	if (is_using_fpu(current))
 		fpu_kill_state(current);
 
@@ -330,8 +333,6 @@ static int setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
 	regs->d0 = sig;
 	regs->d1 = (unsigned long) &frame->sc;
 
-	set_fs(USER_DS);
-
 	/* the tracer may want to single-step inside the handler */
 	if (test_thread_flag(TIF_SINGLESTEP))
 		ptrace_notify(SIGTRAP);
@@ -345,7 +346,7 @@ static int setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
 	return 0;
 
give_sigsegv:
-	force_sig(SIGSEGV, current);
+	force_sigsegv(sig, current);
 	return -EFAULT;
 }
 
@@ -413,8 +414,6 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
 	regs->d0 = sig;
 	regs->d1 = (long) &frame->info;
 
-	set_fs(USER_DS);
-
 	/* the tracer may want to single-step inside the handler */
 	if (test_thread_flag(TIF_SINGLESTEP))
 		ptrace_notify(SIGTRAP);
@@ -428,10 +427,16 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
 	return 0;
 
give_sigsegv:
-	force_sig(SIGSEGV, current);
+	force_sigsegv(sig, current);
 	return -EFAULT;
 }
 
+static inline void stepback(struct pt_regs *regs)
+{
+	regs->pc -= 2;
+	regs->orig_d0 = -1;
+}
+
 /*
  * handle the actual delivery of a signal to userspace
  */
@@ -459,7 +464,7 @@ static int handle_signal(int sig,
 			/* fallthrough */
 		case -ERESTARTNOINTR:
 			regs->d0 = regs->orig_d0;
-			regs->pc -= 2;
+			stepback(regs);
 		}
 	}
 
@@ -527,12 +532,12 @@ static void do_signal(struct pt_regs *regs)
 		case -ERESTARTSYS:
 		case -ERESTARTNOINTR:
 			regs->d0 = regs->orig_d0;
-			regs->pc -= 2;
+			stepback(regs);
 			break;
 
 		case -ERESTART_RESTARTBLOCK:
 			regs->d0 = __NR_restart_syscall;
-			regs->pc -= 2;
+			stepback(regs);
 			break;
 		}
 	}
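
The new stepback() helper captures what a restartable syscall needs on mn10300: d0 carries a kernel-internal -ERESTART* status, the 2-byte syscall instruction is re-executed by rewinding the PC, and orig_d0 is poisoned so the same syscall cannot be restarted a second time. A self-contained sketch of that decision, with a hypothetical fake_regs standing in for struct pt_regs (illustration only, not the kernel's handle_signal()):

#include <errno.h>

#define ERESTARTSYS	512	/* kernel-internal, never reaches userspace */
#define ERESTARTNOINTR	513

struct fake_regs { unsigned long pc, d0, orig_d0; };

static void stepback(struct fake_regs *regs)
{
	regs->pc -= 2;		/* re-execute the 2-byte syscall insn */
	regs->orig_d0 = -1;	/* forget the syscall: no second restart */
}

static void maybe_restart(struct fake_regs *regs, int sa_restart)
{
	switch (-(long)regs->d0) {
	case ERESTARTSYS:
		if (!sa_restart) {
			regs->d0 = -EINTR;	/* handler lacks SA_RESTART */
			break;
		}
		/* fall through */
	case ERESTARTNOINTR:
		regs->d0 = regs->orig_d0;	/* restore the syscall number */
		stepback(regs);
	}
}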

+ 6 - 8
arch/mn10300/mm/Makefile

@@ -2,13 +2,11 @@
 # Makefile for the MN10300-specific memory management code
 #
 
+cacheflush-y	:= cache.o cache-mn10300.o
+cacheflush-$(CONFIG_MN10300_CACHE_WBACK) += cache-flush-mn10300.o
+
+cacheflush-$(CONFIG_MN10300_CACHE_DISABLED) := cache-disabled.o
+
 obj-y := \
 	init.o fault.o pgtable.o extable.o tlb-mn10300.o mmu-context.o \
-	misalignment.o dma-alloc.o
-
-ifneq ($(CONFIG_MN10300_CACHE_DISABLED),y)
-obj-y	+= cache.o cache-mn10300.o
-ifeq ($(CONFIG_MN10300_CACHE_WBACK),y)
-obj-y	+= cache-flush-mn10300.o
-endif
-endif
+	misalignment.o dma-alloc.o $(cacheflush-y)

+ 21 - 0
arch/mn10300/mm/cache-disabled.c

@@ -0,0 +1,21 @@
+/* Handle the cache being disabled
+ *
+ * Copyright (C) 2010 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#include <linux/mm.h>
+
+/*
+ * allow userspace to flush the instruction cache
+ */
+asmlinkage long sys_cacheflush(unsigned long start, unsigned long end)
+{
+	if (end < start)
+		return -EINVAL;
+	return 0;
+}
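
Even with the cache compiled out, this stub keeps the syscall's contract: the range check still fires, so a code generator can call it unconditionally regardless of the kernel's cache configuration. A hypothetical userspace caller (the __NR_cacheflush spelling is an assumption, not taken from this diff):

#include <unistd.h>
#include <sys/syscall.h>

/* make [buf, buf + len) instruction-coherent after emitting code */
static long flush_generated_code(void *buf, unsigned long len)
{
	unsigned long start = (unsigned long)buf;

	return syscall(__NR_cacheflush, start, start + len);
}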

+ 19 - 1
arch/mn10300/mm/cache.c

@@ -54,13 +54,30 @@ EXPORT_SYMBOL(flush_icache_page);
 void flush_icache_range(unsigned long start, unsigned long end)
 {
 #ifdef CONFIG_MN10300_CACHE_WBACK
-	unsigned long addr, size, off;
+	unsigned long addr, size, base, off;
 	struct page *page;
 	pgd_t *pgd;
 	pud_t *pud;
 	pmd_t *pmd;
 	pte_t *ppte, pte;
 
+	if (end > 0x80000000UL) {
+		/* addresses above 0xa0000000 do not go through the cache */
+		if (end > 0xa0000000UL) {
+			end = 0xa0000000UL;
+			if (start >= end)
+				return;
+		}
+
+		/* kernel addresses between 0x80000000 and 0x9fffffff do not
+		 * require page tables, so we just map such addresses directly */
+		base = (start >= 0x80000000UL) ? start : 0x80000000UL;
+		mn10300_dcache_flush_range(base, end);
+		if (base == start)
+			goto invalidate;
+		end = base;
+	}
+
 	for (; start < end; start += size) {
 		/* work out how much of the page to flush */
 		off = start & (PAGE_SIZE - 1);
@@ -104,6 +121,7 @@ void flush_icache_range(unsigned long start, unsigned long end)
 	}
 #endif
 
+invalidate:
 	mn10300_icache_inv();
 }
 EXPORT_SYMBOL(flush_icache_range);
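
The new early exit encodes the MN10300 memory map: 0xa0000000 and up bypasses the cache entirely, 0x80000000-0x9fffffff is a direct-mapped cached window that needs no page-table walk, and only addresses below that are walked page by page. A standalone sketch of the same split, with printf standing in for the flush calls (the window constants are the ones in the hunk above):

#include <stdio.h>

#define CACHED_BASE	0x80000000UL	/* direct-mapped, cached */
#define UNCACHED_BASE	0xa0000000UL	/* never cached */

static void split_range(unsigned long start, unsigned long end)
{
	unsigned long base;

	if (end > CACHED_BASE) {
		if (end > UNCACHED_BASE) {
			end = UNCACHED_BASE;	/* clip the uncached tail */
			if (start >= end)
				return;		/* nothing cacheable left */
		}
		base = start >= CACHED_BASE ? start : CACHED_BASE;
		printf("flush directly:   %#lx-%#lx\n", base, end);
		if (base == start)
			return;			/* no low part remains */
		end = base;
	}
	printf("walk page tables: %#lx-%#lx\n", start, end);
}

int main(void)
{
	split_range(0x7fffe000UL, 0x80004000UL);	/* straddles the window */
	return 0;
}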

+ 1 - 2
arch/parisc/kernel/module.c

@@ -941,11 +941,10 @@ int module_finalize(const Elf_Ehdr *hdr,
 	nsyms = newptr - (Elf_Sym *)symhdr->sh_addr;
 	DEBUGP("NEW num_symtab %lu\n", nsyms);
 	symhdr->sh_size = nsyms * sizeof(Elf_Sym);
-	return module_bug_finalize(hdr, sechdrs, me);
+	return 0;
 }
 
 void module_arch_cleanup(struct module *mod)
 {
 	deregister_unwind_table(mod);
-	module_bug_cleanup(mod);
 }

+ 0 - 6
arch/powerpc/kernel/module.c

@@ -63,11 +63,6 @@ int module_finalize(const Elf_Ehdr *hdr,
 		const Elf_Shdr *sechdrs, struct module *me)
 {
 	const Elf_Shdr *sect;
-	int err;
-
-	err = module_bug_finalize(hdr, sechdrs, me);
-	if (err)
-		return err;
 
 	/* Apply feature fixups */
 	sect = find_section(hdr, sechdrs, "__ftr_fixup");
@@ -101,5 +96,4 @@ int module_finalize(const Elf_Ehdr *hdr,
 
 void module_arch_cleanup(struct module *mod)
 {
-	module_bug_cleanup(mod);
 }

+ 25 - 61
arch/powerpc/kernel/perf_callchain.c

@@ -23,18 +23,6 @@
 #include "ppc32.h"
 #endif
 
-/*
- * Store another value in a callchain_entry.
- */
-static inline void callchain_store(struct perf_callchain_entry *entry, u64 ip)
-{
-	unsigned int nr = entry->nr;
-
-	if (nr < PERF_MAX_STACK_DEPTH) {
-		entry->ip[nr] = ip;
-		entry->nr = nr + 1;
-	}
-}
 
 /*
  * Is sp valid as the address of the next kernel stack frame after prev_sp?
@@ -58,8 +46,8 @@ static int valid_next_sp(unsigned long sp, unsigned long prev_sp)
 	return 0;
 }
 
-static void perf_callchain_kernel(struct pt_regs *regs,
-				  struct perf_callchain_entry *entry)
+void
+perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
 {
 	unsigned long sp, next_sp;
 	unsigned long next_ip;
@@ -69,8 +57,7 @@ static void perf_callchain_kernel(struct pt_regs *regs,
 
 	lr = regs->link;
 	sp = regs->gpr[1];
-	callchain_store(entry, PERF_CONTEXT_KERNEL);
-	callchain_store(entry, regs->nip);
+	perf_callchain_store(entry, regs->nip);
 
 	if (!validate_sp(sp, current, STACK_FRAME_OVERHEAD))
 		return;
@@ -89,7 +76,7 @@ static void perf_callchain_kernel(struct pt_regs *regs,
 			next_ip = regs->nip;
 			lr = regs->link;
 			level = 0;
-			callchain_store(entry, PERF_CONTEXT_KERNEL);
+			perf_callchain_store(entry, PERF_CONTEXT_KERNEL);
 
 		} else {
 			if (level == 0)
@@ -111,7 +98,7 @@ static void perf_callchain_kernel(struct pt_regs *regs,
 			++level;
 		}
 
-		callchain_store(entry, next_ip);
+		perf_callchain_store(entry, next_ip);
 		if (!valid_next_sp(next_sp, sp))
 			return;
 		sp = next_sp;
@@ -233,8 +220,8 @@ static int sane_signal_64_frame(unsigned long sp)
 		puc == (unsigned long) &sf->uc;
 }
 
-static void perf_callchain_user_64(struct pt_regs *regs,
-				   struct perf_callchain_entry *entry)
+static void perf_callchain_user_64(struct perf_callchain_entry *entry,
+				   struct pt_regs *regs)
 {
 	unsigned long sp, next_sp;
 	unsigned long next_ip;
@@ -246,8 +233,7 @@ static void perf_callchain_user_64(struct pt_regs *regs,
 	next_ip = regs->nip;
 	lr = regs->link;
 	sp = regs->gpr[1];
-	callchain_store(entry, PERF_CONTEXT_USER);
-	callchain_store(entry, next_ip);
+	perf_callchain_store(entry, next_ip);
 
 	for (;;) {
 		fp = (unsigned long __user *) sp;
@@ -276,14 +262,14 @@ static void perf_callchain_user_64(struct pt_regs *regs,
 			    read_user_stack_64(&uregs[PT_R1], &sp))
 				return;
 			level = 0;
-			callchain_store(entry, PERF_CONTEXT_USER);
-			callchain_store(entry, next_ip);
+			perf_callchain_store(entry, PERF_CONTEXT_USER);
+			perf_callchain_store(entry, next_ip);
 			continue;
 		}
 
 		if (level == 0)
 			next_ip = lr;
-		callchain_store(entry, next_ip);
+		perf_callchain_store(entry, next_ip);
 		++level;
 		sp = next_sp;
 	}
@@ -315,8 +301,8 @@ static int read_user_stack_32(unsigned int __user *ptr, unsigned int *ret)
 	return __get_user_inatomic(*ret, ptr);
 }
 
-static inline void perf_callchain_user_64(struct pt_regs *regs,
-					  struct perf_callchain_entry *entry)
+static inline void perf_callchain_user_64(struct perf_callchain_entry *entry,
+					  struct pt_regs *regs)
 {
 }
 
@@ -435,8 +421,8 @@ static unsigned int __user *signal_frame_32_regs(unsigned int sp,
 	return mctx->mc_gregs;
 }
 
-static void perf_callchain_user_32(struct pt_regs *regs,
-				   struct perf_callchain_entry *entry)
+static void perf_callchain_user_32(struct perf_callchain_entry *entry,
+				   struct pt_regs *regs)
 {
 	unsigned int sp, next_sp;
 	unsigned int next_ip;
@@ -447,8 +433,7 @@ static void perf_callchain_user_32(struct pt_regs *regs,
 	next_ip = regs->nip;
 	lr = regs->link;
 	sp = regs->gpr[1];
-	callchain_store(entry, PERF_CONTEXT_USER);
-	callchain_store(entry, next_ip);
+	perf_callchain_store(entry, next_ip);
 
 	while (entry->nr < PERF_MAX_STACK_DEPTH) {
 		fp = (unsigned int __user *) (unsigned long) sp;
@@ -470,45 +455,24 @@ static void perf_callchain_user_32(struct pt_regs *regs,
 			    read_user_stack_32(&uregs[PT_R1], &sp))
 				return;
 			level = 0;
-			callchain_store(entry, PERF_CONTEXT_USER);
-			callchain_store(entry, next_ip);
+			perf_callchain_store(entry, PERF_CONTEXT_USER);
+			perf_callchain_store(entry, next_ip);
 			continue;
 		}
 
 		if (level == 0)
 			next_ip = lr;
-		callchain_store(entry, next_ip);
+		perf_callchain_store(entry, next_ip);
 		++level;
 		sp = next_sp;
 	}
 }
 
-/*
- * Since we can't get PMU interrupts inside a PMU interrupt handler,
- * we don't need separate irq and nmi entries here.
- */
-static DEFINE_PER_CPU(struct perf_callchain_entry, cpu_perf_callchain);
-
-struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
+void
+perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
 {
-	struct perf_callchain_entry *entry = &__get_cpu_var(cpu_perf_callchain);
-
-	entry->nr = 0;
-
-	if (!user_mode(regs)) {
-		perf_callchain_kernel(regs, entry);
-		if (current->mm)
-			regs = task_pt_regs(current);
-		else
-			regs = NULL;
-	}
-
-	if (regs) {
-		if (current_is_64bit())
-			perf_callchain_user_64(regs, entry);
-		else
-			perf_callchain_user_32(regs, entry);
-	}
-
-	return entry;
+	if (current_is_64bit())
+		perf_callchain_user_64(entry, regs);
+	else
+		perf_callchain_user_32(entry, regs);
}
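
This file shows the pattern the whole series applies: the per-architecture callchain_store() helpers and per-CPU entry buffers move into common code, which now calls perf_callchain_kernel()/perf_callchain_user() directly and evidently stores the initial PERF_CONTEXT_* marker itself — only the nested-context stores (signal frames spotted mid-walk) keep an explicit marker in the architecture code. Judging from the copy deleted above, the shared helper's behaviour amounts to (a sketch, not the generic implementation):

static inline void perf_callchain_store(struct perf_callchain_entry *entry,
					u64 ip)
{
	if (entry->nr < PERF_MAX_STACK_DEPTH)
		entry->ip[entry->nr++] = ip;
}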

+ 101 - 63
arch/powerpc/kernel/perf_event.c

@@ -402,6 +402,9 @@ static void power_pmu_read(struct perf_event *event)
 {
 	s64 val, delta, prev;
 
+	if (event->hw.state & PERF_HES_STOPPED)
+		return;
+
 	if (!event->hw.idx)
 		return;
 	/*
@@ -517,7 +520,7 @@ static void write_mmcr0(struct cpu_hw_events *cpuhw, unsigned long mmcr0)
  * Disable all events to prevent PMU interrupts and to allow
  * events to be added or removed.
  */
-void hw_perf_disable(void)
+static void power_pmu_disable(struct pmu *pmu)
 {
 	struct cpu_hw_events *cpuhw;
 	unsigned long flags;
@@ -565,7 +568,7 @@ void hw_perf_disable(void)
  * If we were previously disabled and events were added, then
  * put the new config on the PMU.
  */
-void hw_perf_enable(void)
+static void power_pmu_enable(struct pmu *pmu)
 {
 	struct perf_event *event;
 	struct cpu_hw_events *cpuhw;
@@ -672,6 +675,8 @@ void hw_perf_enable(void)
 		}
 		local64_set(&event->hw.prev_count, val);
 		event->hw.idx = idx;
+		if (event->hw.state & PERF_HES_STOPPED)
+			val = 0;
 		write_pmc(idx, val);
 		perf_event_update_userpage(event);
 	}
@@ -727,7 +732,7 @@ static int collect_events(struct perf_event *group, int max_count,
  * re-enable the PMU in order to get hw_perf_enable to do the
  * actual work of reconfiguring the PMU.
  */
-static int power_pmu_enable(struct perf_event *event)
+static int power_pmu_add(struct perf_event *event, int ef_flags)
 {
 	struct cpu_hw_events *cpuhw;
 	unsigned long flags;
@@ -735,7 +740,7 @@ static int power_pmu_enable(struct perf_event *event)
 	int ret = -EAGAIN;
 
 	local_irq_save(flags);
-	perf_disable();
+	perf_pmu_disable(event->pmu);
 
 	/*
 	 * Add the event to the list (if there is room)
@@ -749,6 +754,9 @@ static int power_pmu_enable(struct perf_event *event)
 	cpuhw->events[n0] = event->hw.config;
 	cpuhw->flags[n0] = event->hw.event_base;
 
+	if (!(ef_flags & PERF_EF_START))
+		event->hw.state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
+
 	/*
 	 * If group events scheduling transaction was started,
 	 * skip the schedulability test here, it will be peformed
@@ -769,7 +777,7 @@ nocheck:
 
 	ret = 0;
  out:
-	perf_enable();
+	perf_pmu_enable(event->pmu);
 	local_irq_restore(flags);
 	return ret;
 }
@@ -777,14 +785,14 @@ nocheck:
 /*
  * Remove a event from the PMU.
  */
-static void power_pmu_disable(struct perf_event *event)
+static void power_pmu_del(struct perf_event *event, int ef_flags)
 {
 	struct cpu_hw_events *cpuhw;
 	long i;
 	unsigned long flags;
 
 	local_irq_save(flags);
-	perf_disable();
+	perf_pmu_disable(event->pmu);
 
 	power_pmu_read(event);
 
@@ -821,34 +829,60 @@ static void power_pmu_disable(struct perf_event *event)
 		cpuhw->mmcr[0] &= ~(MMCR0_PMXE | MMCR0_FCECE);
 	}
 
-	perf_enable();
+	perf_pmu_enable(event->pmu);
 	local_irq_restore(flags);
 }
 
 /*
- * Re-enable interrupts on a event after they were throttled
- * because they were coming too fast.
+ * POWER-PMU does not support disabling individual counters, hence
+ * program their cycle counter to their max value and ignore the interrupts.
  */
-static void power_pmu_unthrottle(struct perf_event *event)
+
+static void power_pmu_start(struct perf_event *event, int ef_flags)
+{
+	unsigned long flags;
+	s64 left;
+
+	if (!event->hw.idx || !event->hw.sample_period)
+		return;
+
+	if (!(event->hw.state & PERF_HES_STOPPED))
+		return;
+
+	if (ef_flags & PERF_EF_RELOAD)
+		WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
+
+	local_irq_save(flags);
+	perf_pmu_disable(event->pmu);
+
+	event->hw.state = 0;
+	left = local64_read(&event->hw.period_left);
+	write_pmc(event->hw.idx, left);
+
+	perf_event_update_userpage(event);
+	perf_pmu_enable(event->pmu);
+	local_irq_restore(flags);
+}
+
+static void power_pmu_stop(struct perf_event *event, int ef_flags)
 {
-	s64 val, left;
 	unsigned long flags;
 
 	if (!event->hw.idx || !event->hw.sample_period)
 		return;
+
+	if (event->hw.state & PERF_HES_STOPPED)
+		return;
+
 	local_irq_save(flags);
-	perf_disable();
+	perf_pmu_disable(event->pmu);
+
 	power_pmu_read(event);
-	left = event->hw.sample_period;
-	event->hw.last_period = left;
-	val = 0;
-	if (left < 0x80000000L)
-		val = 0x80000000L - left;
-	write_pmc(event->hw.idx, val);
-	local64_set(&event->hw.prev_count, val);
-	local64_set(&event->hw.period_left, left);
+	event->hw.state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
+	write_pmc(event->hw.idx, 0);
+
 	perf_event_update_userpage(event);
-	perf_enable();
+	perf_pmu_enable(event->pmu);
 	local_irq_restore(flags);
 }
 
@@ -857,10 +891,11 @@ static void power_pmu_unthrottle(struct perf_event *event)
  * Set the flag to make pmu::enable() not perform the
  * schedulability test, it will be performed at commit time
 */
-void power_pmu_start_txn(const struct pmu *pmu)
+void power_pmu_start_txn(struct pmu *pmu)
 {
 	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
 
+	perf_pmu_disable(pmu);
 	cpuhw->group_flag |= PERF_EVENT_TXN;
 	cpuhw->n_txn_start = cpuhw->n_events;
 }
@@ -870,11 +905,12 @@ void power_pmu_start_txn(const struct pmu *pmu)
  * Clear the flag and pmu::enable() will perform the
  * schedulability test.
 */
-void power_pmu_cancel_txn(const struct pmu *pmu)
+void power_pmu_cancel_txn(struct pmu *pmu)
 {
 	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
 
 	cpuhw->group_flag &= ~PERF_EVENT_TXN;
+	perf_pmu_enable(pmu);
 }
 
 /*
@@ -882,7 +918,7 @@ void power_pmu_cancel_txn(const struct pmu *pmu)
  * Perform the group schedulability test as a whole
  * Return 0 if success
 */
-int power_pmu_commit_txn(const struct pmu *pmu)
+int power_pmu_commit_txn(struct pmu *pmu)
 {
 	struct cpu_hw_events *cpuhw;
 	long i, n;
@@ -901,19 +937,10 @@ int power_pmu_commit_txn(const struct pmu *pmu)
 		cpuhw->event[i]->hw.config = cpuhw->events[i];
 
 	cpuhw->group_flag &= ~PERF_EVENT_TXN;
+	perf_pmu_enable(pmu);
 	return 0;
 }
 
-struct pmu power_pmu = {
-	.enable		= power_pmu_enable,
-	.disable	= power_pmu_disable,
-	.read		= power_pmu_read,
-	.unthrottle	= power_pmu_unthrottle,
-	.start_txn	= power_pmu_start_txn,
-	.cancel_txn	= power_pmu_cancel_txn,
-	.commit_txn	= power_pmu_commit_txn,
-};
-
 /*
  * Return 1 if we might be able to put event on a limited PMC,
  * or 0 if not.
@@ -1014,7 +1041,7 @@ static int hw_perf_cache_event(u64 config, u64 *eventp)
 	return 0;
 }
 
-const struct pmu *hw_perf_event_init(struct perf_event *event)
+static int power_pmu_event_init(struct perf_event *event)
 {
 	u64 ev;
 	unsigned long flags;
@@ -1026,25 +1053,27 @@ const struct pmu *hw_perf_event_init(struct perf_event *event)
 	struct cpu_hw_events *cpuhw;
 
 	if (!ppmu)
-		return ERR_PTR(-ENXIO);
+		return -ENOENT;
+
 	switch (event->attr.type) {
 	case PERF_TYPE_HARDWARE:
 		ev = event->attr.config;
 		if (ev >= ppmu->n_generic || ppmu->generic_events[ev] == 0)
-			return ERR_PTR(-EOPNOTSUPP);
+			return -EOPNOTSUPP;
 		ev = ppmu->generic_events[ev];
 		break;
 	case PERF_TYPE_HW_CACHE:
 		err = hw_perf_cache_event(event->attr.config, &ev);
 		if (err)
-			return ERR_PTR(err);
+			return err;
 		break;
 	case PERF_TYPE_RAW:
 		ev = event->attr.config;
 		break;
 	default:
-		return ERR_PTR(-EINVAL);
+		return -ENOENT;
 	}
+
 	event->hw.config_base = ev;
 	event->hw.idx = 0;
 
@@ -1081,7 +1110,7 @@ const struct pmu *hw_perf_event_init(struct perf_event *event)
 			 */
 			ev = normal_pmc_alternative(ev, flags);
 			if (!ev)
-				return ERR_PTR(-EINVAL);
+				return -EINVAL;
 		}
 	}
 
@@ -1095,19 +1124,19 @@ const struct pmu *hw_perf_event_init(struct perf_event *event)
 		n = collect_events(event->group_leader, ppmu->n_counter - 1,
 				   ctrs, events, cflags);
 		if (n < 0)
-			return ERR_PTR(-EINVAL);
+			return -EINVAL;
 	}
 	events[n] = ev;
 	ctrs[n] = event;
 	cflags[n] = flags;
 	if (check_excludes(ctrs, cflags, n, 1))
-		return ERR_PTR(-EINVAL);
+		return -EINVAL;
 
 	cpuhw = &get_cpu_var(cpu_hw_events);
 	err = power_check_constraints(cpuhw, events, cflags, n + 1);
 	put_cpu_var(cpu_hw_events);
 	if (err)
-		return ERR_PTR(-EINVAL);
+		return -EINVAL;
 
 	event->hw.config = events[n];
 	event->hw.event_base = cflags[n];
@@ -1132,11 +1161,23 @@ const struct pmu *hw_perf_event_init(struct perf_event *event)
 	}
 	event->destroy = hw_perf_event_destroy;
 
-	if (err)
-		return ERR_PTR(err);
-	return &power_pmu;
+	return err;
 }
 
+struct pmu power_pmu = {
+	.pmu_enable	= power_pmu_enable,
+	.pmu_disable	= power_pmu_disable,
+	.event_init	= power_pmu_event_init,
+	.add		= power_pmu_add,
+	.del		= power_pmu_del,
+	.start		= power_pmu_start,
+	.stop		= power_pmu_stop,
+	.read		= power_pmu_read,
+	.start_txn	= power_pmu_start_txn,
+	.cancel_txn	= power_pmu_cancel_txn,
+	.commit_txn	= power_pmu_commit_txn,
+};
+
 /*
  * A counter has overflowed; update its count and record
  * things if requested.  Note that interrupts are hard-disabled
@@ -1149,6 +1190,11 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
 	s64 prev, delta, left;
 	int record = 0;
 
+	if (event->hw.state & PERF_HES_STOPPED) {
+		write_pmc(event->hw.idx, 0);
+		return;
+	}
+
 	/* we don't have to worry about interrupts here */
 	prev = local64_read(&event->hw.prev_count);
 	delta = (val - prev) & 0xfffffffful;
@@ -1171,6 +1217,11 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
 			val = 0x80000000LL - left;
 	}
 
+	write_pmc(event->hw.idx, val);
+	local64_set(&event->hw.prev_count, val);
+	local64_set(&event->hw.period_left, left);
+	perf_event_update_userpage(event);
+
 	/*
 	 * Finally record data if requested.
 	 */
@@ -1183,23 +1234,9 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
 		if (event->attr.sample_type & PERF_SAMPLE_ADDR)
 			perf_get_data_addr(regs, &data.addr);
 
-		if (perf_event_overflow(event, nmi, &data, regs)) {
-			/*
-			 * Interrupts are coming too fast - throttle them
-			 * by setting the event to 0, so it will be
-			 * at least 2^30 cycles until the next interrupt
-			 * (assuming each event counts at most 2 counts
-			 * per cycle).
-			 */
-			val = 0;
-			left = ~0ULL >> 1;
-		}
+		if (perf_event_overflow(event, nmi, &data, regs))
+			power_pmu_stop(event, 0);
 	}
-
-	write_pmc(event->hw.idx, val);
-	local64_set(&event->hw.prev_count, val);
-	local64_set(&event->hw.period_left, left);
-	perf_event_update_userpage(event);
 }
 
 /*
@@ -1342,6 +1379,7 @@ int register_power_pmu(struct power_pmu *pmu)
 		freeze_events_kernel = MMCR0_FCHV;
 #endif /* CONFIG_PPC64 */
 
+	perf_pmu_register(&power_pmu);
 	perf_cpu_notifier(power_pmu_notifier);
 
 	return 0;
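
Behind the renames sits a real interface change: the single enable/disable pair becomes add/del (claim or release a counter slot) plus start/stop (let it tick or freeze it), tracked by two bits of per-event state. A compressed sketch of the invariants the hunks above maintain — the flag values are illustrative stand-ins and struct hw is a cut-down hw_perf_event, not kernel code:

#define PERF_HES_STOPPED	0x01	/* counter is not counting */
#define PERF_HES_UPTODATE	0x02	/* count already folded in */

#define PERF_EF_START		0x01	/* add() should also start() */
#define PERF_EF_RELOAD		0x02	/* start() reprograms the period */
#define PERF_EF_UPDATE		0x04	/* stop() must refresh the count */

struct hw { unsigned int state; };

static void ex_add(struct hw *hw, int flags)
{
	/* claim hardware; without PERF_EF_START the event stays parked */
	if (!(flags & PERF_EF_START))
		hw->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
	else
		hw->state = 0;
}

static void ex_stop(struct hw *hw, int flags)
{
	if (!(hw->state & PERF_HES_STOPPED))
		hw->state |= PERF_HES_STOPPED;	/* freeze the counter */
	if ((flags & PERF_EF_UPDATE) && !(hw->state & PERF_HES_UPTODATE))
		hw->state |= PERF_HES_UPTODATE;	/* fold the value in, once */
}

This is also why record_and_restart() now bails out early for a PERF_HES_STOPPED counter, and why throttling became a plain power_pmu_stop(event, 0) call instead of the old write-a-huge-period trick.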

+ 90 - 58
arch/powerpc/kernel/perf_event_fsl_emb.c

@@ -156,6 +156,9 @@ static void fsl_emb_pmu_read(struct perf_event *event)
 {
 	s64 val, delta, prev;
 
+	if (event->hw.state & PERF_HES_STOPPED)
+		return;
+
 	/*
 	 * Performance monitor interrupts come even when interrupts
 	 * are soft-disabled, as long as interrupts are hard-enabled.
@@ -177,7 +180,7 @@ static void fsl_emb_pmu_read(struct perf_event *event)
  * Disable all events to prevent PMU interrupts and to allow
  * events to be added or removed.
  */
-void hw_perf_disable(void)
+static void fsl_emb_pmu_disable(struct pmu *pmu)
 {
 	struct cpu_hw_events *cpuhw;
 	unsigned long flags;
@@ -216,7 +219,7 @@ void hw_perf_disable(void)
  * If we were previously disabled and events were added, then
  * put the new config on the PMU.
  */
-void hw_perf_enable(void)
+static void fsl_emb_pmu_enable(struct pmu *pmu)
 {
 	struct cpu_hw_events *cpuhw;
 	unsigned long flags;
@@ -262,8 +265,8 @@ static int collect_events(struct perf_event *group, int max_count,
 	return n;
 }
 
-/* perf must be disabled, context locked on entry */
-static int fsl_emb_pmu_enable(struct perf_event *event)
+/* context locked on entry */
+static int fsl_emb_pmu_add(struct perf_event *event, int flags)
 {
 	struct cpu_hw_events *cpuhw;
 	int ret = -EAGAIN;
@@ -271,6 +274,7 @@ static int fsl_emb_pmu_enable(struct perf_event *event)
 	u64 val;
 	int i;
 
+	perf_pmu_disable(event->pmu);
 	cpuhw = &get_cpu_var(cpu_hw_events);
 
 	if (event->hw.config & FSL_EMB_EVENT_RESTRICTED)
@@ -301,6 +305,12 @@ static int fsl_emb_pmu_enable(struct perf_event *event)
 			val = 0x80000000L - left;
 	}
 	local64_set(&event->hw.prev_count, val);
+
+	if (!(flags & PERF_EF_START)) {
+		event->hw.state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
+		val = 0;
+	}
+
 	write_pmc(i, val);
 	perf_event_update_userpage(event);
 
@@ -310,15 +320,17 @@ static int fsl_emb_pmu_enable(struct perf_event *event)
 	ret = 0;
  out:
 	put_cpu_var(cpu_hw_events);
+	perf_pmu_enable(event->pmu);
 	return ret;
 }
 
-/* perf must be disabled, context locked on entry */
-static void fsl_emb_pmu_disable(struct perf_event *event)
+/* context locked on entry */
+static void fsl_emb_pmu_del(struct perf_event *event, int flags)
 {
 	struct cpu_hw_events *cpuhw;
 	int i = event->hw.idx;
 
+	perf_pmu_disable(event->pmu);
 	if (i < 0)
 		goto out;
 
@@ -346,44 +358,57 @@ static void fsl_emb_pmu_disable(struct perf_event *event)
 	cpuhw->n_events--;
 
  out:
+	perf_pmu_enable(event->pmu);
 	put_cpu_var(cpu_hw_events);
 }
 
-/*
- * Re-enable interrupts on a event after they were throttled
- * because they were coming too fast.
- *
- * Context is locked on entry, but perf is not disabled.
- */
-static void fsl_emb_pmu_unthrottle(struct perf_event *event)
+static void fsl_emb_pmu_start(struct perf_event *event, int ef_flags)
 {
-	s64 val, left;
 	unsigned long flags;
+	s64 left;
 
 	if (event->hw.idx < 0 || !event->hw.sample_period)
 		return;
+
+	if (!(event->hw.state & PERF_HES_STOPPED))
+		return;
+
+	if (ef_flags & PERF_EF_RELOAD)
+		WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
+
 	local_irq_save(flags);
-	perf_disable();
-	fsl_emb_pmu_read(event);
-	left = event->hw.sample_period;
-	event->hw.last_period = left;
-	val = 0;
-	if (left < 0x80000000L)
-		val = 0x80000000L - left;
-	write_pmc(event->hw.idx, val);
-	local64_set(&event->hw.prev_count, val);
-	local64_set(&event->hw.period_left, left);
+	perf_pmu_disable(event->pmu);
+
+	event->hw.state = 0;
+	left = local64_read(&event->hw.period_left);
+	write_pmc(event->hw.idx, left);
+
 	perf_event_update_userpage(event);
-	perf_enable();
+	perf_pmu_enable(event->pmu);
 	local_irq_restore(flags);
 }
 
-static struct pmu fsl_emb_pmu = {
-	.enable		= fsl_emb_pmu_enable,
-	.disable	= fsl_emb_pmu_disable,
-	.read		= fsl_emb_pmu_read,
-	.unthrottle	= fsl_emb_pmu_unthrottle,
-};
+static void fsl_emb_pmu_stop(struct perf_event *event, int ef_flags)
+{
+	unsigned long flags;
+
+	if (event->hw.idx < 0 || !event->hw.sample_period)
+		return;
+
+	if (event->hw.state & PERF_HES_STOPPED)
+		return;
+
+	local_irq_save(flags);
+	perf_pmu_disable(event->pmu);
+
+	fsl_emb_pmu_read(event);
+	event->hw.state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
+	write_pmc(event->hw.idx, 0);
+
+	perf_event_update_userpage(event);
+	perf_pmu_enable(event->pmu);
+	local_irq_restore(flags);
+}
 
 /*
  * Release the PMU if this is the last perf_event.
@@ -428,7 +453,7 @@ static int hw_perf_cache_event(u64 config, u64 *eventp)
 	return 0;
 }
 
-const struct pmu *hw_perf_event_init(struct perf_event *event)
+static int fsl_emb_pmu_event_init(struct perf_event *event)
 {
 	u64 ev;
 	struct perf_event *events[MAX_HWEVENTS];
@@ -441,14 +466,14 @@ const struct pmu *hw_perf_event_init(struct perf_event *event)
 	case PERF_TYPE_HARDWARE:
 		ev = event->attr.config;
 		if (ev >= ppmu->n_generic || ppmu->generic_events[ev] == 0)
-			return ERR_PTR(-EOPNOTSUPP);
+			return -EOPNOTSUPP;
 		ev = ppmu->generic_events[ev];
 		break;
 
 	case PERF_TYPE_HW_CACHE:
 		err = hw_perf_cache_event(event->attr.config, &ev);
 		if (err)
-			return ERR_PTR(err);
+			return err;
 		break;
 
 	case PERF_TYPE_RAW:
@@ -456,12 +481,12 @@ const struct pmu *hw_perf_event_init(struct perf_event *event)
 		break;
 
 	default:
-		return ERR_PTR(-EINVAL);
+		return -ENOENT;
 	}
 
 	event->hw.config = ppmu->xlate_event(ev);
 	if (!(event->hw.config & FSL_EMB_EVENT_VALID))
-		return ERR_PTR(-EINVAL);
+		return -EINVAL;
 
 	/*
 	 * If this is in a group, check if it can go on with all the
@@ -473,7 +498,7 @@ const struct pmu *hw_perf_event_init(struct perf_event *event)
 		n = collect_events(event->group_leader,
 		                   ppmu->n_counter - 1, events);
 		if (n < 0)
-			return ERR_PTR(-EINVAL);
+			return -EINVAL;
 	}
 
 	if (event->hw.config & FSL_EMB_EVENT_RESTRICTED) {
@@ -484,7 +509,7 @@ const struct pmu *hw_perf_event_init(struct perf_event *event)
 		}
 
 		if (num_restricted >= ppmu->n_restricted)
-			return ERR_PTR(-EINVAL);
+			return -EINVAL;
 	}
 
 	event->hw.idx = -1;
@@ -497,7 +522,7 @@ const struct pmu *hw_perf_event_init(struct perf_event *event)
 	if (event->attr.exclude_kernel)
 		event->hw.config_base |= PMLCA_FCS;
 	if (event->attr.exclude_idle)
-		return ERR_PTR(-ENOTSUPP);
+		return -ENOTSUPP;
 
 	event->hw.last_period = event->hw.sample_period;
 	local64_set(&event->hw.period_left, event->hw.last_period);
@@ -523,11 +548,20 @@ const struct pmu *hw_perf_event_init(struct perf_event *event)
 	}
 	event->destroy = hw_perf_event_destroy;
 
-	if (err)
-		return ERR_PTR(err);
-	return &fsl_emb_pmu;
+	return err;
 }
 
+static struct pmu fsl_emb_pmu = {
+	.pmu_enable	= fsl_emb_pmu_enable,
+	.pmu_disable	= fsl_emb_pmu_disable,
+	.event_init	= fsl_emb_pmu_event_init,
+	.add		= fsl_emb_pmu_add,
+	.del		= fsl_emb_pmu_del,
+	.start		= fsl_emb_pmu_start,
+	.stop		= fsl_emb_pmu_stop,
+	.read		= fsl_emb_pmu_read,
+};
+
 /*
  * A counter has overflowed; update its count and record
  * things if requested.  Note that interrupts are hard-disabled
@@ -540,6 +574,11 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
 	s64 prev, delta, left;
 	int record = 0;
 
+	if (event->hw.state & PERF_HES_STOPPED) {
+		write_pmc(event->hw.idx, 0);
+		return;
+	}
+
 	/* we don't have to worry about interrupts here */
 	prev = local64_read(&event->hw.prev_count);
 	delta = (val - prev) & 0xfffffffful;
@@ -562,6 +601,11 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
 			val = 0x80000000LL - left;
 	}
 
+	write_pmc(event->hw.idx, val);
+	local64_set(&event->hw.prev_count, val);
+	local64_set(&event->hw.period_left, left);
+	perf_event_update_userpage(event);
+
 	/*
	 * Finally record data if requested.
 	 */
@@ -571,23 +615,9 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
 		perf_sample_data_init(&data, 0);
 		data.period = event->hw.last_period;
 
-		if (perf_event_overflow(event, nmi, &data, regs)) {
-			/*
-			 * Interrupts are coming too fast - throttle them
-			 * by setting the event to 0, so it will be
-			 * at least 2^30 cycles until the next interrupt
-			 * (assuming each event counts at most 2 counts
-			 * per cycle).
-			 */
-			val = 0;
-			left = ~0ULL >> 1;
-		}
+		if (perf_event_overflow(event, nmi, &data, regs))
+			fsl_emb_pmu_stop(event, 0);
 	}
-
-	write_pmc(event->hw.idx, val);
-	local64_set(&event->hw.prev_count, val);
-	local64_set(&event->hw.period_left, left);
-	perf_event_update_userpage(event);
 }
 
 static void perf_event_interrupt(struct pt_regs *regs)
@@ -651,5 +681,7 @@ int register_fsl_emb_pmu(struct fsl_emb_pmu *pmu)
 	pr_info("%s performance monitor hardware support registered\n",
 		pmu->name);
 
+	perf_pmu_register(&fsl_emb_pmu);
+
 	return 0;
 }

+ 1 - 1
arch/powerpc/platforms/512x/clock.c

@@ -57,7 +57,7 @@ static struct clk *mpc5121_clk_get(struct device *dev, const char *id)
 	int id_match = 0;
 
 	if (dev == NULL || id == NULL)
-		return NULL;
+		return clk;
 
 	mutex_lock(&clocks_mutex);
 	list_for_each_entry(p, &clocks, node) {

+ 6 - 3
arch/powerpc/platforms/52xx/efika.c

@@ -99,7 +99,7 @@ static void __init efika_pcisetup(void)
 	if (bus_range == NULL || len < 2 * sizeof(int)) {
 		printk(KERN_WARNING EFIKA_PLATFORM_NAME
 		       ": Can't get bus-range for %s\n", pcictrl->full_name);
-		return;
+		goto out_put;
 	}
 
 	if (bus_range[1] == bus_range[0])
@@ -111,12 +111,12 @@ static void __init efika_pcisetup(void)
 	printk(" controlled by %s\n", pcictrl->full_name);
 	printk("\n");
 
-	hose = pcibios_alloc_controller(of_node_get(pcictrl));
+	hose = pcibios_alloc_controller(pcictrl);
 	if (!hose) {
 		printk(KERN_WARNING EFIKA_PLATFORM_NAME
 		       ": Can't allocate PCI controller structure for %s\n",
 		       pcictrl->full_name);
-		return;
+		goto out_put;
 	}
 
 	hose->first_busno = bus_range[0];
@@ -124,6 +124,9 @@ static void __init efika_pcisetup(void)
 	hose->ops = &rtas_pci_ops;
 
 	pci_process_bridge_OF_ranges(hose, pcictrl, 0);
+	return;
+out_put:
+	of_node_put(pcictrl);
 }
 
 #else
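
Two leaks disappear in one hunk: the extra of_node_get() took a second reference that nothing ever released, and the early error returns dropped the reference from the original lookup of pcictrl on the floor. The resulting shape is the usual OF refcounting pattern (sketch; example_parse() and example_register() are made-up stand-ins for the real work):

#include <linux/of.h>

static void __init example_pcisetup(void)
{
	struct device_node *np = of_find_node_by_type(NULL, "pci");

	if (!np)
		return;

	if (example_parse(np))		/* every failure path...     */
		goto out_put;		/* ...must balance the lookup */

	example_register(np);		/* success: consumer keeps np */
	return;
out_put:
	of_node_put(np);
}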

+ 6 - 2
arch/powerpc/platforms/52xx/mpc52xx_common.c

@@ -325,12 +325,16 @@ int mpc5200_psc_ac97_gpio_reset(int psc_number)
 	clrbits32(&simple_gpio->simple_dvo, sync | out);
 	clrbits8(&wkup_gpio->wkup_dvo, reset);
 
-	/* wait at lease 1 us */
-	udelay(2);
+	/* wait for 1 us */
+	udelay(1);
 
 	/* Deassert reset */
 	setbits8(&wkup_gpio->wkup_dvo, reset);
 
+	/* wait at least 200ns */
+	/* 7 ~= (200ns * timebase) / ns2sec */
+	__delay(7);
+
 	/* Restore pin-muxing */
 	out_be32(&simple_gpio->port_config, mux);
 
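The magic 7 follows from the comment's formula: __delay() spins for timebase ticks, so ticks = 200 ns × timebase frequency. Assuming the common MPC5200 setup of a 132 MHz XLB bus with the timebase at a quarter of that, 33 MHz (an assumption, not stated in the patch), 200 ns × 33 MHz ≈ 6.6 ticks, rounded up to 7.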

+ 1 - 2
arch/s390/kernel/module.c

@@ -407,10 +407,9 @@ int module_finalize(const Elf_Ehdr *hdr,
 {
 	vfree(me->arch.syminfo);
 	me->arch.syminfo = NULL;
-	return module_bug_finalize(hdr, sechdrs, me);
+	return 0;
 }
 
 void module_arch_cleanup(struct module *mod)
 {
-	module_bug_cleanup(mod);
 }

+ 0 - 2
arch/sh/kernel/module.c

@@ -149,13 +149,11 @@ int module_finalize(const Elf_Ehdr *hdr,
 	int ret = 0;
 
 	ret |= module_dwarf_finalize(hdr, sechdrs, me);
-	ret |= module_bug_finalize(hdr, sechdrs, me);
 
 	return ret;
 }
 
 void module_arch_cleanup(struct module *mod)
 {
-	module_bug_cleanup(mod);
 	module_dwarf_cleanup(mod);
 }

+ 4 - 46
arch/sh/kernel/perf_callchain.c

@@ -14,11 +14,6 @@
 #include <asm/unwinder.h>
 #include <asm/ptrace.h>
 
-static inline void callchain_store(struct perf_callchain_entry *entry, u64 ip)
-{
-	if (entry->nr < PERF_MAX_STACK_DEPTH)
-		entry->ip[entry->nr++] = ip;
-}
 
 static void callchain_warning(void *data, char *msg)
 {
@@ -39,7 +34,7 @@ static void callchain_address(void *data, unsigned long addr, int reliable)
 	struct perf_callchain_entry *entry = data;
 
 	if (reliable)
-		callchain_store(entry, addr);
+		perf_callchain_store(entry, addr);
 }
 
 static const struct stacktrace_ops callchain_ops = {
@@ -49,47 +44,10 @@ static const struct stacktrace_ops callchain_ops = {
 	.address	= callchain_address,
 };
 
-static void
-perf_callchain_kernel(struct pt_regs *regs, struct perf_callchain_entry *entry)
+void
+perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
 {
-	callchain_store(entry, PERF_CONTEXT_KERNEL);
-	callchain_store(entry, regs->pc);
+	perf_callchain_store(entry, regs->pc);
 
 	unwind_stack(NULL, regs, NULL, &callchain_ops, entry);
 }
-
-static void
-perf_do_callchain(struct pt_regs *regs, struct perf_callchain_entry *entry)
-{
-	int is_user;
-
-	if (!regs)
-		return;
-
-	is_user = user_mode(regs);
-
-	if (is_user && current->state != TASK_RUNNING)
-		return;
-
-	/*
-	 * Only the kernel side is implemented for now.
-	 */
-	if (!is_user)
-		perf_callchain_kernel(regs, entry);
-}
-
-/*
- * No need for separate IRQ and NMI entries.
- */
-static DEFINE_PER_CPU(struct perf_callchain_entry, callchain);
-
-struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
-{
-	struct perf_callchain_entry *entry = &__get_cpu_var(callchain);
-
-	entry->nr = 0;
-
-	perf_do_callchain(regs, entry);
-
-	return entry;
-}

+ 94 - 47
arch/sh/kernel/perf_event.c

@@ -224,50 +224,80 @@ again:
 	local64_add(delta, &event->count);
 }
 
-static void sh_pmu_disable(struct perf_event *event)
+static void sh_pmu_stop(struct perf_event *event, int flags)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 	struct hw_perf_event *hwc = &event->hw;
 	int idx = hwc->idx;
 
-	clear_bit(idx, cpuc->active_mask);
-	sh_pmu->disable(hwc, idx);
+	if (!(event->hw.state & PERF_HES_STOPPED)) {
+		sh_pmu->disable(hwc, idx);
+		cpuc->events[idx] = NULL;
+		event->hw.state |= PERF_HES_STOPPED;
+	}
+
+	if ((flags & PERF_EF_UPDATE) && !(event->hw.state & PERF_HES_UPTODATE)) {
+		sh_perf_event_update(event, &event->hw, idx);
+		event->hw.state |= PERF_HES_UPTODATE;
+	}
+}
 
-	barrier();
+static void sh_pmu_start(struct perf_event *event, int flags)
+{
+	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct hw_perf_event *hwc = &event->hw;
+	int idx = hwc->idx;
 
-	sh_perf_event_update(event, &event->hw, idx);
+	if (WARN_ON_ONCE(idx == -1))
+		return;
+
+	if (flags & PERF_EF_RELOAD)
+		WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
+
+	cpuc->events[idx] = event;
+	event->hw.state = 0;
+	sh_pmu->enable(hwc, idx);
+}
+
+static void sh_pmu_del(struct perf_event *event, int flags)
+{
+	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 
-	cpuc->events[idx] = NULL;
-	clear_bit(idx, cpuc->used_mask);
+	sh_pmu_stop(event, PERF_EF_UPDATE);
+	__clear_bit(event->hw.idx, cpuc->used_mask);
 
 	perf_event_update_userpage(event);
 }
 
-static int sh_pmu_enable(struct perf_event *event)
+static int sh_pmu_add(struct perf_event *event, int flags)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 	struct hw_perf_event *hwc = &event->hw;
 	int idx = hwc->idx;
+	int ret = -EAGAIN;
+
+	perf_pmu_disable(event->pmu);
 
-	if (test_and_set_bit(idx, cpuc->used_mask)) {
+	if (__test_and_set_bit(idx, cpuc->used_mask)) {
 		idx = find_first_zero_bit(cpuc->used_mask, sh_pmu->num_events);
 		if (idx == sh_pmu->num_events)
-			return -EAGAIN;
+			goto out;
 
-		set_bit(idx, cpuc->used_mask);
+		__set_bit(idx, cpuc->used_mask);
 		hwc->idx = idx;
 	}
 
 	sh_pmu->disable(hwc, idx);
 
-	cpuc->events[idx] = event;
-	set_bit(idx, cpuc->active_mask);
-
-	sh_pmu->enable(hwc, idx);
+	event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
+	if (flags & PERF_EF_START)
+		sh_pmu_start(event, PERF_EF_RELOAD);
 
 	perf_event_update_userpage(event);
-
-	return 0;
+	ret = 0;
+out:
+	perf_pmu_enable(event->pmu);
+	return ret;
 }
 
 static void sh_pmu_read(struct perf_event *event)
@@ -275,24 +305,56 @@ static void sh_pmu_read(struct perf_event *event)
 	sh_perf_event_update(event, &event->hw, event->hw.idx);
 }
 
-static const struct pmu pmu = {
-	.enable		= sh_pmu_enable,
-	.disable	= sh_pmu_disable,
-	.read		= sh_pmu_read,
-};
-
-const struct pmu *hw_perf_event_init(struct perf_event *event)
+static int sh_pmu_event_init(struct perf_event *event)
 {
-	int err = __hw_perf_event_init(event);
+	int err;
+
+	switch (event->attr.type) {
+	case PERF_TYPE_RAW:
+	case PERF_TYPE_HW_CACHE:
+	case PERF_TYPE_HARDWARE:
+		err = __hw_perf_event_init(event);
+		break;
+
+	default:
+		return -ENOENT;
+	}
+
 	if (unlikely(err)) {
 		if (event->destroy)
 			event->destroy(event);
-		return ERR_PTR(err);
 	}
 
-	return &pmu;
+	return err;
+}
+
+static void sh_pmu_enable(struct pmu *pmu)
+{
+	if (!sh_pmu_initialized())
+		return;
+
+	sh_pmu->enable_all();
+}
+
+static void sh_pmu_disable(struct pmu *pmu)
+{
+	if (!sh_pmu_initialized())
+		return;
+
+	sh_pmu->disable_all();
 }
 
+static struct pmu pmu = {
+	.pmu_enable	= sh_pmu_enable,
+	.pmu_disable	= sh_pmu_disable,
+	.event_init	= sh_pmu_event_init,
+	.add		= sh_pmu_add,
+	.del		= sh_pmu_del,
+	.start		= sh_pmu_start,
+	.stop		= sh_pmu_stop,
+	.read		= sh_pmu_read,
+};
+
 static void sh_pmu_setup(int cpu)
 {
 	struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu);
@@ -317,32 +379,17 @@ sh_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
 	return NOTIFY_OK;
 }
 
-void hw_perf_enable(void)
-{
-	if (!sh_pmu_initialized())
-		return;
-
-	sh_pmu->enable_all();
-}
-
-void hw_perf_disable(void)
-{
-	if (!sh_pmu_initialized())
-		return;
-
-	sh_pmu->disable_all();
-}
-
-int __cpuinit register_sh_pmu(struct sh_pmu *pmu)
+int __cpuinit register_sh_pmu(struct sh_pmu *_pmu)
 {
 	if (sh_pmu)
 		return -EBUSY;
-	sh_pmu = pmu;
+	sh_pmu = _pmu;
 
-	pr_info("Performance Events: %s support registered\n", pmu->name);
+	pr_info("Performance Events: %s support registered\n", _pmu->name);
 
-	WARN_ON(pmu->num_events > MAX_HWEVENTS);
+	WARN_ON(_pmu->num_events > MAX_HWEVENTS);
 
+	perf_pmu_register(&pmu);
 	perf_cpu_notifier(sh_pmu_notifier);
 	return 0;
 }

+ 1 - 0
arch/sparc/Kconfig

@@ -30,6 +30,7 @@ config SPARC
 	select PERF_USE_VMALLOC
 	select HAVE_DMA_ATTRS
 	select HAVE_DMA_API_DEBUG
+	select HAVE_ARCH_JUMP_LABEL
 
 config SPARC32
 	def_bool !64BIT

+ 32 - 0
arch/sparc/include/asm/jump_label.h

@@ -0,0 +1,32 @@
+#ifndef _ASM_SPARC_JUMP_LABEL_H
+#define _ASM_SPARC_JUMP_LABEL_H
+
+#ifdef __KERNEL__
+
+#include <linux/types.h>
+#include <asm/system.h>
+
+#define JUMP_LABEL_NOP_SIZE 4
+
+#define JUMP_LABEL(key, label)					\
+	do {							\
+		asm goto("1:\n\t"				\
+			 "nop\n\t"				\
+			 "nop\n\t"				\
+			 ".pushsection __jump_table,  \"a\"\n\t"\
+			 ".word 1b, %l[" #label "], %c0\n\t"	\
+			 ".popsection \n\t"			\
+			 : :  "i" (key) :  : label);\
+	} while (0)
+
+#endif /* __KERNEL__ */
+
+typedef u32 jump_label_t;
+
+struct jump_entry {
+	jump_label_t code;
+	jump_label_t target;
+	jump_label_t key;
+};
+
+#endif

Some files were not shown because too many files changed in this diff.