
Merge tag 's3c24xx-dma' of git://git.kernel.org/pub/scm/linux/kernel/git/kgene/linux-samsung into next/drivers

From Kukjin Kim, this branch adds device-tree support to the DMA controller
on the older Samsung SoCs. It also adds support for one of the missing SoCs
in the family (2410).

The driver has been Ack:ed by Vinod Koul, but is merged through here due
to dependencies with platform code.
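
As an illustration, a board enables the new dmaengine driver by adding the matching platform device (declared in mach-s3c24xx/common.h) to its device list; a minimal sketch mirroring the mach-* changes below, with board_devices as a hypothetical array name:

    static struct platform_device *board_devices[] __initdata = {
    	/* ... existing board devices ... */
    	&s3c2412_device_dma,	/* s3c2412/s3c2413 boards; other SoCs register their own */
    };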

* tag 's3c24xx-dma' of git://git.kernel.org/pub/scm/linux/kernel/git/kgene/linux-samsung:
  ARM: S3C24XX: add dma pdata for s3c2410, s3c2440 and s3c2442
  dmaengine: s3c24xx-dma: add support for the s3c2410 type of controller
  ARM: S3C24XX: Fix possible dma selection warning
  ARM: SAMSUNG: set s3c24xx_dma_filter for s3c64xx-spi0 device
  ARM: S3C24XX: add platform-devices for new dma driver for s3c2412 and s3c2443
  dmaengine: add driver for Samsung s3c24xx SoCs
  ARM: S3C24XX: number the dma clocks
  + Linux 3.12-rc3

Signed-off-by: Olof Johansson <olof@lixom.net>
Olof Johansson committed 12 years ago
commit 3316dee245
100 changed files with 2210 additions and 368 deletions
  1. CREDITS (+1, -2)
  2. Documentation/devicetree/bindings/mmc/exynos-dw-mshc.txt (+5, -5)
  3. Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.txt (+5, -5)
  4. Documentation/devicetree/bindings/mmc/synopsys-dw-mshc.txt (+4, -4)
  5. Documentation/devicetree/bindings/pci/designware-pcie.txt (+1, -1)
  6. Documentation/devicetree/bindings/serial/qca,ar9330-uart.txt (+0, -0)
  7. Documentation/kernel-parameters.txt (+4, -0)
  8. Documentation/sound/alsa/HD-Audio-Models.txt (+6, -0)
  9. MAINTAINERS (+16, -4)
  10. Makefile (+1, -1)
  11. arch/Kconfig (+0, -3)
  12. arch/arm/Kconfig (+1, -2)
  13. arch/arm/crypto/aes-armv4.S (+3, -3)
  14. arch/arm/include/asm/uaccess.h (+7, -0)
  15. arch/arm/kernel/entry-common.S (+2, -2)
  16. arch/arm/kernel/entry-header.S (+4, -4)
  17. arch/arm/mach-s3c24xx/Kconfig (+2, -1)
  18. arch/arm/mach-s3c24xx/clock-s3c2412.c (+4, -4)
  19. arch/arm/mach-s3c24xx/common-s3c2443.c (+6, -6)
  20. arch/arm/mach-s3c24xx/common.c (+206, -0)
  21. arch/arm/mach-s3c24xx/common.h (+5, -0)
  22. arch/arm/mach-s3c24xx/mach-jive.c (+1, -0)
  23. arch/arm/mach-s3c24xx/mach-smdk2413.c (+1, -0)
  24. arch/arm/mach-s3c24xx/mach-smdk2416.c (+1, -0)
  25. arch/arm/mach-s3c24xx/mach-smdk2443.c (+1, -0)
  26. arch/arm/mach-s3c24xx/mach-vstms.c (+1, -0)
  27. arch/arm/plat-samsung/devs.c (+4, -1)
  28. arch/mips/include/asm/cpu-features.h (+1, -1)
  29. arch/mips/mm/dma-default.c (+4, -8)
  30. arch/openrisc/include/asm/prom.h (+0, -44)
  31. arch/powerpc/boot/Makefile (+2, -2)
  32. arch/powerpc/boot/epapr-wrapper.c (+9, -0)
  33. arch/powerpc/boot/epapr.c (+2, -2)
  34. arch/powerpc/boot/of.c (+15, -1)
  35. arch/powerpc/boot/wrapper (+5, -4)
  36. arch/powerpc/include/asm/irq.h (+2, -2)
  37. arch/powerpc/include/asm/processor.h (+1, -3)
  38. arch/powerpc/kernel/asm-offsets.c (+2, -1)
  39. arch/powerpc/kernel/irq.c (+44, -56)
  40. arch/powerpc/kernel/misc_32.S (+20, -5)
  41. arch/powerpc/kernel/misc_64.S (+4, -6)
  42. arch/powerpc/kernel/process.c (+2, -1)
  43. arch/powerpc/kernel/prom_init.c (+21, -0)
  44. arch/powerpc/lib/sstep.c (+2, -1)
  45. arch/powerpc/platforms/pseries/smp.c (+16, -10)
  46. arch/s390/Kconfig (+1, -1)
  47. arch/s390/include/asm/mutex.h (+0, -2)
  48. arch/s390/include/asm/processor.h (+2, -0)
  49. arch/s390/include/asm/spinlock.h (+5, -0)
  50. arch/x86/include/asm/xen/page.h (+20, -11)
  51. arch/x86/kernel/cpu/perf_event.c (+6, -6)
  52. arch/x86/kernel/cpu/perf_event_intel.c (+1, -0)
  53. arch/x86/kernel/cpu/perf_event_intel_uncore.c (+5, -5)
  54. arch/x86/kernel/microcode_amd.c (+1, -0)
  55. arch/x86/kernel/reboot.c (+17, -1)
  56. arch/x86/platform/efi/efi.c (+7, -4)
  57. arch/x86/xen/p2m.c (+4, -6)
  58. arch/x86/xen/spinlock.c (+24, -2)
  59. drivers/acpi/acpi_ipmi.c (+14, -10)
  60. drivers/acpi/scan.c (+1, -1)
  61. drivers/ata/sata_promise.c (+1, -1)
  62. drivers/base/core.c (+7, -7)
  63. drivers/block/cciss.c (+1, -0)
  64. drivers/block/cpqarray.c (+1, -0)
  65. drivers/char/tpm/xen-tpmfront.c (+0, -36)
  66. drivers/clocksource/Kconfig (+1, -0)
  67. drivers/clocksource/clksrc-of.c (+3, -0)
  68. drivers/clocksource/em_sti.c (+1, -1)
  69. drivers/clocksource/exynos_mct.c (+9, -1)
  70. drivers/cpufreq/acpi-cpufreq.c (+4, -0)
  71. drivers/cpufreq/cpufreq.c (+3, -0)
  72. drivers/cpufreq/exynos5440-cpufreq.c (+1, -1)
  73. drivers/dma/Kconfig (+12, -0)
  74. drivers/dma/Makefile (+1, -0)
  75. drivers/dma/s3c24xx-dma.c (+1350, -0)
  76. drivers/gpu/drm/i2c/tda998x_drv.c (+1, -2)
  77. drivers/gpu/drm/i915/i915_gem.c (+4, -4)
  78. drivers/gpu/drm/i915/i915_gpu_error.c (+4, -2)
  79. drivers/gpu/drm/i915/intel_display.c (+4, -0)
  80. drivers/gpu/drm/i915/intel_dp.c (+12, -1)
  81. drivers/gpu/drm/i915/intel_tv.c (+8, -0)
  82. drivers/gpu/drm/msm/mdp4/mdp4_kms.c (+0, -2)
  83. drivers/gpu/drm/msm/msm_drv.c (+4, -4)
  84. drivers/gpu/drm/msm/msm_gem.c (+0, -7)
  85. drivers/gpu/drm/radeon/btc_dpm.c (+51, -0)
  86. drivers/gpu/drm/radeon/btc_dpm.h (+2, -0)
  87. drivers/gpu/drm/radeon/ci_dpm.c (+26, -0)
  88. drivers/gpu/drm/radeon/cik.c (+8, -9)
  89. drivers/gpu/drm/radeon/ni_dpm.c (+24, -0)
  90. drivers/gpu/drm/radeon/r100.c (+5, -3)
  91. drivers/gpu/drm/radeon/r600_dpm.c (+1, -1)
  92. drivers/gpu/drm/radeon/r600_hdmi.c (+15, -5)
  93. drivers/gpu/drm/radeon/radeon_asic.c (+2, -0)
  94. drivers/gpu/drm/radeon/radeon_atombios.c (+43, -23)
  95. drivers/gpu/drm/radeon/radeon_cs.c (+3, -2)
  96. drivers/gpu/drm/radeon/radeon_device.c (+12, -3)
  97. drivers/gpu/drm/radeon/radeon_pm.c (+4, -4)
  98. drivers/gpu/drm/radeon/radeon_ring.c (+5, -3)
  99. drivers/gpu/drm/radeon/radeon_uvd.c (+1, -2)
  100. drivers/gpu/drm/radeon/si_dpm.c (+24, -0)

+ 1 - 2
CREDITS

@@ -2808,8 +2808,7 @@ S: Ottawa, Ontario
 S: Canada K2P 0X8
 
 N: Mikael Pettersson
-E: mikpe@it.uu.se
-W: http://user.it.uu.se/~mikpe/linux/
+E: mikpelinux@gmail.com
 D: Miscellaneous fixes
 
 N: Reed H. Petty

+ 5 - 5
Documentation/devicetree/bindings/mmc/exynos-dw-mshc.txt

@@ -1,11 +1,11 @@
-* Samsung Exynos specific extensions to the Synopsis Designware Mobile
+* Samsung Exynos specific extensions to the Synopsys Designware Mobile
   Storage Host Controller
 
-The Synopsis designware mobile storage host controller is used to interface
+The Synopsys designware mobile storage host controller is used to interface
 a SoC with storage medium such as eMMC or SD/MMC cards. This file documents
-differences between the core Synopsis dw mshc controller properties described
-by synopsis-dw-mshc.txt and the properties used by the Samsung Exynos specific
-extensions to the Synopsis Designware Mobile Storage Host Controller.
+differences between the core Synopsys dw mshc controller properties described
+by synopsys-dw-mshc.txt and the properties used by the Samsung Exynos specific
+extensions to the Synopsys Designware Mobile Storage Host Controller.
 
 Required Properties:
 
 

+ 5 - 5
Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.txt

@@ -1,11 +1,11 @@
-* Rockchip specific extensions to the Synopsis Designware Mobile
+* Rockchip specific extensions to the Synopsys Designware Mobile
   Storage Host Controller
 
-The Synopsis designware mobile storage host controller is used to interface
+The Synopsys designware mobile storage host controller is used to interface
 a SoC with storage medium such as eMMC or SD/MMC cards. This file documents
-differences between the core Synopsis dw mshc controller properties described
-by synopsis-dw-mshc.txt and the properties used by the Rockchip specific
-extensions to the Synopsis Designware Mobile Storage Host Controller.
+differences between the core Synopsys dw mshc controller properties described
+by synopsys-dw-mshc.txt and the properties used by the Rockchip specific
+extensions to the Synopsys Designware Mobile Storage Host Controller.
 
 Required Properties:
 
 

+ 4 - 4
Documentation/devicetree/bindings/mmc/synopsis-dw-mshc.txt → Documentation/devicetree/bindings/mmc/synopsys-dw-mshc.txt

@@ -1,14 +1,14 @@
-* Synopsis Designware Mobile Storage Host Controller
+* Synopsys Designware Mobile Storage Host Controller
 
-The Synopsis designware mobile storage host controller is used to interface
+The Synopsys designware mobile storage host controller is used to interface
 a SoC with storage medium such as eMMC or SD/MMC cards. This file documents
 differences between the core mmc properties described by mmc.txt and the
-properties used by the Synopsis Designware Mobile Storage Host Controller.
+properties used by the Synopsys Designware Mobile Storage Host Controller.
 
 Required Properties:
 
 * compatible: should be
-	- snps,dw-mshc: for controllers compliant with synopsis dw-mshc.
+	- snps,dw-mshc: for controllers compliant with synopsys dw-mshc.
 * #address-cells: should be 1.
 * #size-cells: should be 0.
 
 

+ 1 - 1
Documentation/devicetree/bindings/pci/designware-pcie.txt

@@ -1,4 +1,4 @@
-* Synopsis Designware PCIe interface
+* Synopsys Designware PCIe interface
 
 Required properties:
 - compatible: should contain "snps,dw-pcie" to identify the

+ 0 - 0
Documentation/devicetree/bindings/tty/serial/qca,ar9330-uart.txt → Documentation/devicetree/bindings/serial/qca,ar9330-uart.txt


+ 4 - 0
Documentation/kernel-parameters.txt

@@ -3485,6 +3485,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 				the unplug protocol
 			never -- do not unplug even if version check succeeds
 
+	xen_nopvspin	[X86,XEN]
+			Disables the ticketlock slowpath using Xen PV
+			optimizations.
+
 	xirc2ps_cs=	[NET,PCMCIA]
 			Format:
 			<irq>,<irq_mask>,<io>,<full_duplex>,<do_sound>,<lockup_hack>[,<irq2>[,<irq3>[,<irq4>]]]

+ 6 - 0
Documentation/sound/alsa/HD-Audio-Models.txt

@@ -296,6 +296,12 @@ Cirrus Logic CS4206/4207
   imac27	IMac 27 Inch
   auto		BIOS setup (default)
 
+Cirrus Logic CS4208
+===================
+  mba6		MacBook Air 6,1 and 6,2
+  gpio0		Enable GPIO 0 amp
+  auto		BIOS setup (default)
+
 VIA VT17xx/VT18xx/VT20xx
 ========================
   auto		BIOS setup (default)

+ 16 - 4
MAINTAINERS

@@ -1812,7 +1812,8 @@ S:	Supported
 F:	drivers/net/ethernet/broadcom/bnx2x/
 
 BROADCOM BCM281XX/BCM11XXX ARM ARCHITECTURE
-M:	Christian Daudt <csd@broadcom.com>
+M:	Christian Daudt <bcm@fixthebug.org>
+L:	bcm-kernel-feedback-list@broadcom.com
 T:	git git://git.github.com/broadcom/bcm11351
 S:	Maintained
 F:	arch/arm/mach-bcm/
@@ -2639,6 +2640,18 @@ F:	include/linux/device-mapper.h
 F:	include/linux/dm-*.h
 F:	include/uapi/linux/dm-*.h
 
+DIGI NEO AND CLASSIC PCI PRODUCTS
+M:	Lidza Louina <lidza.louina@gmail.com>
+L:	driverdev-devel@linuxdriverproject.org
+S:	Maintained
+F:	drivers/staging/dgnc/
+
+DIGI EPCA PCI PRODUCTS
+M:	Lidza Louina <lidza.louina@gmail.com>
+L:	driverdev-devel@linuxdriverproject.org
+S:	Maintained
+F:	drivers/staging/dgap/
+
 DIOLAN U2C-12 I2C DRIVER
 M:	Guenter Roeck <linux@roeck-us.net>
 L:	linux-i2c@vger.kernel.org
@@ -6595,7 +6608,7 @@ S:	Obsolete
 F:	drivers/net/wireless/prism54/
 
 PROMISE SATA TX2/TX4 CONTROLLER LIBATA DRIVER
-M:	Mikael Pettersson <mikpe@it.uu.se>
+M:	Mikael Pettersson <mikpelinux@gmail.com>
 L:	linux-ide@vger.kernel.org
 S:	Maintained
 F:	drivers/ata/sata_promise.*
@@ -8724,9 +8737,8 @@ F:	Documentation/hid/hiddev.txt
 F:	drivers/hid/usbhid/
 
 USB/IP DRIVERS
-M:	Matt Mooney <mfm@muteddisk.com>
 L:	linux-usb@vger.kernel.org
-S:	Maintained
+S:	Orphan
 F:	drivers/staging/usbip/
 
 USB ISP116X DRIVER

+ 1 - 1
Makefile

@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 12
 SUBLEVEL = 0
-EXTRAVERSION = -rc2
+EXTRAVERSION = -rc3
 NAME = One Giant Leap for Frogkind
 
 # *DOCUMENTATION*

+ 0 - 3
arch/Kconfig

@@ -286,9 +286,6 @@ config HAVE_PERF_USER_STACK_DUMP
 config HAVE_ARCH_JUMP_LABEL
 	bool
 
-config HAVE_ARCH_MUTEX_CPU_RELAX
-	bool
-
 config HAVE_RCU_TABLE_FREE
 	bool
 
 

+ 1 - 2
arch/arm/Kconfig

@@ -2216,8 +2216,7 @@ config NEON
 
 
 config KERNEL_MODE_NEON
 	bool "Support for NEON in kernel mode"
-	default n
-	depends on NEON
+	depends on NEON && AEABI
 	help
 	  Say Y to include support for NEON in kernel mode.
 
 

+ 3 - 3
arch/arm/crypto/aes-armv4.S

@@ -148,7 +148,7 @@ AES_Te:
 @ 		 const AES_KEY *key) {
 .align	5
 ENTRY(AES_encrypt)
-	sub	r3,pc,#8		@ AES_encrypt
+	adr	r3,AES_encrypt
 	stmdb   sp!,{r1,r4-r12,lr}
 	mov	r12,r0		@ inp
 	mov	r11,r2
@@ -381,7 +381,7 @@ _armv4_AES_encrypt:
 .align	5
 ENTRY(private_AES_set_encrypt_key)
 _armv4_AES_set_encrypt_key:
-	sub	r3,pc,#8		@ AES_set_encrypt_key
+	adr	r3,_armv4_AES_set_encrypt_key
 	teq	r0,#0
 	moveq	r0,#-1
 	beq	.Labrt
@@ -843,7 +843,7 @@ AES_Td:
 @ 		 const AES_KEY *key) {
 .align	5
 ENTRY(AES_decrypt)
-	sub	r3,pc,#8		@ AES_decrypt
+	adr	r3,AES_decrypt
 	stmdb   sp!,{r1,r4-r12,lr}
 	mov	r12,r0		@ inp
 	mov	r11,r2

+ 7 - 0
arch/arm/include/asm/uaccess.h

@@ -19,6 +19,13 @@
 #include <asm/unified.h>
 #include <asm/compiler.h>
 
+#if __LINUX_ARM_ARCH__ < 6
+#include <asm-generic/uaccess-unaligned.h>
+#else
+#define __get_user_unaligned __get_user
+#define __put_user_unaligned __put_user
+#endif
+
 #define VERIFY_READ 0
 #define VERIFY_WRITE 1
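
With this change, code that must read a possibly unaligned 32-bit value from user space can use the same helper on all ARM architecture levels, since ARMv6+ maps it straight to __get_user(). A hedged usage sketch (uptr and val are hypothetical names):

    u32 val;
    /* returns 0 on success, -EFAULT on fault; alignment handled as needed */
    if (__get_user_unaligned(val, (u32 __user *)uptr))
    	return -EFAULT;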
 
 

+ 2 - 2
arch/arm/kernel/entry-common.S

@@ -442,10 +442,10 @@ local_restart:
 	ldrcc	pc, [tbl, scno, lsl #2]		@ call sys_* routine
 
 	add	r1, sp, #S_OFF
-	cmp	scno, #(__ARM_NR_BASE - __NR_SYSCALL_BASE)
+2:	cmp	scno, #(__ARM_NR_BASE - __NR_SYSCALL_BASE)
 	eor	r0, scno, #__NR_SYSCALL_BASE	@ put OS number back
 	bcs	arm_syscall
-2:	mov	why, #0				@ no longer a real syscall
+	mov	why, #0				@ no longer a real syscall
 	b	sys_ni_syscall			@ not private func
 
 #if defined(CONFIG_OABI_COMPAT) || !defined(CONFIG_AEABI)

+ 4 - 4
arch/arm/kernel/entry-header.S

@@ -329,10 +329,10 @@
 #ifdef CONFIG_CONTEXT_TRACKING
 	.if	\save
 	stmdb   sp!, {r0-r3, ip, lr}
-	bl	user_exit
+	bl	context_tracking_user_exit
 	ldmia	sp!, {r0-r3, ip, lr}
 	.else
-	bl	user_exit
+	bl	context_tracking_user_exit
 	.endif
 #endif
 	.endm
@@ -341,10 +341,10 @@
 #ifdef CONFIG_CONTEXT_TRACKING
 	.if	\save
 	stmdb   sp!, {r0-r3, ip, lr}
-	bl	user_enter
+	bl	context_tracking_user_enter
 	ldmia	sp!, {r0-r3, ip, lr}
 	.else
-	bl	user_enter
+	bl	context_tracking_user_enter
 	.endif
 #endif
 	.endm

+ 2 - 1
arch/arm/mach-s3c24xx/Kconfig

@@ -28,6 +28,7 @@ config CPU_S3C2410
 	select CPU_ARM920T
 	select CPU_LLSERIAL_S3C2410
 	select S3C2410_CLOCK
+	select S3C2410_DMA if S3C24XX_DMA
 	select ARM_S3C2410_CPUFREQ if ARM_S3C24XX_CPUFREQ
 	select S3C2410_PM if PM
 	select SAMSUNG_WDT_RESET
@@ -70,6 +71,7 @@ config CPU_S3C2442
 	select CPU_ARM920T
 	select CPU_LLSERIAL_S3C2440
 	select S3C2410_CLOCK
+	select S3C2410_DMA if S3C24XX_DMA
 	select S3C2410_PM if PM
 	help
 	  Support for S3C2442 Samsung Mobile CPU based systems.
@@ -148,7 +150,6 @@ config S3C2410_DMA_DEBUG
 config S3C2410_DMA
 	bool
 	depends on S3C24XX_DMA && (CPU_S3C2410 || CPU_S3C2442)
-	default y if CPU_S3C2410 || CPU_S3C2442
 	help
 	  DMA device selection for S3C2410 and compatible CPUs
 
 

+ 4 - 4
arch/arm/mach-s3c24xx/clock-s3c2412.c

@@ -484,22 +484,22 @@ static struct clk init_clocks_disable[] = {
 
 
 static struct clk init_clocks[] = {
 	{
-		.name		= "dma",
+		.name		= "dma.0",
 		.parent		= &clk_h,
 		.enable		= s3c2412_clkcon_enable,
 		.ctrlbit	= S3C2412_CLKCON_DMA0,
 	}, {
-		.name		= "dma",
+		.name		= "dma.1",
 		.parent		= &clk_h,
 		.enable		= s3c2412_clkcon_enable,
 		.ctrlbit	= S3C2412_CLKCON_DMA1,
 	}, {
-		.name		= "dma",
+		.name		= "dma.2",
 		.parent		= &clk_h,
 		.enable		= s3c2412_clkcon_enable,
 		.ctrlbit	= S3C2412_CLKCON_DMA2,
 	}, {
-		.name		= "dma",
+		.name		= "dma.3",
 		.parent		= &clk_h,
 		.enable		= s3c2412_clkcon_enable,
 		.ctrlbit	= S3C2412_CLKCON_DMA3,

+ 6 - 6
arch/arm/mach-s3c24xx/common-s3c2443.c

@@ -438,32 +438,32 @@ static struct clk init_clocks_off[] = {
 
 
 static struct clk init_clocks[] = {
 	{
-		.name		= "dma",
+		.name		= "dma.0",
 		.parent		= &clk_h,
 		.enable		= s3c2443_clkcon_enable_h,
 		.ctrlbit	= S3C2443_HCLKCON_DMA0,
 	}, {
-		.name		= "dma",
+		.name		= "dma.1",
 		.parent		= &clk_h,
 		.enable		= s3c2443_clkcon_enable_h,
 		.ctrlbit	= S3C2443_HCLKCON_DMA1,
 	}, {
-		.name		= "dma",
+		.name		= "dma.2",
 		.parent		= &clk_h,
 		.enable		= s3c2443_clkcon_enable_h,
 		.ctrlbit	= S3C2443_HCLKCON_DMA2,
 	}, {
-		.name		= "dma",
+		.name		= "dma.3",
 		.parent		= &clk_h,
 		.enable		= s3c2443_clkcon_enable_h,
 		.ctrlbit	= S3C2443_HCLKCON_DMA3,
 	}, {
-		.name		= "dma",
+		.name		= "dma.4",
 		.parent		= &clk_h,
 		.enable		= s3c2443_clkcon_enable_h,
 		.ctrlbit	= S3C2443_HCLKCON_DMA4,
 	}, {
-		.name		= "dma",
+		.name		= "dma.5",
 		.parent		= &clk_h,
 		.enable		= s3c2443_clkcon_enable_h,
 		.ctrlbit	= S3C2443_HCLKCON_DMA5,

+ 206 - 0
arch/arm/mach-s3c24xx/common.c

@@ -31,6 +31,7 @@
 #include <linux/platform_device.h>
 #include <linux/delay.h>
 #include <linux/io.h>
+#include <linux/platform_data/dma-s3c24xx.h>
 
 #include <mach/hardware.h>
 #include <mach/regs-clock.h>
@@ -44,6 +45,7 @@
 
 
 #include <mach/regs-gpio.h>
 #include <plat/regs-serial.h>
+#include <mach/dma.h>
 
 #include <plat/cpu.h>
 #include <plat/devs.h>
@@ -329,3 +331,207 @@ void __init_or_cpufreq s3c24xx_setup_clocks(unsigned long fclk,
 	clk_p.rate = pclk;
 	clk_f.rate = fclk;
 }
+
+#if defined(CONFIG_CPU_S3C2410) || defined(CONFIG_CPU_S3C2412) || \
+	defined(CONFIG_CPU_S3C2440) || defined(CONFIG_CPU_S3C2442)
+static struct resource s3c2410_dma_resource[] = {
+	[0] = DEFINE_RES_MEM(S3C24XX_PA_DMA, S3C24XX_SZ_DMA),
+	[1] = DEFINE_RES_IRQ(IRQ_DMA0),
+	[2] = DEFINE_RES_IRQ(IRQ_DMA1),
+	[3] = DEFINE_RES_IRQ(IRQ_DMA2),
+	[4] = DEFINE_RES_IRQ(IRQ_DMA3),
+};
+#endif
+
+#if defined(CONFIG_CPU_S3C2410) || defined(CONFIG_CPU_S3C2442)
+static struct s3c24xx_dma_channel s3c2410_dma_channels[DMACH_MAX] = {
+	[DMACH_XD0] = { S3C24XX_DMA_AHB, true, S3C24XX_DMA_CHANREQ(0, 0), },
+	[DMACH_XD1] = { S3C24XX_DMA_AHB, true, S3C24XX_DMA_CHANREQ(0, 1), },
+	[DMACH_SDI] = { S3C24XX_DMA_APB, false, S3C24XX_DMA_CHANREQ(2, 0) |
+						S3C24XX_DMA_CHANREQ(2, 2) |
+						S3C24XX_DMA_CHANREQ(1, 3),
+	},
+	[DMACH_SPI0] = { S3C24XX_DMA_APB, true, S3C24XX_DMA_CHANREQ(3, 1), },
+	[DMACH_SPI1] = { S3C24XX_DMA_APB, true, S3C24XX_DMA_CHANREQ(2, 3), },
+	[DMACH_UART0] = { S3C24XX_DMA_APB, true, S3C24XX_DMA_CHANREQ(1, 0), },
+	[DMACH_UART1] = { S3C24XX_DMA_APB, true, S3C24XX_DMA_CHANREQ(1, 1), },
+	[DMACH_UART2] = { S3C24XX_DMA_APB, true, S3C24XX_DMA_CHANREQ(0, 3), },
+	[DMACH_TIMER] = { S3C24XX_DMA_APB, true, S3C24XX_DMA_CHANREQ(3, 0) |
+						 S3C24XX_DMA_CHANREQ(3, 2) |
+						 S3C24XX_DMA_CHANREQ(3, 3),
+	},
+	[DMACH_I2S_IN] = { S3C24XX_DMA_APB, true, S3C24XX_DMA_CHANREQ(2, 1) |
+						  S3C24XX_DMA_CHANREQ(1, 2),
+	},
+	[DMACH_I2S_OUT] = { S3C24XX_DMA_APB, true, S3C24XX_DMA_CHANREQ(0, 2), },
+	[DMACH_USB_EP1] = { S3C24XX_DMA_APB, true, S3C24XX_DMA_CHANREQ(4, 0), },
+	[DMACH_USB_EP2] = { S3C24XX_DMA_APB, true, S3C24XX_DMA_CHANREQ(4, 1), },
+	[DMACH_USB_EP3] = { S3C24XX_DMA_APB, true, S3C24XX_DMA_CHANREQ(4, 2), },
+	[DMACH_USB_EP4] = { S3C24XX_DMA_APB, true, S3C24XX_DMA_CHANREQ(4, 3), },
+};
+
+static struct s3c24xx_dma_platdata s3c2410_dma_platdata = {
+	.num_phy_channels = 4,
+	.channels = s3c2410_dma_channels,
+	.num_channels = DMACH_MAX,
+};
+
+struct platform_device s3c2410_device_dma = {
+	.name		= "s3c2410-dma",
+	.id		= 0,
+	.num_resources	= ARRAY_SIZE(s3c2410_dma_resource),
+	.resource	= s3c2410_dma_resource,
+	.dev	= {
+		.platform_data	= &s3c2410_dma_platdata,
+	},
+};
+#endif
+
+#ifdef CONFIG_CPU_S3C2412
+static struct s3c24xx_dma_channel s3c2412_dma_channels[DMACH_MAX] = {
+	[DMACH_XD0] = { S3C24XX_DMA_AHB, true, 17 },
+	[DMACH_XD1] = { S3C24XX_DMA_AHB, true, 18 },
+	[DMACH_SDI] = { S3C24XX_DMA_APB, false, 10 },
+	[DMACH_SPI0_RX] = { S3C24XX_DMA_APB, true, 1 },
+	[DMACH_SPI0_TX] = { S3C24XX_DMA_APB, true, 0 },
+	[DMACH_SPI1_RX] = { S3C24XX_DMA_APB, true, 3 },
+	[DMACH_SPI1_TX] = { S3C24XX_DMA_APB, true, 2 },
+	[DMACH_UART0] = { S3C24XX_DMA_APB, true, 19 },
+	[DMACH_UART1] = { S3C24XX_DMA_APB, true, 21 },
+	[DMACH_UART2] = { S3C24XX_DMA_APB, true, 23 },
+	[DMACH_UART0_SRC2] = { S3C24XX_DMA_APB, true, 20 },
+	[DMACH_UART1_SRC2] = { S3C24XX_DMA_APB, true, 22 },
+	[DMACH_UART2_SRC2] = { S3C24XX_DMA_APB, true, 24 },
+	[DMACH_TIMER] = { S3C24XX_DMA_APB, true, 9 },
+	[DMACH_I2S_IN] = { S3C24XX_DMA_APB, true, 5 },
+	[DMACH_I2S_OUT] = { S3C24XX_DMA_APB, true, 4 },
+	[DMACH_USB_EP1] = { S3C24XX_DMA_APB, true, 13 },
+	[DMACH_USB_EP2] = { S3C24XX_DMA_APB, true, 14 },
+	[DMACH_USB_EP3] = { S3C24XX_DMA_APB, true, 15 },
+	[DMACH_USB_EP4] = { S3C24XX_DMA_APB, true, 16 },
+};
+
+static struct s3c24xx_dma_platdata s3c2412_dma_platdata = {
+	.num_phy_channels = 4,
+	.channels = s3c2412_dma_channels,
+	.num_channels = DMACH_MAX,
+};
+
+struct platform_device s3c2412_device_dma = {
+	.name		= "s3c2412-dma",
+	.id		= 0,
+	.num_resources	= ARRAY_SIZE(s3c2410_dma_resource),
+	.resource	= s3c2410_dma_resource,
+	.dev	= {
+		.platform_data	= &s3c2412_dma_platdata,
+	},
+};
+#endif
+
+#if defined(CONFIG_CPU_S3C2440)
+static struct s3c24xx_dma_channel s3c2440_dma_channels[DMACH_MAX] = {
+	[DMACH_XD0] = { S3C24XX_DMA_AHB, true, S3C24XX_DMA_CHANREQ(0, 0), },
+	[DMACH_XD1] = { S3C24XX_DMA_AHB, true, S3C24XX_DMA_CHANREQ(0, 1), },
+	[DMACH_SDI] = { S3C24XX_DMA_APB, false, S3C24XX_DMA_CHANREQ(2, 0) |
+						S3C24XX_DMA_CHANREQ(6, 1) |
+						S3C24XX_DMA_CHANREQ(2, 2) |
+						S3C24XX_DMA_CHANREQ(1, 3),
+	},
+	[DMACH_SPI0] = { S3C24XX_DMA_APB, true, S3C24XX_DMA_CHANREQ(3, 1), },
+	[DMACH_SPI1] = { S3C24XX_DMA_APB, true, S3C24XX_DMA_CHANREQ(2, 3), },
+	[DMACH_UART0] = { S3C24XX_DMA_APB, true, S3C24XX_DMA_CHANREQ(1, 0), },
+	[DMACH_UART1] = { S3C24XX_DMA_APB, true, S3C24XX_DMA_CHANREQ(1, 1), },
+	[DMACH_UART2] = { S3C24XX_DMA_APB, true, S3C24XX_DMA_CHANREQ(0, 3), },
+	[DMACH_TIMER] = { S3C24XX_DMA_APB, true, S3C24XX_DMA_CHANREQ(3, 0) |
+						 S3C24XX_DMA_CHANREQ(3, 2) |
+						 S3C24XX_DMA_CHANREQ(3, 3),
+	},
+	[DMACH_I2S_IN] = { S3C24XX_DMA_APB, true, S3C24XX_DMA_CHANREQ(2, 1) |
+						  S3C24XX_DMA_CHANREQ(1, 2),
+	},
+	[DMACH_I2S_OUT] = { S3C24XX_DMA_APB, true, S3C24XX_DMA_CHANREQ(5, 0) |
+						   S3C24XX_DMA_CHANREQ(0, 2),
+	},
+	[DMACH_PCM_IN] = { S3C24XX_DMA_APB, true, S3C24XX_DMA_CHANREQ(6, 0) |
+						  S3C24XX_DMA_CHANREQ(5, 2),
+	},
+	[DMACH_PCM_OUT] = { S3C24XX_DMA_APB, true, S3C24XX_DMA_CHANREQ(5, 1) |
+						  S3C24XX_DMA_CHANREQ(6, 3),
+	},
+	[DMACH_MIC_IN] = { S3C24XX_DMA_APB, true, S3C24XX_DMA_CHANREQ(6, 2) |
+						  S3C24XX_DMA_CHANREQ(5, 3),
+	},
+	[DMACH_USB_EP1] = { S3C24XX_DMA_APB, true, S3C24XX_DMA_CHANREQ(4, 0), },
+	[DMACH_USB_EP2] = { S3C24XX_DMA_APB, true, S3C24XX_DMA_CHANREQ(4, 1), },
+	[DMACH_USB_EP3] = { S3C24XX_DMA_APB, true, S3C24XX_DMA_CHANREQ(4, 2), },
+	[DMACH_USB_EP4] = { S3C24XX_DMA_APB, true, S3C24XX_DMA_CHANREQ(4, 3), },
+};
+
+static struct s3c24xx_dma_platdata s3c2440_dma_platdata = {
+	.num_phy_channels = 4,
+	.channels = s3c2440_dma_channels,
+	.num_channels = DMACH_MAX,
+};
+
+struct platform_device s3c2440_device_dma = {
+	.name		= "s3c2410-dma",
+	.id		= 0,
+	.num_resources	= ARRAY_SIZE(s3c2410_dma_resource),
+	.resource	= s3c2410_dma_resource,
+	.dev	= {
+		.platform_data	= &s3c2440_dma_platdata,
+	},
+};
+#endif
+
+#if defined(CONFIG_CPUS_3C2443) || defined(CONFIG_CPU_S3C2416)
+static struct resource s3c2443_dma_resource[] = {
+	[0] = DEFINE_RES_MEM(S3C24XX_PA_DMA, S3C24XX_SZ_DMA),
+	[1] = DEFINE_RES_IRQ(IRQ_S3C2443_DMA0),
+	[2] = DEFINE_RES_IRQ(IRQ_S3C2443_DMA1),
+	[3] = DEFINE_RES_IRQ(IRQ_S3C2443_DMA2),
+	[4] = DEFINE_RES_IRQ(IRQ_S3C2443_DMA3),
+	[5] = DEFINE_RES_IRQ(IRQ_S3C2443_DMA4),
+	[6] = DEFINE_RES_IRQ(IRQ_S3C2443_DMA5),
+};
+
+static struct s3c24xx_dma_channel s3c2443_dma_channels[DMACH_MAX] = {
+	[DMACH_XD0] = { S3C24XX_DMA_AHB, true, 17 },
+	[DMACH_XD1] = { S3C24XX_DMA_AHB, true, 18 },
+	[DMACH_SDI] = { S3C24XX_DMA_APB, false, 10 },
+	[DMACH_SPI0_RX] = { S3C24XX_DMA_APB, true, 1 },
+	[DMACH_SPI0_TX] = { S3C24XX_DMA_APB, true, 0 },
+	[DMACH_SPI1_RX] = { S3C24XX_DMA_APB, true, 3 },
+	[DMACH_SPI1_TX] = { S3C24XX_DMA_APB, true, 2 },
+	[DMACH_UART0] = { S3C24XX_DMA_APB, true, 19 },
+	[DMACH_UART1] = { S3C24XX_DMA_APB, true, 21 },
+	[DMACH_UART2] = { S3C24XX_DMA_APB, true, 23 },
+	[DMACH_UART3] = { S3C24XX_DMA_APB, true, 25 },
+	[DMACH_UART0_SRC2] = { S3C24XX_DMA_APB, true, 20 },
+	[DMACH_UART1_SRC2] = { S3C24XX_DMA_APB, true, 22 },
+	[DMACH_UART2_SRC2] = { S3C24XX_DMA_APB, true, 24 },
+	[DMACH_UART3_SRC2] = { S3C24XX_DMA_APB, true, 26 },
+	[DMACH_TIMER] = { S3C24XX_DMA_APB, true, 9 },
+	[DMACH_I2S_IN] = { S3C24XX_DMA_APB, true, 5 },
+	[DMACH_I2S_OUT] = { S3C24XX_DMA_APB, true, 4 },
+	[DMACH_PCM_IN] = { S3C24XX_DMA_APB, true, 28 },
+	[DMACH_PCM_OUT] = { S3C24XX_DMA_APB, true, 27 },
+	[DMACH_MIC_IN] = { S3C24XX_DMA_APB, true, 29 },
+};
+
+static struct s3c24xx_dma_platdata s3c2443_dma_platdata = {
+	.num_phy_channels = 6,
+	.channels = s3c2443_dma_channels,
+	.num_channels = DMACH_MAX,
+};
+
+struct platform_device s3c2443_device_dma = {
+	.name		= "s3c2443-dma",
+	.id		= 0,
+	.num_resources	= ARRAY_SIZE(s3c2443_dma_resource),
+	.resource	= s3c2443_dma_resource,
+	.dev	= {
+		.platform_data	= &s3c2443_dma_platdata,
+	},
+};
+#endif

+ 5 - 0
arch/arm/mach-s3c24xx/common.h

@@ -109,4 +109,9 @@ extern void s3c2443_init_irq(void);
 
 
 extern struct syscore_ops s3c24xx_irq_syscore_ops;
 
+extern struct platform_device s3c2410_device_dma;
+extern struct platform_device s3c2412_device_dma;
+extern struct platform_device s3c2440_device_dma;
+extern struct platform_device s3c2443_device_dma;
+
 #endif /* __ARCH_ARM_MACH_S3C24XX_COMMON_H */

+ 1 - 0
arch/arm/mach-s3c24xx/mach-jive.c

@@ -466,6 +466,7 @@ static struct platform_device *jive_devices[] __initdata = {
 	&jive_device_wm8750,
 	&s3c_device_nand,
 	&s3c_device_usbgadget,
+	&s3c2412_device_dma,
 };
 
 static struct s3c2410_udc_mach_info jive_udc_cfg __initdata = {

+ 1 - 0
arch/arm/mach-s3c24xx/mach-smdk2413.c

@@ -89,6 +89,7 @@ static struct platform_device *smdk2413_devices[] __initdata = {
 	&s3c_device_i2c0,
 	&s3c_device_iis,
 	&s3c_device_usbgadget,
+	&s3c2412_device_dma,
 };
 
 static void __init smdk2413_fixup(struct tag *tags, char **cmdline,

+ 1 - 0
arch/arm/mach-s3c24xx/mach-smdk2416.c

@@ -215,6 +215,7 @@ static struct platform_device *smdk2416_devices[] __initdata = {
 	&s3c_device_hsmmc0,
 	&s3c_device_hsmmc1,
 	&s3c_device_usb_hsudc,
+	&s3c2443_device_dma,
 };
 
 static void __init smdk2416_map_io(void)

+ 1 - 0
arch/arm/mach-s3c24xx/mach-smdk2443.c

@@ -115,6 +115,7 @@ static struct platform_device *smdk2443_devices[] __initdata = {
 #ifdef CONFIG_SND_SOC_SMDK2443_WM9710
 	&s3c_device_ac97,
 #endif
+	&s3c2443_device_dma,
 };
 
 static void __init smdk2443_map_io(void)

+ 1 - 0
arch/arm/mach-s3c24xx/mach-vstms.c

@@ -126,6 +126,7 @@ static struct platform_device *vstms_devices[] __initdata = {
 	&s3c_device_iis,
 	&s3c_device_rtc,
 	&s3c_device_nand,
+	&s3c2412_device_dma,
 };
 
 static void __init vstms_fixup(struct tag *tags, char **cmdline,

+ 4 - 1
arch/arm/plat-samsung/devs.c

@@ -32,6 +32,7 @@
 #include <linux/ioport.h>
 #include <linux/platform_data/s3c-hsudc.h>
 #include <linux/platform_data/s3c-hsotg.h>
+#include <linux/platform_data/dma-s3c24xx.h>
 
 #include <media/s5p_hdmi.h>
 
@@ -1499,8 +1500,10 @@ void __init s3c64xx_spi0_set_platdata(int (*cfg_gpio)(void), int src_clk_nr,
 	pd.num_cs = num_cs;
 	pd.src_clk_nr = src_clk_nr;
 	pd.cfg_gpio = (cfg_gpio) ? cfg_gpio : s3c64xx_spi0_cfg_gpio;
-#ifdef CONFIG_PL330_DMA
+#if defined(CONFIG_PL330_DMA)
 	pd.filter = pl330_filter;
+#elif defined(CONFIG_S3C24XX_DMAC)
+	pd.filter = s3c24xx_dma_filter;
 #endif
 
 	s3c_set_platdata(&pd, sizeof(pd), &s3c64xx_device_spi0);
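
For context, a slave driver obtains a channel through the dmaengine core, which calls the filter above for each candidate channel; a minimal consumer-side sketch, assuming the DMACH_* request ids from the platform data (claim_sdi_channel and sdi_chan are hypothetical names):

    #include <linux/dmaengine.h>

    static struct dma_chan *sdi_chan;

    static int claim_sdi_channel(void)
    {
    	dma_cap_mask_t mask;

    	dma_cap_zero(mask);
    	dma_cap_set(DMA_SLAVE, mask);
    	/* the filter matches a virtual channel against the request id */
    	sdi_chan = dma_request_channel(mask, s3c24xx_dma_filter,
    				       (void *)DMACH_SDI);
    	return sdi_chan ? 0 : -ENODEV;
    }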

+ 1 - 1
arch/mips/include/asm/cpu-features.h

@@ -187,7 +187,7 @@
 
 
 /*
  * MIPS32, MIPS64, VR5500, IDT32332, IDT32334 and maybe a few other
- * pre-MIPS32/MIPS53 processors have CLO, CLZ.	The IDT RC64574 is 64-bit and
+ * pre-MIPS32/MIPS64 processors have CLO, CLZ.	The IDT RC64574 is 64-bit and
  * has CLO and CLZ but not DCLO nor DCLZ.  For 64-bit kernels
  * cpu_has_clo_clz also indicates the availability of DCLO and DCLZ.
  */

+ 4 - 8
arch/mips/mm/dma-default.c

@@ -308,12 +308,10 @@ static void mips_dma_sync_sg_for_cpu(struct device *dev,
 {
 	int i;
 
-	/* Make sure that gcc doesn't leave the empty loop body.  */
-	for (i = 0; i < nelems; i++, sg++) {
-		if (cpu_needs_post_dma_flush(dev))
+	if (cpu_needs_post_dma_flush(dev))
+		for (i = 0; i < nelems; i++, sg++)
 			__dma_sync(sg_page(sg), sg->offset, sg->length,
 				   direction);
-	}
 }
 
 static void mips_dma_sync_sg_for_device(struct device *dev,
@@ -321,12 +319,10 @@ static void mips_dma_sync_sg_for_device(struct device *dev,
 {
 	int i;
 
-	/* Make sure that gcc doesn't leave the empty loop body.  */
-	for (i = 0; i < nelems; i++, sg++) {
-		if (!plat_device_is_coherent(dev))
+	if (!plat_device_is_coherent(dev))
+		for (i = 0; i < nelems; i++, sg++)
 			__dma_sync(sg_page(sg), sg->offset, sg->length,
 				   direction);
-	}
 }
 
 int mips_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)

+ 0 - 44
arch/openrisc/include/asm/prom.h

@@ -14,53 +14,9 @@
  * the Free Software Foundation; either version 2 of the License, or
  * (at your option) any later version.
  */
-
-#include <linux/of.h>	/* linux/of.h gets to determine #include ordering */
-
 #ifndef _ASM_OPENRISC_PROM_H
 #define _ASM_OPENRISC_PROM_H
-#ifdef __KERNEL__
-#ifndef __ASSEMBLY__
 
-#include <linux/types.h>
-#include <asm/irq.h>
-#include <linux/irqdomain.h>
-#include <linux/atomic.h>
-#include <linux/of_irq.h>
-#include <linux/of_fdt.h>
-#include <linux/of_address.h>
-#include <linux/proc_fs.h>
-#include <linux/platform_device.h>
 #define HAVE_ARCH_DEVTREE_FIXUPS
 
-/* Other Prototypes */
-extern int early_uartlite_console(void);
-
-/* Parse the ibm,dma-window property of an OF node into the busno, phys and
- * size parameters.
- */
-void of_parse_dma_window(struct device_node *dn, const void *dma_window_prop,
-		unsigned long *busno, unsigned long *phys, unsigned long *size);
-
-extern void kdump_move_device_tree(void);
-
-/* Get the MAC address */
-extern const void *of_get_mac_address(struct device_node *np);
-
-/**
- * of_irq_map_pci - Resolve the interrupt for a PCI device
- * @pdev:	the device whose interrupt is to be resolved
- * @out_irq:	structure of_irq filled by this function
- *
- * This function resolves the PCI interrupt for a given PCI device. If a
- * device-node exists for a given pci_dev, it will use normal OF tree
- * walking. If not, it will implement standard swizzling and walk up the
- * PCI tree until an device-node is found, at which point it will finish
- * resolving using the OF tree walking.
- */
-struct pci_dev;
-extern int of_irq_map_pci(struct pci_dev *pdev, struct of_irq *out_irq);
-
-#endif /* __ASSEMBLY__ */
-#endif /* __KERNEL__ */
 #endif /* _ASM_OPENRISC_PROM_H */

+ 2 - 2
arch/powerpc/boot/Makefile

@@ -74,7 +74,7 @@ src-wlib-$(CONFIG_8xx) += mpc8xx.c planetcore.c
 src-wlib-$(CONFIG_PPC_82xx) += pq2.c fsl-soc.c planetcore.c
 src-wlib-$(CONFIG_EMBEDDED6xx) += mv64x60.c mv64x60_i2c.c ugecon.c
 
-src-plat-y := of.c
+src-plat-y := of.c epapr.c
 src-plat-$(CONFIG_40x) += fixed-head.S ep405.c cuboot-hotfoot.c \
 				treeboot-walnut.c cuboot-acadia.c \
 				cuboot-kilauea.c simpleboot.c \
@@ -97,7 +97,7 @@ src-plat-$(CONFIG_EMBEDDED6xx) += cuboot-pq2.c cuboot-mpc7448hpc2.c \
 					prpmc2800.c
 src-plat-$(CONFIG_AMIGAONE) += cuboot-amigaone.c
 src-plat-$(CONFIG_PPC_PS3) += ps3-head.S ps3-hvcall.S ps3.c
-src-plat-$(CONFIG_EPAPR_BOOT) += epapr.c
+src-plat-$(CONFIG_EPAPR_BOOT) += epapr.c epapr-wrapper.c
 
 src-wlib := $(sort $(src-wlib-y))
 src-plat := $(sort $(src-plat-y))

+ 9 - 0
arch/powerpc/boot/epapr-wrapper.c

@@ -0,0 +1,9 @@
+extern void epapr_platform_init(unsigned long r3, unsigned long r4,
+				unsigned long r5, unsigned long r6,
+				unsigned long r7);
+
+void platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
+		   unsigned long r6, unsigned long r7)
+{
+	epapr_platform_init(r3, r4, r5, r6, r7);
+}

+ 2 - 2
arch/powerpc/boot/epapr.c

@@ -48,8 +48,8 @@ static void platform_fixups(void)
 		       fdt_addr, fdt_totalsize((void *)fdt_addr), ima_size);
 }
 
-void platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
-		   unsigned long r6, unsigned long r7)
+void epapr_platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
+			 unsigned long r6, unsigned long r7)
 {
 	epapr_magic = r6;
 	ima_size = r7;

+ 15 - 1
arch/powerpc/boot/of.c

@@ -26,6 +26,9 @@
 
 
 static unsigned long claim_base;
 
+void epapr_platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
+			 unsigned long r6, unsigned long r7);
+
 static void *of_try_claim(unsigned long size)
 {
 	unsigned long addr = 0;
@@ -61,7 +64,7 @@ static void of_image_hdr(const void *hdr)
 	}
 }
 
-void platform_init(unsigned long a1, unsigned long a2, void *promptr)
+static void of_platform_init(unsigned long a1, unsigned long a2, void *promptr)
 {
 	platform_ops.image_hdr = of_image_hdr;
 	platform_ops.malloc = of_try_claim;
@@ -81,3 +84,14 @@ void platform_init(unsigned long a1, unsigned long a2, void *promptr)
 		loader_info.initrd_size = a2;
 	}
 }
+
+void platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
+		   unsigned long r6, unsigned long r7)
+{
+	/* Detect OF vs. ePAPR boot */
+	if (r5)
+		of_platform_init(r3, r4, (void *)r5);
+	else
+		epapr_platform_init(r3, r4, r5, r6, r7);
+}
+

+ 5 - 4
arch/powerpc/boot/wrapper

@@ -148,18 +148,18 @@ make_space=y
 
 
 case "$platform" in
 pseries)
-    platformo=$object/of.o
+    platformo="$object/of.o $object/epapr.o"
     link_address='0x4000000'
     ;;
 maple)
-    platformo=$object/of.o
+    platformo="$object/of.o $object/epapr.o"
     link_address='0x400000'
     ;;
 pmac|chrp)
-    platformo=$object/of.o
+    platformo="$object/of.o $object/epapr.o"
     ;;
 coff)
-    platformo="$object/crt0.o $object/of.o"
+    platformo="$object/crt0.o $object/of.o $object/epapr.o"
     lds=$object/zImage.coff.lds
     link_address='0x500000'
     pie=
@@ -253,6 +253,7 @@ treeboot-iss4xx-mpic)
     platformo="$object/treeboot-iss4xx.o"
     ;;
 epapr)
+    platformo="$object/epapr.o $object/epapr-wrapper.o"
     link_address='0x20000000'
     pie=-pie
     ;;

+ 2 - 2
arch/powerpc/include/asm/irq.h

@@ -69,9 +69,9 @@ extern struct thread_info *softirq_ctx[NR_CPUS];
 
 
 extern void irq_ctx_init(void);
 extern void call_do_softirq(struct thread_info *tp);
-extern int call_handle_irq(int irq, void *p1,
-			   struct thread_info *tp, void *func);
+extern void call_do_irq(struct pt_regs *regs, struct thread_info *tp);
 extern void do_IRQ(struct pt_regs *regs);
+extern void __do_irq(struct pt_regs *regs);
 
 int irq_choose_cpu(const struct cpumask *mask);
 
 

+ 1 - 3
arch/powerpc/include/asm/processor.h

@@ -149,8 +149,6 @@ typedef struct {
 
 
 struct thread_struct {
 	unsigned long	ksp;		/* Kernel stack pointer */
-	unsigned long	ksp_limit;	/* if ksp <= ksp_limit stack overflow */
-
 #ifdef CONFIG_PPC64
 	unsigned long	ksp_vsid;
 #endif
@@ -162,6 +160,7 @@ struct thread_struct {
 #endif
 #ifdef CONFIG_PPC32
 	void		*pgdir;		/* root of page-table tree */
+	unsigned long	ksp_limit;	/* if ksp <= ksp_limit stack overflow */
 #endif
 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
 	/*
@@ -321,7 +320,6 @@ struct thread_struct {
 #else
 #define INIT_THREAD  { \
 	.ksp = INIT_SP, \
-	.ksp_limit = INIT_SP_LIMIT, \
 	.regs = (struct pt_regs *)INIT_SP - 1, /* XXX bogus, I think */ \
 	.fs = KERNEL_DS, \
 	.fpr = {{0}}, \

+ 2 - 1
arch/powerpc/kernel/asm-offsets.c

@@ -80,10 +80,11 @@ int main(void)
 	DEFINE(TASKTHREADPPR, offsetof(struct task_struct, thread.ppr));
 #else
 	DEFINE(THREAD_INFO, offsetof(struct task_struct, stack));
+	DEFINE(THREAD_INFO_GAP, _ALIGN_UP(sizeof(struct thread_info), 16));
+	DEFINE(KSP_LIMIT, offsetof(struct thread_struct, ksp_limit));
 #endif /* CONFIG_PPC64 */
 
 	DEFINE(KSP, offsetof(struct thread_struct, ksp));
-	DEFINE(KSP_LIMIT, offsetof(struct thread_struct, ksp_limit));
 	DEFINE(PT_REGS, offsetof(struct thread_struct, regs));
 #ifdef CONFIG_BOOKE
 	DEFINE(THREAD_NORMSAVES, offsetof(struct thread_struct, normsave[0]));

+ 44 - 56
arch/powerpc/kernel/irq.c

@@ -441,50 +441,6 @@ void migrate_irqs(void)
 }
 #endif
 
-static inline void handle_one_irq(unsigned int irq)
-{
-	struct thread_info *curtp, *irqtp;
-	unsigned long saved_sp_limit;
-	struct irq_desc *desc;
-
-	desc = irq_to_desc(irq);
-	if (!desc)
-		return;
-
-	/* Switch to the irq stack to handle this */
-	curtp = current_thread_info();
-	irqtp = hardirq_ctx[smp_processor_id()];
-
-	if (curtp == irqtp) {
-		/* We're already on the irq stack, just handle it */
-		desc->handle_irq(irq, desc);
-		return;
-	}
-
-	saved_sp_limit = current->thread.ksp_limit;
-
-	irqtp->task = curtp->task;
-	irqtp->flags = 0;
-
-	/* Copy the softirq bits in preempt_count so that the
-	 * softirq checks work in the hardirq context. */
-	irqtp->preempt_count = (irqtp->preempt_count & ~SOFTIRQ_MASK) |
-			       (curtp->preempt_count & SOFTIRQ_MASK);
-
-	current->thread.ksp_limit = (unsigned long)irqtp +
-		_ALIGN_UP(sizeof(struct thread_info), 16);
-
-	call_handle_irq(irq, desc, irqtp, desc->handle_irq);
-	current->thread.ksp_limit = saved_sp_limit;
-	irqtp->task = NULL;
-
-	/* Set any flag that may have been set on the
-	 * alternate stack
-	 */
-	if (irqtp->flags)
-		set_bits(irqtp->flags, &curtp->flags);
-}
-
 static inline void check_stack_overflow(void)
 {
 #ifdef CONFIG_DEBUG_STACKOVERFLOW
@@ -501,9 +457,9 @@ static inline void check_stack_overflow(void)
 #endif
 }
 
-void do_IRQ(struct pt_regs *regs)
+void __do_irq(struct pt_regs *regs)
 {
-	struct pt_regs *old_regs = set_irq_regs(regs);
+	struct irq_desc *desc;
 	unsigned int irq;
 
 	irq_enter();
@@ -519,18 +475,56 @@ void do_IRQ(struct pt_regs *regs)
 	 */
 	irq = ppc_md.get_irq();
 
-	/* We can hard enable interrupts now */
+	/* We can hard enable interrupts now to allow perf interrupts */
 	may_hard_irq_enable();
 
 	/* And finally process it */
-	if (irq != NO_IRQ)
-		handle_one_irq(irq);
-	else
+	if (unlikely(irq == NO_IRQ))
 		__get_cpu_var(irq_stat).spurious_irqs++;
+	else {
+		desc = irq_to_desc(irq);
+		if (likely(desc))
+			desc->handle_irq(irq, desc);
+	}
 
 	trace_irq_exit(regs);
 
 	irq_exit();
+}
+
+void do_IRQ(struct pt_regs *regs)
+{
+	struct pt_regs *old_regs = set_irq_regs(regs);
+	struct thread_info *curtp, *irqtp;
+
+	/* Switch to the irq stack to handle this */
+	curtp = current_thread_info();
+	irqtp = hardirq_ctx[raw_smp_processor_id()];
+
+	/* Already there ? */
+	if (unlikely(curtp == irqtp)) {
+		__do_irq(regs);
+		set_irq_regs(old_regs);
+		return;
+	}
+
+	/* Prepare the thread_info in the irq stack */
+	irqtp->task = curtp->task;
+	irqtp->flags = 0;
+
+	/* Copy the preempt_count so that the [soft]irq checks work. */
+	irqtp->preempt_count = curtp->preempt_count;
+
+	/* Switch stack and call */
+	call_do_irq(regs, irqtp);
+
+	/* Restore stack limit */
+	irqtp->task = NULL;
+
+	/* Copy back updates to the thread_info */
+	if (irqtp->flags)
+		set_bits(irqtp->flags, &curtp->flags);
+
 	set_irq_regs(old_regs);
 }
 
@@ -592,28 +586,22 @@ void irq_ctx_init(void)
 		memset((void *)softirq_ctx[i], 0, THREAD_SIZE);
 		tp = softirq_ctx[i];
 		tp->cpu = i;
-		tp->preempt_count = 0;
 
 		memset((void *)hardirq_ctx[i], 0, THREAD_SIZE);
 		tp = hardirq_ctx[i];
 		tp->cpu = i;
-		tp->preempt_count = HARDIRQ_OFFSET;
 	}
 }
 
 static inline void do_softirq_onstack(void)
 {
 	struct thread_info *curtp, *irqtp;
-	unsigned long saved_sp_limit = current->thread.ksp_limit;
 
 	curtp = current_thread_info();
 	irqtp = softirq_ctx[smp_processor_id()];
 	irqtp->task = curtp->task;
 	irqtp->flags = 0;
-	current->thread.ksp_limit = (unsigned long)irqtp +
-				    _ALIGN_UP(sizeof(struct thread_info), 16);
 	call_do_softirq(irqtp);
-	current->thread.ksp_limit = saved_sp_limit;
 	irqtp->task = NULL;
 
 	/* Set any flag that may have been set on the

+ 20 - 5
arch/powerpc/kernel/misc_32.S

@@ -36,26 +36,41 @@
 
 
 	.text
 
+/*
+ * We store the saved ksp_limit in the unused part
+ * of the STACK_FRAME_OVERHEAD
+ */
 _GLOBAL(call_do_softirq)
 	mflr	r0
 	stw	r0,4(r1)
+	lwz	r10,THREAD+KSP_LIMIT(r2)
+	addi	r11,r3,THREAD_INFO_GAP
 	stwu	r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r3)
 	mr	r1,r3
+	stw	r10,8(r1)
+	stw	r11,THREAD+KSP_LIMIT(r2)
 	bl	__do_softirq
+	lwz	r10,8(r1)
 	lwz	r1,0(r1)
 	lwz	r0,4(r1)
+	stw	r10,THREAD+KSP_LIMIT(r2)
 	mtlr	r0
 	blr
 
-_GLOBAL(call_handle_irq)
+_GLOBAL(call_do_irq)
 	mflr	r0
 	stw	r0,4(r1)
-	mtctr	r6
-	stwu	r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r5)
-	mr	r1,r5
-	bctrl
+	lwz	r10,THREAD+KSP_LIMIT(r2)
+	addi	r11,r3,THREAD_INFO_GAP
+	stwu	r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r4)
+	mr	r1,r4
+	stw	r10,8(r1)
+	stw	r11,THREAD+KSP_LIMIT(r2)
+	bl	__do_irq
+	lwz	r10,8(r1)
 	lwz	r1,0(r1)
 	lwz	r0,4(r1)
+	stw	r10,THREAD+KSP_LIMIT(r2)
 	mtlr	r0
 	blr
 
 

+ 4 - 6
arch/powerpc/kernel/misc_64.S

@@ -40,14 +40,12 @@ _GLOBAL(call_do_softirq)
 	mtlr	r0
 	blr
 
-_GLOBAL(call_handle_irq)
-	ld	r8,0(r6)
+_GLOBAL(call_do_irq)
 	mflr	r0
 	std	r0,16(r1)
-	mtctr	r8
-	stdu	r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r5)
-	mr	r1,r5
-	bctrl
+	stdu	r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r4)
+	mr	r1,r4
+	bl	.__do_irq
 	ld	r1,0(r1)
 	ld	r0,16(r1)
 	mtlr	r0

+ 2 - 1
arch/powerpc/kernel/process.c

@@ -1000,9 +1000,10 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
 	kregs = (struct pt_regs *) sp;
 	sp -= STACK_FRAME_OVERHEAD;
 	p->thread.ksp = sp;
+#ifdef CONFIG_PPC32
 	p->thread.ksp_limit = (unsigned long)task_stack_page(p) +
 				_ALIGN_UP(sizeof(struct thread_info), 16);
-
+#endif
 #ifdef CONFIG_HAVE_HW_BREAKPOINT
 	p->thread.ptrace_bps[0] = NULL;
 #endif

+ 21 - 0
arch/powerpc/kernel/prom_init.c

@@ -196,6 +196,8 @@ static int __initdata mem_reserve_cnt;
 
 
 static cell_t __initdata regbuf[1024];
 
+static bool rtas_has_query_cpu_stopped;
+
 
 /*
  * Error results ... some OF calls will return "-1" on error, some
@@ -1574,6 +1576,11 @@ static void __init prom_instantiate_rtas(void)
 	prom_setprop(rtas_node, "/rtas", "linux,rtas-entry",
 		     &val, sizeof(val));
 
+	/* Check if it supports "query-cpu-stopped-state" */
+	if (prom_getprop(rtas_node, "query-cpu-stopped-state",
+			 &val, sizeof(val)) != PROM_ERROR)
+		rtas_has_query_cpu_stopped = true;
+
 #if defined(CONFIG_PPC_POWERNV) && defined(__BIG_ENDIAN__)
 	/* PowerVN takeover hack */
 	prom_rtas_data = base;
@@ -1815,6 +1822,18 @@ static void __init prom_hold_cpus(void)
 		= (void *) LOW_ADDR(__secondary_hold_acknowledge);
 	unsigned long secondary_hold = LOW_ADDR(__secondary_hold);
 
+	/*
+	 * On pseries, if RTAS supports "query-cpu-stopped-state",
+	 * we skip this stage, the CPUs will be started by the
+	 * kernel using RTAS.
+	 */
+	if ((of_platform == PLATFORM_PSERIES ||
+	     of_platform == PLATFORM_PSERIES_LPAR) &&
+	    rtas_has_query_cpu_stopped) {
+		prom_printf("prom_hold_cpus: skipped\n");
+		return;
+	}
+
 	prom_debug("prom_hold_cpus: start...\n");
 	prom_debug("    1) spinloop       = 0x%x\n", (unsigned long)spinloop);
 	prom_debug("    1) *spinloop      = 0x%x\n", *spinloop);
@@ -3011,6 +3030,8 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
 	 * On non-powermacs, put all CPUs in spin-loops.
 	 *
 	 * PowerMacs use a different mechanism to spin CPUs
+	 *
+	 * (This must be done after instanciating RTAS)
 	 */
 	if (of_platform != PLATFORM_POWERMAC &&
 	    of_platform != PLATFORM_OPAL)

+ 2 - 1
arch/powerpc/lib/sstep.c

@@ -1505,6 +1505,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
 		 */
 		if ((ra == 1) && !(regs->msr & MSR_PR) \
 			&& (val3 >= (regs->gpr[1] - STACK_INT_FRAME_SIZE))) {
+#ifdef CONFIG_PPC32
 			/*
 			 * Check if we will touch kernel sack overflow
 			 */
@@ -1513,7 +1514,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
 				err = -EINVAL;
 				break;
 			}
-
+#endif /* CONFIG_PPC32 */
 			/*
 			 * Check if we already set since that means we'll
 			 * lose the previous value.

+ 16 - 10
arch/powerpc/platforms/pseries/smp.c

@@ -233,18 +233,24 @@ static void __init smp_init_pseries(void)
 
 
 	alloc_bootmem_cpumask_var(&of_spin_mask);
 
-	/* Mark threads which are still spinning in hold loops. */
-	if (cpu_has_feature(CPU_FTR_SMT)) {
-		for_each_present_cpu(i) { 
-			if (cpu_thread_in_core(i) == 0)
-				cpumask_set_cpu(i, of_spin_mask);
-		}
-	} else {
-		cpumask_copy(of_spin_mask, cpu_present_mask);
+	/*
+	 * Mark threads which are still spinning in hold loops
+	 *
+	 * We know prom_init will not have started them if RTAS supports
+	 * query-cpu-stopped-state.
+	 */
+	if (rtas_token("query-cpu-stopped-state") == RTAS_UNKNOWN_SERVICE) {
+		if (cpu_has_feature(CPU_FTR_SMT)) {
+			for_each_present_cpu(i) {
+				if (cpu_thread_in_core(i) == 0)
+					cpumask_set_cpu(i, of_spin_mask);
+			}
+		} else
+			cpumask_copy(of_spin_mask, cpu_present_mask);
+
+		cpumask_clear_cpu(boot_cpuid, of_spin_mask);
 	}
 
-	cpumask_clear_cpu(boot_cpuid, of_spin_mask);
-
 	/* Non-lpar has additional take/give timebase */
 	if (rtas_token("freeze-time-base") != RTAS_UNKNOWN_SERVICE) {
 		smp_ops->give_timebase = rtas_give_timebase;

+ 1 - 1
arch/s390/Kconfig

@@ -93,6 +93,7 @@ config S390
 	select ARCH_INLINE_WRITE_UNLOCK_IRQ
 	select ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE
 	select ARCH_SAVE_PAGE_KEYS if HIBERNATION
+	select ARCH_USE_CMPXCHG_LOCKREF
 	select ARCH_WANT_IPC_PARSE_VERSION
 	select BUILDTIME_EXTABLE_SORT
 	select CLONE_BACKWARDS2
@@ -102,7 +103,6 @@ config S390
 	select GENERIC_TIME_VSYSCALL_OLD
 	select HAVE_ALIGNED_STRUCT_PAGE if SLUB
 	select HAVE_ARCH_JUMP_LABEL if !MARCH_G5
-	select HAVE_ARCH_MUTEX_CPU_RELAX
 	select HAVE_ARCH_SECCOMP_FILTER
 	select HAVE_ARCH_TRACEHOOK
 	select HAVE_ARCH_TRANSPARENT_HUGEPAGE if 64BIT

+ 0 - 2
arch/s390/include/asm/mutex.h

@@ -7,5 +7,3 @@
  */
 
 #include <asm-generic/mutex-dec.h>
-
-#define arch_mutex_cpu_relax()	barrier()

+ 2 - 0
arch/s390/include/asm/processor.h

@@ -198,6 +198,8 @@ static inline void cpu_relax(void)
 	barrier();
 }
 
+#define arch_mutex_cpu_relax()  barrier()
+
 static inline void psw_set_key(unsigned int key)
 {
 	asm volatile("spka 0(%0)" : : "d" (key));

+ 5 - 0
arch/s390/include/asm/spinlock.h

@@ -44,6 +44,11 @@ extern void arch_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags);
 extern int arch_spin_trylock_retry(arch_spinlock_t *);
 extern void arch_spin_relax(arch_spinlock_t *lock);
 
+static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
+{
+	return lock.owner_cpu == 0;
+}
+
 static inline void arch_spin_lock(arch_spinlock_t *lp)
 {
 	int old;

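The two s390 changes belong together: ARCH_USE_CMPXCHG_LOCKREF may only be selected once the architecture supplies arch_spin_value_unlocked(), because lib/lockref.c applies that predicate to a lock value sampled without taking the lock. A condensed sketch of the fast path this enables (simplified from the CMPXCHG_LOOP macro in lib/lockref.c, shown for the lockref_get() case):

	struct lockref old, new;

	/* the lock word and the reference count share one 64-bit quantity */
	old.lock_count = ACCESS_ONCE(lockref->lock_count);
	while (arch_spin_value_unlocked(old.lock.rlock.raw_lock)) {
		new = old;
		new.count++;
		if (cmpxchg64(&lockref->lock_count, old.lock_count,
			      new.lock_count) == old.lock_count)
			return;		/* got the reference, lock never touched */
		old.lock_count = ACCESS_ONCE(lockref->lock_count);
	}
	/* lock held or contended: fall back to the spinlock slow path */
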
+ 20 - 11
arch/x86/include/asm/xen/page.h

@@ -79,30 +79,38 @@ static inline int phys_to_machine_mapping_valid(unsigned long pfn)
 	return get_phys_to_machine(pfn) != INVALID_P2M_ENTRY;
 }
 
-static inline unsigned long mfn_to_pfn(unsigned long mfn)
+static inline unsigned long mfn_to_pfn_no_overrides(unsigned long mfn)
 {
 	unsigned long pfn;
-	int ret = 0;
+	int ret;
 
 	if (xen_feature(XENFEAT_auto_translated_physmap))
 		return mfn;
 
-	if (unlikely(mfn >= machine_to_phys_nr)) {
-		pfn = ~0;
-		goto try_override;
-	}
-	pfn = 0;
+	if (unlikely(mfn >= machine_to_phys_nr))
+		return ~0;
+
 	/*
 	 * The array access can fail (e.g., device space beyond end of RAM).
 	 * In such cases it doesn't matter what we return (we return garbage),
 	 * but we must handle the fault without crashing!
 	 */
 	ret = __get_user(pfn, &machine_to_phys_mapping[mfn]);
-try_override:
-	/* ret might be < 0 if there are no entries in the m2p for mfn */
 	if (ret < 0)
-		pfn = ~0;
-	else if (get_phys_to_machine(pfn) != mfn)
+		return ~0;
+
+	return pfn;
+}
+
+static inline unsigned long mfn_to_pfn(unsigned long mfn)
+{
+	unsigned long pfn;
+
+	if (xen_feature(XENFEAT_auto_translated_physmap))
+		return mfn;
+
+	pfn = mfn_to_pfn_no_overrides(mfn);
+	if (get_phys_to_machine(pfn) != mfn) {
 		/*
 		 * If this appears to be a foreign mfn (because the pfn
 		 * doesn't map back to the mfn), then check the local override
@@ -111,6 +119,7 @@ try_override:
 		 * m2p_find_override_pfn returns ~0 if it doesn't find anything.
 		 */
 		pfn = m2p_find_override_pfn(mfn, ~0);
+	}
 
 	/*
 	 * pfn is ~0 if there are no entries in the m2p for mfn or if the

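Two details of the refactored helper are worth spelling out. The machine_to_phys_mapping table is maintained by the hypervisor and may simply be unmapped for out-of-range mfns, so the lookup goes through __get_user(), which survives the fault and reports -EFAULT instead of oopsing. The round trip through get_phys_to_machine() is then what flags foreign pages. As a sketch:

	unsigned long pfn;

	/* raw M2P lookup; a fault on an unmapped entry is tolerated */
	if (__get_user(pfn, &machine_to_phys_mapping[mfn]) < 0)
		pfn = ~0UL;	/* treat as "no entry" */

	/* the P2M must map back, otherwise the mfn is foreign */
	if (get_phys_to_machine(pfn) != mfn)
		pfn = m2p_find_override_pfn(mfn, ~0);
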
+ 6 - 6
arch/x86/kernel/cpu/perf_event.c

@@ -1506,7 +1506,7 @@ static int __init init_hw_perf_events(void)
 		err = amd_pmu_init();
 		break;
 	default:
-		return 0;
+		err = -ENOTSUPP;
 	}
 	if (err != 0) {
 		pr_cont("no PMU driver, software events only.\n");
@@ -1883,9 +1883,9 @@ static struct pmu pmu = {
 
 void arch_perf_update_userpage(struct perf_event_mmap_page *userpg, u64 now)
 {
-	userpg->cap_usr_time = 0;
-	userpg->cap_usr_time_zero = 0;
-	userpg->cap_usr_rdpmc = x86_pmu.attr_rdpmc;
+	userpg->cap_user_time = 0;
+	userpg->cap_user_time_zero = 0;
+	userpg->cap_user_rdpmc = x86_pmu.attr_rdpmc;
 	userpg->pmc_width = x86_pmu.cntval_bits;
 
 	if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
@@ -1894,13 +1894,13 @@ void arch_perf_update_userpage(struct perf_event_mmap_page *userpg, u64 now)
 	if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
 		return;
 
-	userpg->cap_usr_time = 1;
+	userpg->cap_user_time = 1;
 	userpg->time_mult = this_cpu_read(cyc2ns);
 	userpg->time_shift = CYC2NS_SCALE_FACTOR;
 	userpg->time_offset = this_cpu_read(cyc2ns_offset) - now;
 
 	if (sched_clock_stable && !check_tsc_disabled()) {
-		userpg->cap_usr_time_zero = 1;
+		userpg->cap_user_time_zero = 1;
 		userpg->time_zero = this_cpu_read(cyc2ns_offset);
 	}
 }

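The cap_usr_* to cap_user_* rename matters because self-monitoring programs read these bits straight out of the mmap'ed perf_event_mmap_page. A sketch of the documented userspace time computation (following the comments in include/uapi/linux/perf_event.h; rdtsc() stands in for reading the hardware counter):

	struct perf_event_mmap_page *pc = mapped_ring;	/* first page of the mmap */
	u64 cyc, quot, rem, delta;

	if (pc->cap_user_time) {	/* TSC-derived timestamps are usable */
		cyc  = rdtsc();
		quot = cyc >> pc->time_shift;
		rem  = cyc & (((u64)1 << pc->time_shift) - 1);
		delta = pc->time_offset + quot * pc->time_mult
			+ ((rem * pc->time_mult) >> pc->time_shift);
	}
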
+ 1 - 0
arch/x86/kernel/cpu/perf_event_intel.c

@@ -2325,6 +2325,7 @@ __init int intel_pmu_init(void)
 		break;
 
 	case 55: /* Atom 22nm "Silvermont" */
+	case 77: /* Avoton "Silvermont" */
 		memcpy(hw_cache_event_ids, slm_hw_cache_event_ids,
 			sizeof(hw_cache_event_ids));
 		memcpy(hw_cache_extra_regs, slm_hw_cache_extra_regs,

+ 5 - 5
arch/x86/kernel/cpu/perf_event_intel_uncore.c

@@ -2706,14 +2706,14 @@ static void uncore_pmu_init_hrtimer(struct intel_uncore_box *box)
 	box->hrtimer.function = uncore_pmu_hrtimer;
 }
 
-struct intel_uncore_box *uncore_alloc_box(struct intel_uncore_type *type, int cpu)
+static struct intel_uncore_box *uncore_alloc_box(struct intel_uncore_type *type, int node)
 {
 	struct intel_uncore_box *box;
 	int i, size;
 
 	size = sizeof(*box) + type->num_shared_regs * sizeof(struct intel_uncore_extra_reg);
 
-	box = kzalloc_node(size, GFP_KERNEL, cpu_to_node(cpu));
+	box = kzalloc_node(size, GFP_KERNEL, node);
 	if (!box)
 		return NULL;
 
@@ -3031,7 +3031,7 @@ static int uncore_validate_group(struct intel_uncore_pmu *pmu,
 	struct intel_uncore_box *fake_box;
 	int ret = -EINVAL, n;
 
-	fake_box = uncore_alloc_box(pmu->type, smp_processor_id());
+	fake_box = uncore_alloc_box(pmu->type, NUMA_NO_NODE);
 	if (!fake_box)
 		return -ENOMEM;
 
@@ -3294,7 +3294,7 @@ static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id
 	}
 
 	type = pci_uncores[UNCORE_PCI_DEV_TYPE(id->driver_data)];
-	box = uncore_alloc_box(type, 0);
+	box = uncore_alloc_box(type, NUMA_NO_NODE);
 	if (!box)
 		return -ENOMEM;
 
@@ -3499,7 +3499,7 @@ static int uncore_cpu_prepare(int cpu, int phys_id)
 			if (pmu->func_id < 0)
 				pmu->func_id = j;
 
-			box = uncore_alloc_box(type, cpu);
+			box = uncore_alloc_box(type, cpu_to_node(cpu));
 			if (!box)
 				return -ENOMEM;
 

+ 1 - 0
arch/x86/kernel/microcode_amd.c

@@ -216,6 +216,7 @@ int apply_microcode_amd(int cpu)
 	/* need to apply patch? */
 	if (rev >= mc_amd->hdr.patch_id) {
 		c->microcode = rev;
+		uci->cpu_sig.rev = rev;
 		return 0;
 	}
 

+ 17 - 1
arch/x86/kernel/reboot.c

@@ -352,12 +352,28 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
 	},
 	{	/* Handle problems with rebooting on the Precision M6600. */
 		.callback = set_pci_reboot,
-		.ident = "Dell OptiPlex 990",
+		.ident = "Dell Precision M6600",
 		.matches = {
 			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
 			DMI_MATCH(DMI_PRODUCT_NAME, "Precision M6600"),
 		},
 	},
+	{	/* Handle problems with rebooting on the Dell PowerEdge C6100. */
+		.callback = set_pci_reboot,
+		.ident = "Dell PowerEdge C6100",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "C6100"),
+		},
+	},
+	{	/* Some C6100 machines were shipped with vendor being 'Dell'. */
+		.callback = set_pci_reboot,
+		.ident = "Dell PowerEdge C6100",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Dell"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "C6100"),
+		},
+	},
 	{ }
 };
 

+ 7 - 4
arch/x86/platform/efi/efi.c

@@ -912,10 +912,13 @@ void __init efi_enter_virtual_mode(void)
 
 	for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
 		md = p;
-		if (!(md->attribute & EFI_MEMORY_RUNTIME) &&
-		    md->type != EFI_BOOT_SERVICES_CODE &&
-		    md->type != EFI_BOOT_SERVICES_DATA)
-			continue;
+		if (!(md->attribute & EFI_MEMORY_RUNTIME)) {
+#ifdef CONFIG_X86_64
+			if (md->type != EFI_BOOT_SERVICES_CODE &&
+			    md->type != EFI_BOOT_SERVICES_DATA)
+#endif
+				continue;
+		}
 
 		size = md->num_pages << EFI_PAGE_SHIFT;
 		end = md->phys_addr + size;

+ 4 - 6
arch/x86/xen/p2m.c

@@ -879,7 +879,6 @@ int m2p_add_override(unsigned long mfn, struct page *page,
 	unsigned long uninitialized_var(address);
 	unsigned level;
 	pte_t *ptep = NULL;
-	int ret = 0;
 
 	pfn = page_to_pfn(page);
 	if (!PageHighMem(page)) {
@@ -926,8 +925,8 @@ int m2p_add_override(unsigned long mfn, struct page *page,
 	 * frontend pages while they are being shared with the backend,
 	 * because mfn_to_pfn (that ends up being called by GUPF) will
 	 * return the backend pfn rather than the frontend pfn. */
-	ret = __get_user(pfn, &machine_to_phys_mapping[mfn]);
-	if (ret == 0 && get_phys_to_machine(pfn) == mfn)
+	pfn = mfn_to_pfn_no_overrides(mfn);
+	if (get_phys_to_machine(pfn) == mfn)
 		set_phys_to_machine(pfn, FOREIGN_FRAME(mfn));
 
 	return 0;
@@ -942,7 +941,6 @@ int m2p_remove_override(struct page *page,
 	unsigned long uninitialized_var(address);
 	unsigned level;
 	pte_t *ptep = NULL;
-	int ret = 0;
 
 	pfn = page_to_pfn(page);
 	mfn = get_phys_to_machine(pfn);
@@ -1029,8 +1027,8 @@ int m2p_remove_override(struct page *page,
 	 * the original pfn causes mfn_to_pfn(mfn) to return the frontend
 	 * pfn again. */
 	mfn &= ~FOREIGN_FRAME_BIT;
-	ret = __get_user(pfn, &machine_to_phys_mapping[mfn]);
-	if (ret == 0 && get_phys_to_machine(pfn) == FOREIGN_FRAME(mfn) &&
+	pfn = mfn_to_pfn_no_overrides(mfn);
+	if (get_phys_to_machine(pfn) == FOREIGN_FRAME(mfn) &&
 			m2p_find_override(mfn) == NULL)
 		set_phys_to_machine(pfn, mfn);
 

+ 24 - 2
arch/x86/xen/spinlock.c

@@ -259,6 +259,14 @@ void xen_uninit_lock_cpu(int cpu)
 }
 
 
+/*
+ * Our init of PV spinlocks is split in two init functions due to us
+ * using paravirt patching and jump labels patching and having to do
+ * all of this before SMP code is invoked.
+ *
+ * The paravirt patching needs to be done _before_ the alternative asm code
+ * is started, otherwise we would not patch the core kernel code.
+ */
 void __init xen_init_spinlocks(void)
 {
 
@@ -267,12 +275,26 @@ void __init xen_init_spinlocks(void)
 		return;
 	}
 
-	static_key_slow_inc(&paravirt_ticketlocks_enabled);
-
 	pv_lock_ops.lock_spinning = PV_CALLEE_SAVE(xen_lock_spinning);
 	pv_lock_ops.unlock_kick = xen_unlock_kick;
 }
 
+/*
+ * While the jump_label init code needs to happen _after_ the jump labels are
+ * enabled and before SMP is started. Hence we use pre-SMP initcall level
+ * init. We cannot do it in xen_init_spinlocks as that is done before
+ * jump labels are activated.
+ */
+static __init int xen_init_spinlocks_jump(void)
+{
+	if (!xen_pvspin)
+		return 0;
+
+	static_key_slow_inc(&paravirt_ticketlocks_enabled);
+	return 0;
+}
+early_initcall(xen_init_spinlocks_jump);
+
 static __init int xen_parse_nopvspin(char *arg)
 {
 	xen_pvspin = false;

+ 14 - 10
drivers/acpi/acpi_ipmi.c

@@ -39,6 +39,7 @@
 #include <linux/ipmi.h>
 #include <linux/device.h>
 #include <linux/pnp.h>
+#include <linux/spinlock.h>
 
 MODULE_AUTHOR("Zhao Yakui");
 MODULE_DESCRIPTION("ACPI IPMI Opregion driver");
@@ -57,7 +58,7 @@ struct acpi_ipmi_device {
 	struct list_head head;
 	/* the IPMI request message list */
 	struct list_head tx_msg_list;
-	struct mutex	tx_msg_lock;
+	spinlock_t	tx_msg_lock;
 	acpi_handle handle;
 	struct pnp_dev *pnp_dev;
 	ipmi_user_t	user_interface;
@@ -147,6 +148,7 @@ static void acpi_format_ipmi_msg(struct acpi_ipmi_msg *tx_msg,
 	struct kernel_ipmi_msg *msg;
 	struct acpi_ipmi_buffer *buffer;
 	struct acpi_ipmi_device *device;
+	unsigned long flags;
 
 	msg = &tx_msg->tx_message;
 	/*
@@ -177,10 +179,10 @@ static void acpi_format_ipmi_msg(struct acpi_ipmi_msg *tx_msg,
 
 	/* Get the msgid */
 	device = tx_msg->device;
-	mutex_lock(&device->tx_msg_lock);
+	spin_lock_irqsave(&device->tx_msg_lock, flags);
 	device->curr_msgid++;
 	tx_msg->tx_msgid = device->curr_msgid;
-	mutex_unlock(&device->tx_msg_lock);
+	spin_unlock_irqrestore(&device->tx_msg_lock, flags);
 }
 
 static void acpi_format_ipmi_response(struct acpi_ipmi_msg *msg,
@@ -242,6 +244,7 @@ static void ipmi_msg_handler(struct ipmi_recv_msg *msg, void *user_msg_data)
 	int msg_found = 0;
 	struct acpi_ipmi_msg *tx_msg;
 	struct pnp_dev *pnp_dev = ipmi_device->pnp_dev;
+	unsigned long flags;
 
 	if (msg->user != ipmi_device->user_interface) {
 		dev_warn(&pnp_dev->dev, "Unexpected response is returned. "
@@ -250,7 +253,7 @@ static void ipmi_msg_handler(struct ipmi_recv_msg *msg, void *user_msg_data)
 		ipmi_free_recv_msg(msg);
 		return;
 	}
-	mutex_lock(&ipmi_device->tx_msg_lock);
+	spin_lock_irqsave(&ipmi_device->tx_msg_lock, flags);
 	list_for_each_entry(tx_msg, &ipmi_device->tx_msg_list, head) {
 		if (msg->msgid == tx_msg->tx_msgid) {
 			msg_found = 1;
@@ -258,7 +261,7 @@ static void ipmi_msg_handler(struct ipmi_recv_msg *msg, void *user_msg_data)
 		}
 	}
 
-	mutex_unlock(&ipmi_device->tx_msg_lock);
+	spin_unlock_irqrestore(&ipmi_device->tx_msg_lock, flags);
 	if (!msg_found) {
 		dev_warn(&pnp_dev->dev, "Unexpected response (msg id %ld) is "
 			"returned.\n", msg->msgid);
@@ -378,6 +381,7 @@ acpi_ipmi_space_handler(u32 function, acpi_physical_address address,
 	struct acpi_ipmi_device *ipmi_device = handler_context;
 	int err, rem_time;
 	acpi_status status;
+	unsigned long flags;
 	/*
 	 * IPMI opregion message.
 	 * IPMI message is firstly written to the BMC and system software
@@ -395,9 +399,9 @@ acpi_ipmi_space_handler(u32 function, acpi_physical_address address,
 		return AE_NO_MEMORY;
 
 	acpi_format_ipmi_msg(tx_msg, address, value);
-	mutex_lock(&ipmi_device->tx_msg_lock);
+	spin_lock_irqsave(&ipmi_device->tx_msg_lock, flags);
 	list_add_tail(&tx_msg->head, &ipmi_device->tx_msg_list);
-	mutex_unlock(&ipmi_device->tx_msg_lock);
+	spin_unlock_irqrestore(&ipmi_device->tx_msg_lock, flags);
 	err = ipmi_request_settime(ipmi_device->user_interface,
 					&tx_msg->addr,
 					tx_msg->tx_msgid,
@@ -413,9 +417,9 @@ acpi_ipmi_space_handler(u32 function, acpi_physical_address address,
 	status = AE_OK;
 
 end_label:
-	mutex_lock(&ipmi_device->tx_msg_lock);
+	spin_lock_irqsave(&ipmi_device->tx_msg_lock, flags);
 	list_del(&tx_msg->head);
-	mutex_unlock(&ipmi_device->tx_msg_lock);
+	spin_unlock_irqrestore(&ipmi_device->tx_msg_lock, flags);
 	kfree(tx_msg);
 	return status;
 }
@@ -457,7 +461,7 @@ static void acpi_add_ipmi_device(struct acpi_ipmi_device *ipmi_device)
 
 	INIT_LIST_HEAD(&ipmi_device->head);
 
-	mutex_init(&ipmi_device->tx_msg_lock);
+	spin_lock_init(&ipmi_device->tx_msg_lock);
 	INIT_LIST_HEAD(&ipmi_device->tx_msg_list);
 	ipmi_install_space_handler(ipmi_device);
 

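The reason for trading the mutex for a spinlock: ipmi_msg_handler() is invoked from the IPMI message-delivery path, which can run in atomic context where sleeping locks are not allowed. The conversion follows the usual pattern for data shared between process context and atomic context:

	spinlock_t lock;	/* protects tx_msg_list and curr_msgid */
	unsigned long flags;

	spin_lock_irqsave(&lock, flags);	/* safe from any context */
	/* ... walk or modify the shared list ... */
	spin_unlock_irqrestore(&lock, flags);
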
+ 1 - 1
drivers/acpi/scan.c

@@ -1121,7 +1121,7 @@ int acpi_bus_register_driver(struct acpi_driver *driver)
 EXPORT_SYMBOL(acpi_bus_register_driver);
 
 /**
- * acpi_bus_unregister_driver - unregisters a driver with the APIC bus
+ * acpi_bus_unregister_driver - unregisters a driver with the ACPI bus
  * @driver: driver to unregister
  *
  * Unregisters a driver with the ACPI bus.  Searches the namespace for all

+ 1 - 1
drivers/ata/sata_promise.c

@@ -2,7 +2,7 @@
  *  sata_promise.c - Promise SATA
  *
  *  Maintained by:  Tejun Heo <tj@kernel.org>
- *		    Mikael Pettersson <mikpe@it.uu.se>
+ *		    Mikael Pettersson
  *  		    Please ALWAYS copy linux-ide@vger.kernel.org
  *		    on emails.
  *

+ 7 - 7
drivers/base/core.c

@@ -2017,7 +2017,7 @@ EXPORT_SYMBOL_GPL(device_move);
  */
 void device_shutdown(void)
 {
-	struct device *dev;
+	struct device *dev, *parent;
 
 	spin_lock(&devices_kset->list_lock);
 	/*
@@ -2034,7 +2034,7 @@ void device_shutdown(void)
 		 * prevent it from being freed because parent's
 		 * lock is to be held
 		 */
-		get_device(dev->parent);
+		parent = get_device(dev->parent);
 		get_device(dev);
 		/*
 		 * Make sure the device is off the kset list, in the
@@ -2044,8 +2044,8 @@ void device_shutdown(void)
 		spin_unlock(&devices_kset->list_lock);
 
 		/* hold lock to avoid race with probe/release */
-		if (dev->parent)
-			device_lock(dev->parent);
+		if (parent)
+			device_lock(parent);
 		device_lock(dev);
 
 		/* Don't allow any more runtime suspends */
@@ -2063,11 +2063,11 @@ void device_shutdown(void)
 		}
 
 		device_unlock(dev);
-		if (dev->parent)
-			device_unlock(dev->parent);
+		if (parent)
+			device_unlock(parent);
 
 		put_device(dev);
-		put_device(dev->parent);
+		put_device(parent);
 
 		spin_lock(&devices_kset->list_lock);
 	}

+ 1 - 0
drivers/block/cciss.c

@@ -1189,6 +1189,7 @@ static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
 	int err;
 	u32 cp;
 
+	memset(&arg64, 0, sizeof(arg64));
 	err = 0;
 	err |=
 	    copy_from_user(&arg64.LUN_info, &arg32->LUN_info,

+ 1 - 0
drivers/block/cpqarray.c

@@ -1193,6 +1193,7 @@ out_passthru:
 		ida_pci_info_struct pciinfo;
 
 		if (!arg) return -EINVAL;
+		memset(&pciinfo, 0, sizeof(pciinfo));
 		pciinfo.bus = host->pci_dev->bus->number;
 		pciinfo.dev_fn = host->pci_dev->devfn;
 		pciinfo.board_id = host->board_id;

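Both memset() additions close the same class of bug: a struct assembled on the kernel stack and then copied wholesale to user space leaks stale stack bytes through padding and through fields that were never assigned. The safe pattern, sketched with a hypothetical struct and destination:

	struct pci_info_example info;	/* hypothetical on-stack struct */

	memset(&info, 0, sizeof(info));	/* zero padding and unset fields */
	info.bus = bus_number;		/* then fill only the known fields */
	if (copy_to_user(user_ptr, &info, sizeof(info)))
		return -EFAULT;
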
+ 0 - 36
drivers/char/tpm/xen-tpmfront.c

@@ -142,32 +142,6 @@ static int vtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count)
 	return length;
 }
 
-ssize_t tpm_show_locality(struct device *dev, struct device_attribute *attr,
-			  char *buf)
-{
-	struct tpm_chip *chip = dev_get_drvdata(dev);
-	struct tpm_private *priv = TPM_VPRIV(chip);
-	u8 locality = priv->shr->locality;
-
-	return sprintf(buf, "%d\n", locality);
-}
-
-ssize_t tpm_store_locality(struct device *dev, struct device_attribute *attr,
-			const char *buf, size_t len)
-{
-	struct tpm_chip *chip = dev_get_drvdata(dev);
-	struct tpm_private *priv = TPM_VPRIV(chip);
-	u8 val;
-
-	int rv = kstrtou8(buf, 0, &val);
-	if (rv)
-		return rv;
-
-	priv->shr->locality = val;
-
-	return len;
-}
-
 static const struct file_operations vtpm_ops = {
 	.owner = THIS_MODULE,
 	.llseek = no_llseek,
@@ -188,8 +162,6 @@ static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps, NULL);
 static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel);
 static DEVICE_ATTR(durations, S_IRUGO, tpm_show_durations, NULL);
 static DEVICE_ATTR(timeouts, S_IRUGO, tpm_show_timeouts, NULL);
-static DEVICE_ATTR(locality, S_IRUGO | S_IWUSR, tpm_show_locality,
-		tpm_store_locality);
 
 static struct attribute *vtpm_attrs[] = {
 	&dev_attr_pubek.attr,
@@ -202,7 +174,6 @@ static struct attribute *vtpm_attrs[] = {
 	&dev_attr_cancel.attr,
 	&dev_attr_durations.attr,
 	&dev_attr_timeouts.attr,
-	&dev_attr_locality.attr,
 	NULL,
 };
 
@@ -210,8 +181,6 @@ static struct attribute_group vtpm_attr_grp = {
 	.attrs = vtpm_attrs,
 };
 
-#define TPM_LONG_TIMEOUT   (10 * 60 * HZ)
-
 static const struct tpm_vendor_specific tpm_vtpm = {
 	.status = vtpm_status,
 	.recv = vtpm_recv,
@@ -224,11 +193,6 @@ static const struct tpm_vendor_specific tpm_vtpm = {
 	.miscdev = {
 		.fops = &vtpm_ops,
 	},
-	.duration = {
-		TPM_LONG_TIMEOUT,
-		TPM_LONG_TIMEOUT,
-		TPM_LONG_TIMEOUT,
-	},
 };
 
 static irqreturn_t tpmif_interrupt(int dummy, void *dev_id)

+ 1 - 0
drivers/clocksource/Kconfig

@@ -26,6 +26,7 @@ config DW_APB_TIMER_OF
 
 config ARMADA_370_XP_TIMER
 	bool
+	select CLKSRC_OF
 
 config ORION_TIMER
 	select CLKSRC_OF

+ 3 - 0
drivers/clocksource/clksrc-of.c

@@ -30,6 +30,9 @@ void __init clocksource_of_init(void)
 	clocksource_of_init_fn init_func;
 
 	for_each_matching_node_and_match(np, __clksrc_of_table, &match) {
+		if (!of_device_is_available(np))
+			continue;
+
 		init_func = match->data;
 		init_func(np);
 	}

+ 1 - 1
drivers/clocksource/em_sti.c

@@ -301,7 +301,7 @@ static void em_sti_register_clockevent(struct em_sti_priv *p)
 	ced->name = dev_name(&p->pdev->dev);
 	ced->features = CLOCK_EVT_FEAT_ONESHOT;
 	ced->rating = 200;
-	ced->cpumask = cpumask_of(0);
+	ced->cpumask = cpu_possible_mask;
 	ced->set_next_event = em_sti_clock_event_next;
 	ced->set_mode = em_sti_clock_event_mode;
 

+ 9 - 1
drivers/clocksource/exynos_mct.c

@@ -428,7 +428,6 @@ static int exynos4_local_timer_setup(struct clock_event_device *evt)
 				evt->irq);
 			return -EIO;
 		}
-		irq_set_affinity(evt->irq, cpumask_of(cpu));
 	} else {
 		enable_percpu_irq(mct_irqs[MCT_L0_IRQ], 0);
 	}
@@ -449,6 +448,7 @@ static int exynos4_mct_cpu_notify(struct notifier_block *self,
 					   unsigned long action, void *hcpu)
 {
 	struct mct_clock_event_device *mevt;
+	unsigned int cpu;
 
 	/*
 	 * Grab cpu pointer in each case to avoid spurious
@@ -459,6 +459,12 @@ static int exynos4_mct_cpu_notify(struct notifier_block *self,
 		mevt = this_cpu_ptr(&percpu_mct_tick);
 		exynos4_local_timer_setup(&mevt->evt);
 		break;
+	case CPU_ONLINE:
+		cpu = (unsigned long)hcpu;
+		if (mct_int_type == MCT_INT_SPI)
+			irq_set_affinity(mct_irqs[MCT_L0_IRQ + cpu],
+						cpumask_of(cpu));
+		break;
 	case CPU_DYING:
 		mevt = this_cpu_ptr(&percpu_mct_tick);
 		exynos4_local_timer_stop(&mevt->evt);
@@ -500,6 +506,8 @@ static void __init exynos4_timer_resources(struct device_node *np, void __iomem
 					 &percpu_mct_tick);
 		WARN(err, "MCT: can't request IRQ %d (%d)\n",
 		     mct_irqs[MCT_L0_IRQ], err);
+	} else {
+		irq_set_affinity(mct_irqs[MCT_L0_IRQ], cpumask_of(0));
 	}
 
 	err = register_cpu_notifier(&exynos4_mct_cpu_nb);

+ 4 - 0
drivers/cpufreq/acpi-cpufreq.c

@@ -986,6 +986,10 @@ static int __init acpi_cpufreq_init(void)
 {
 	int ret;
 
+	/* don't keep reloading if cpufreq_driver exists */
+	if (cpufreq_get_current_driver())
+		return 0;
+
 	if (acpi_disabled)
 		return 0;
 

+ 3 - 0
drivers/cpufreq/cpufreq.c

@@ -1460,6 +1460,9 @@ unsigned int cpufreq_get(unsigned int cpu)
 {
 	unsigned int ret_freq = 0;
 
+	if (cpufreq_disabled() || !cpufreq_driver)
+		return -ENOENT;
+
 	if (!down_read_trylock(&cpufreq_rwsem))
 		return 0;
 

+ 1 - 1
drivers/cpufreq/exynos5440-cpufreq.c

@@ -457,7 +457,7 @@ err_free_table:
 	opp_free_cpufreq_table(dvfs_info->dev, &dvfs_info->freq_table);
 err_put_node:
 	of_node_put(np);
-	dev_err(dvfs_info->dev, "%s: failed initialization\n", __func__);
+	dev_err(&pdev->dev, "%s: failed initialization\n", __func__);
 	return ret;
 }
 

+ 12 - 0
drivers/dma/Kconfig

@@ -154,6 +154,18 @@ config TEGRA20_APB_DMA
 	  This DMA controller transfers data from memory to peripheral fifo
 	  or vice versa. It does not support memory to memory data transfer.
 
+config S3C24XX_DMAC
+	tristate "Samsung S3C24XX DMA support"
+	depends on ARCH_S3C24XX && !S3C24XX_DMA
+	select DMA_ENGINE
+	select DMA_VIRTUAL_CHANNELS
+	help
+	  Support for the Samsung S3C24XX DMA controller driver. The
+	  DMA controller has multiple DMA channels that can be
+	  configured for different peripherals like audio, UART and SPI.
+	  The DMA controller can transfer data from memory to peripheral,
+	  peripheral to memory, peripheral to peripheral and memory to memory.
+
 source "drivers/dma/sh/Kconfig"
 
 config COH901318

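For context, peripheral drivers never call into this driver directly; they obtain a channel and program transfers through the generic dmaengine API. A minimal slave-transfer sketch (channel request and error handling elided; fifo_phys and xfer_done are hypothetical):

	struct dma_slave_config cfg = {
		.direction	= DMA_MEM_TO_DEV,
		.dst_addr	= fifo_phys,	/* hypothetical device FIFO address */
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
	};
	struct dma_async_tx_descriptor *desc;

	dmaengine_slave_config(chan, &cfg);
	desc = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_MEM_TO_DEV,
				       DMA_PREP_INTERRUPT);
	desc->callback = xfer_done;	/* hypothetical completion callback */
	dmaengine_submit(desc);
	dma_async_issue_pending(chan);
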
+ 1 - 0
drivers/dma/Makefile

@@ -30,6 +30,7 @@ obj-$(CONFIG_SIRF_DMA) += sirf-dma.o
 obj-$(CONFIG_TI_EDMA) += edma.o
 obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o
 obj-$(CONFIG_TEGRA20_APB_DMA) += tegra20-apb-dma.o
+obj-$(CONFIG_S3C24XX_DMAC) += s3c24xx-dma.o
 obj-$(CONFIG_PL330_DMA) += pl330.o
 obj-$(CONFIG_PCH_DMA) += pch_dma.o
 obj-$(CONFIG_AMBA_PL08X) += amba-pl08x.o

+ 1350 - 0
drivers/dma/s3c24xx-dma.c

@@ -0,0 +1,1350 @@
+/*
+ * S3C24XX DMA handling
+ *
+ * Copyright (c) 2013 Heiko Stuebner <heiko@sntech.de>
+ *
+ * based on amba-pl08x.c
+ *
+ * Copyright (c) 2006 ARM Ltd.
+ * Copyright (c) 2010 ST-Ericsson SA
+ *
+ * Author: Peter Pearse <peter.pearse@arm.com>
+ * Author: Linus Walleij <linus.walleij@stericsson.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * The DMA controllers in S3C24XX SoCs have a varying number of DMA signals
+ * that can be routed to any of the 4 to 8 hardware-channels.
+ *
+ * Therefore on these DMA controllers the number of channels
+ * and the number of incoming DMA signals are two totally different things.
+ * It is usually not possible to theoretically handle all physical signals,
+ * so a multiplexing scheme with possible denial of use is necessary.
+ *
+ * Open items:
+ * - bursts
+ */
+
+#include <linux/platform_device.h>
+#include <linux/types.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/platform_data/dma-s3c24xx.h>
+
+#include "dmaengine.h"
+#include "virt-dma.h"
+
+#define MAX_DMA_CHANNELS	8
+
+#define S3C24XX_DISRC			0x00
+#define S3C24XX_DISRCC			0x04
+#define S3C24XX_DISRCC_INC_INCREMENT	0
+#define S3C24XX_DISRCC_INC_FIXED	BIT(0)
+#define S3C24XX_DISRCC_LOC_AHB		0
+#define S3C24XX_DISRCC_LOC_APB		BIT(1)
+
+#define S3C24XX_DIDST			0x08
+#define S3C24XX_DIDSTC			0x0c
+#define S3C24XX_DIDSTC_INC_INCREMENT	0
+#define S3C24XX_DIDSTC_INC_FIXED	BIT(0)
+#define S3C24XX_DIDSTC_LOC_AHB		0
+#define S3C24XX_DIDSTC_LOC_APB		BIT(1)
+#define S3C24XX_DIDSTC_INT_TC0		0
+#define S3C24XX_DIDSTC_INT_RELOAD	BIT(2)
+
+#define S3C24XX_DCON			0x10
+
+#define S3C24XX_DCON_TC_MASK		0xfffff
+#define S3C24XX_DCON_DSZ_BYTE		(0 << 20)
+#define S3C24XX_DCON_DSZ_HALFWORD	(1 << 20)
+#define S3C24XX_DCON_DSZ_WORD		(2 << 20)
+#define S3C24XX_DCON_DSZ_MASK		(3 << 20)
+#define S3C24XX_DCON_DSZ_SHIFT		20
+#define S3C24XX_DCON_AUTORELOAD		0
+#define S3C24XX_DCON_NORELOAD		BIT(22)
+#define S3C24XX_DCON_HWTRIG		BIT(23)
+#define S3C24XX_DCON_HWSRC_SHIFT	24
+#define S3C24XX_DCON_SERV_SINGLE	0
+#define S3C24XX_DCON_SERV_WHOLE		BIT(27)
+#define S3C24XX_DCON_TSZ_UNIT		0
+#define S3C24XX_DCON_TSZ_BURST4		BIT(28)
+#define S3C24XX_DCON_INT		BIT(29)
+#define S3C24XX_DCON_SYNC_PCLK		0
+#define S3C24XX_DCON_SYNC_HCLK		BIT(30)
+#define S3C24XX_DCON_DEMAND		0
+#define S3C24XX_DCON_HANDSHAKE		BIT(31)
+
+#define S3C24XX_DSTAT			0x14
+#define S3C24XX_DSTAT_STAT_BUSY		BIT(20)
+#define S3C24XX_DSTAT_CURRTC_MASK	0xfffff
+
+#define S3C24XX_DMASKTRIG		0x20
+#define S3C24XX_DMASKTRIG_SWTRIG	BIT(0)
+#define S3C24XX_DMASKTRIG_ON		BIT(1)
+#define S3C24XX_DMASKTRIG_STOP		BIT(2)
+
+#define S3C24XX_DMAREQSEL		0x24
+#define S3C24XX_DMAREQSEL_HW		BIT(0)
+
+/*
+ * S3C2410, S3C2440 and S3C2442 SoCs cannot select any physical channel
+ * for a DMA source. Instead only specific channels are valid.
+ * All of these SoCs have 4 physical channels and the number of request
+ * source bits is 3. Additionally we need 1 bit to mark the channel
+ * as valid.
+ * Therefore we separate the chansel element of the channel data into 4
+ * parts of 4 bits each, to hold the information if the channel is valid
+ * and the hw request source to use.
+ *
+ * Example:
+ * SDI is valid on channels 0, 2 and 3 - with varying hw request sources.
+ * For it the chansel field would look like
+ *
+ * ((BIT(3) | 1) << 3 * 4) | // channel 3, with request source 1
+ * ((BIT(3) | 2) << 2 * 4) | // channel 2, with request source 2
+ * ((BIT(3) | 2) << 0 * 4)   // channel 0, with request source 2
+ */
+#define S3C24XX_CHANSEL_WIDTH		4
+#define S3C24XX_CHANSEL_VALID		BIT(3)
+#define S3C24XX_CHANSEL_REQ_MASK	7
+
+/*
+ * struct soc_data - vendor-specific config parameters for individual SoCs
+ * @stride: spacing between the registers of each channel
+ * @has_reqsel: does the controller use the newer request selection mechanism
+ * @has_clocks: are controllable dma-clocks present
+ */
+struct soc_data {
+	int stride;
+	bool has_reqsel;
+	bool has_clocks;
+};
+
+/*
+ * enum s3c24xx_dma_chan_state - holds the virtual channel states
+ * @S3C24XX_DMA_CHAN_IDLE: the channel is idle
+ * @S3C24XX_DMA_CHAN_RUNNING: the channel has allocated a physical transport
+ * channel and is running a transfer on it
+ * @S3C24XX_DMA_CHAN_WAITING: the channel is waiting for a physical transport
+ * channel to become available (only pertains to memcpy channels)
+ */
+enum s3c24xx_dma_chan_state {
+	S3C24XX_DMA_CHAN_IDLE,
+	S3C24XX_DMA_CHAN_RUNNING,
+	S3C24XX_DMA_CHAN_WAITING,
+};
+
+/*
+ * struct s3c24xx_sg - structure containing data per sg
+ * @src_addr: src address of sg
+ * @dst_addr: dst address of sg
+ * @len: transfer len in bytes
+ * @node: node for txd's dsg_list
+ */
+struct s3c24xx_sg {
+	dma_addr_t src_addr;
+	dma_addr_t dst_addr;
+	size_t len;
+	struct list_head node;
+};
+
+/*
+ * struct s3c24xx_txd - wrapper for struct dma_async_tx_descriptor
+ * @vd: virtual DMA descriptor
+ * @dsg_list: list of children sg's
+ * @at: sg currently being transferred
+ * @width: transfer width
+ * @disrcc: value for source control register
+ * @didstc: value for destination control register
+ * @dcon: base value for dcon register
+ */
+struct s3c24xx_txd {
+	struct virt_dma_desc vd;
+	struct list_head dsg_list;
+	struct list_head *at;
+	u8 width;
+	u32 disrcc;
+	u32 didstc;
+	u32 dcon;
+};
+
+struct s3c24xx_dma_chan;
+
+/*
+ * struct s3c24xx_dma_phy - holder for the physical channels
+ * @id: physical index to this channel
+ * @valid: does the channel have all required elements
+ * @base: virtual memory base (remapped) for this channel
+ * @irq: interrupt for this channel
+ * @clk: clock for this channel
+ * @lock: a lock to use when altering an instance of this struct
+ * @serving: virtual channel currently being served by this physical channel
+ * @host: a pointer to the host (internal use)
+ */
+struct s3c24xx_dma_phy {
+	unsigned int			id;
+	bool				valid;
+	void __iomem			*base;
+	unsigned int			irq;
+	struct clk			*clk;
+	spinlock_t			lock;
+	struct s3c24xx_dma_chan		*serving;
+	struct s3c24xx_dma_engine	*host;
+};
+
+/*
+ * struct s3c24xx_dma_chan - this structure wraps a DMA ENGINE channel
+ * @id: the id of the channel
+ * @name: name of the channel
+ * @vc: wrapped virtual channel
+ * @phy: the physical channel utilized by this channel, if there is one
+ * @cfg: slave configuration for this channel
+ * @at: active transaction on this channel
+ * @host: a pointer to the host (internal use)
+ * @state: whether the channel is idle, running etc
+ * @slave: whether this channel is a device (slave) or for memcpy
+ */
+struct s3c24xx_dma_chan {
+	int id;
+	const char *name;
+	struct virt_dma_chan vc;
+	struct s3c24xx_dma_phy *phy;
+	struct dma_slave_config cfg;
+	struct s3c24xx_txd *at;
+	struct s3c24xx_dma_engine *host;
+	enum s3c24xx_dma_chan_state state;
+	bool slave;
+};
+
+/*
+ * struct s3c24xx_dma_engine - the local state holder for the S3C24XX
+ * @pdev: the corresponding platform device
+ * @pdata: platform data passed in from the platform/machine
+ * @base: virtual memory base (remapped)
+ * @slave: slave engine for this instance
+ * @memcpy: memcpy engine for this instance
+ * @phy_chans: array of data for the physical channels
+ */
+struct s3c24xx_dma_engine {
+	struct platform_device			*pdev;
+	const struct s3c24xx_dma_platdata	*pdata;
+	struct soc_data				*sdata;
+	void __iomem				*base;
+	struct dma_device			slave;
+	struct dma_device			memcpy;
+	struct s3c24xx_dma_phy			*phy_chans;
+};
+
+/*
+ * Physical channel handling
+ */
+
+/*
+ * Check whether a certain channel is busy or not.
+ */
+static int s3c24xx_dma_phy_busy(struct s3c24xx_dma_phy *phy)
+{
+	unsigned int val = readl(phy->base + S3C24XX_DSTAT);
+	return val & S3C24XX_DSTAT_STAT_BUSY;
+}
+
+static bool s3c24xx_dma_phy_valid(struct s3c24xx_dma_chan *s3cchan,
+				  struct s3c24xx_dma_phy *phy)
+{
+	struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
+	const struct s3c24xx_dma_platdata *pdata = s3cdma->pdata;
+	struct s3c24xx_dma_channel *cdata = &pdata->channels[s3cchan->id];
+	int phyvalid;
+
+	/* every phy is valid for memcpy channels */
+	if (!s3cchan->slave)
+		return true;
+
+	/* On newer variants all phys can be used for all virtual channels */
+	if (s3cdma->sdata->has_reqsel)
+		return true;
+
+	phyvalid = (cdata->chansel >> (phy->id * S3C24XX_CHANSEL_WIDTH));
+	return (phyvalid & S3C24XX_CHANSEL_VALID) ? true : false;
+}
+
+/*
+ * Allocate a physical channel for a virtual channel
+ *
+ * Try to locate a physical channel to be used for this transfer. If all
+ * are taken return NULL and the requester will have to cope by using
+ * some fallback PIO mode or retrying later.
+ */
+static
+struct s3c24xx_dma_phy *s3c24xx_dma_get_phy(struct s3c24xx_dma_chan *s3cchan)
+{
+	struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
+	const struct s3c24xx_dma_platdata *pdata = s3cdma->pdata;
+	struct s3c24xx_dma_channel *cdata;
+	struct s3c24xx_dma_phy *phy = NULL;
+	unsigned long flags;
+	int i;
+	int ret;
+
+	if (s3cchan->slave)
+		cdata = &pdata->channels[s3cchan->id];
+
+	for (i = 0; i < s3cdma->pdata->num_phy_channels; i++) {
+		phy = &s3cdma->phy_chans[i];
+
+		if (!phy->valid)
+			continue;
+
+		if (!s3c24xx_dma_phy_valid(s3cchan, phy))
+			continue;
+
+		spin_lock_irqsave(&phy->lock, flags);
+
+		if (!phy->serving) {
+			phy->serving = s3cchan;
+			spin_unlock_irqrestore(&phy->lock, flags);
+			break;
+		}
+
+		spin_unlock_irqrestore(&phy->lock, flags);
+	}
+
+	/* No physical channel available, cope with it */
+	if (i == s3cdma->pdata->num_phy_channels) {
+		dev_warn(&s3cdma->pdev->dev, "no phy channel available\n");
+		return NULL;
+	}
+
+	/* start the phy clock */
+	if (s3cdma->sdata->has_clocks) {
+		ret = clk_enable(phy->clk);
+		if (ret) {
+			dev_err(&s3cdma->pdev->dev, "could not enable clock for channel %d, err %d\n",
+				phy->id, ret);
+			phy->serving = NULL;
+			return NULL;
+		}
+	}
+
+	return phy;
+}
+
+/*
+ * Mark the physical channel as free.
+ *
+ * This drops the link between the physical and virtual channel.
+ */
+static inline void s3c24xx_dma_put_phy(struct s3c24xx_dma_phy *phy)
+{
+	struct s3c24xx_dma_engine *s3cdma = phy->host;
+
+	if (s3cdma->sdata->has_clocks)
+		clk_disable(phy->clk);
+
+	phy->serving = NULL;
+}
+
+/*
+ * Stops the channel by writing the stop bit.
+ * This should not be used for an on-going transfer, but as a method of
+ * shutting down a channel (eg, when it's no longer used) or terminating a
+ * transfer.
+ */
+static void s3c24xx_dma_terminate_phy(struct s3c24xx_dma_phy *phy)
+{
+	writel(S3C24XX_DMASKTRIG_STOP, phy->base + S3C24XX_DMASKTRIG);
+}
+
+/*
+ * Virtual channel handling
+ */
+
+static inline
+struct s3c24xx_dma_chan *to_s3c24xx_dma_chan(struct dma_chan *chan)
+{
+	return container_of(chan, struct s3c24xx_dma_chan, vc.chan);
+}
+
+static u32 s3c24xx_dma_getbytes_chan(struct s3c24xx_dma_chan *s3cchan)
+{
+	struct s3c24xx_dma_phy *phy = s3cchan->phy;
+	struct s3c24xx_txd *txd = s3cchan->at;
+	u32 tc = readl(phy->base + S3C24XX_DSTAT) & S3C24XX_DSTAT_CURRTC_MASK;
+
+	return tc * txd->width;
+}
+
+static int s3c24xx_dma_set_runtime_config(struct s3c24xx_dma_chan *s3cchan,
+				  struct dma_slave_config *config)
+{
+	if (!s3cchan->slave)
+		return -EINVAL;
+
+	/* Reject definitely invalid configurations */
+	if (config->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
+	    config->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
+		return -EINVAL;
+
+	s3cchan->cfg = *config;
+
+	return 0;
+}
+
+/*
+ * Transfer handling
+ */
+
+static inline
+struct s3c24xx_txd *to_s3c24xx_txd(struct dma_async_tx_descriptor *tx)
+{
+	return container_of(tx, struct s3c24xx_txd, vd.tx);
+}
+
+static struct s3c24xx_txd *s3c24xx_dma_get_txd(void)
+{
+	struct s3c24xx_txd *txd = kzalloc(sizeof(*txd), GFP_NOWAIT);
+
+	if (txd) {
+		INIT_LIST_HEAD(&txd->dsg_list);
+		txd->dcon = S3C24XX_DCON_INT | S3C24XX_DCON_NORELOAD;
+	}
+
+	return txd;
+}
+
+static void s3c24xx_dma_free_txd(struct s3c24xx_txd *txd)
+{
+	struct s3c24xx_sg *dsg, *_dsg;
+
+	list_for_each_entry_safe(dsg, _dsg, &txd->dsg_list, node) {
+		list_del(&dsg->node);
+		kfree(dsg);
+	}
+
+	kfree(txd);
+}
+
+static void s3c24xx_dma_start_next_sg(struct s3c24xx_dma_chan *s3cchan,
+				       struct s3c24xx_txd *txd)
+{
+	struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
+	struct s3c24xx_dma_phy *phy = s3cchan->phy;
+	const struct s3c24xx_dma_platdata *pdata = s3cdma->pdata;
+	struct s3c24xx_sg *dsg = list_entry(txd->at, struct s3c24xx_sg, node);
+	u32 dcon = txd->dcon;
+	u32 val;
+
+	/* transfer-size and -count from len and width */
+	switch (txd->width) {
+	case 1:
+		dcon |= S3C24XX_DCON_DSZ_BYTE | dsg->len;
+		break;
+	case 2:
+		dcon |= S3C24XX_DCON_DSZ_HALFWORD | (dsg->len / 2);
+		break;
+	case 4:
+		dcon |= S3C24XX_DCON_DSZ_WORD | (dsg->len / 4);
+		break;
+	}
+
+	if (s3cchan->slave) {
+		struct s3c24xx_dma_channel *cdata =
+					&pdata->channels[s3cchan->id];
+
+		if (s3cdma->sdata->has_reqsel) {
+			writel_relaxed((cdata->chansel << 1) |
+							S3C24XX_DMAREQSEL_HW,
+					phy->base + S3C24XX_DMAREQSEL);
+		} else {
+			int csel = cdata->chansel >> (phy->id *
+							S3C24XX_CHANSEL_WIDTH);
+
+			csel &= S3C24XX_CHANSEL_REQ_MASK;
+			dcon |= csel << S3C24XX_DCON_HWSRC_SHIFT;
+			dcon |= S3C24XX_DCON_HWTRIG;
+		}
+	} else {
+		if (s3cdma->sdata->has_reqsel)
+			writel_relaxed(0, phy->base + S3C24XX_DMAREQSEL);
+	}
+
+	writel_relaxed(dsg->src_addr, phy->base + S3C24XX_DISRC);
+	writel_relaxed(txd->disrcc, phy->base + S3C24XX_DISRCC);
+	writel_relaxed(dsg->dst_addr, phy->base + S3C24XX_DIDST);
+	writel_relaxed(txd->didstc, phy->base + S3C24XX_DIDSTC);
+	writel_relaxed(dcon, phy->base + S3C24XX_DCON);
+
+	val = readl_relaxed(phy->base + S3C24XX_DMASKTRIG);
+	val &= ~S3C24XX_DMASKTRIG_STOP;
+	val |= S3C24XX_DMASKTRIG_ON;
+
+	/* trigger the dma operation for memcpy transfers */
+	if (!s3cchan->slave)
+		val |= S3C24XX_DMASKTRIG_SWTRIG;
+
+	writel(val, phy->base + S3C24XX_DMASKTRIG);
+}
+
+/*
+ * Set the initial DMA register values and start first sg.
+ */
+static void s3c24xx_dma_start_next_txd(struct s3c24xx_dma_chan *s3cchan)
+{
+	struct s3c24xx_dma_phy *phy = s3cchan->phy;
+	struct virt_dma_desc *vd = vchan_next_desc(&s3cchan->vc);
+	struct s3c24xx_txd *txd = to_s3c24xx_txd(&vd->tx);
+
+	list_del(&txd->vd.node);
+
+	s3cchan->at = txd;
+
+	/* Wait for channel inactive */
+	while (s3c24xx_dma_phy_busy(phy))
+		cpu_relax();
+
+	/* point to the first element of the sg list */
+	txd->at = txd->dsg_list.next;
+	s3c24xx_dma_start_next_sg(s3cchan, txd);
+}
+
+static void s3c24xx_dma_free_txd_list(struct s3c24xx_dma_engine *s3cdma,
+				struct s3c24xx_dma_chan *s3cchan)
+{
+	LIST_HEAD(head);
+
+	vchan_get_all_descriptors(&s3cchan->vc, &head);
+	vchan_dma_desc_free_list(&s3cchan->vc, &head);
+}
+
+/*
+ * Try to allocate a physical channel.  When successful, assign it to
+ * this virtual channel, and initiate the next descriptor.  The
+ * virtual channel lock must be held at this point.
+ */
+static void s3c24xx_dma_phy_alloc_and_start(struct s3c24xx_dma_chan *s3cchan)
+{
+	struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
+	struct s3c24xx_dma_phy *phy;
+
+	phy = s3c24xx_dma_get_phy(s3cchan);
+	if (!phy) {
+		dev_dbg(&s3cdma->pdev->dev, "no physical channel available for xfer on %s\n",
+			s3cchan->name);
+		s3cchan->state = S3C24XX_DMA_CHAN_WAITING;
+		return;
+	}
+
+	dev_dbg(&s3cdma->pdev->dev, "allocated physical channel %d for xfer on %s\n",
+		phy->id, s3cchan->name);
+
+	s3cchan->phy = phy;
+	s3cchan->state = S3C24XX_DMA_CHAN_RUNNING;
+
+	s3c24xx_dma_start_next_txd(s3cchan);
+}
+
+static void s3c24xx_dma_phy_reassign_start(struct s3c24xx_dma_phy *phy,
+	struct s3c24xx_dma_chan *s3cchan)
+{
+	struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
+
+	dev_dbg(&s3cdma->pdev->dev, "reassigned physical channel %d for xfer on %s\n",
+		phy->id, s3cchan->name);
+
+	/*
+	 * We do this without taking the lock; we're really only concerned
+	 * about whether this pointer is NULL or not, and we're guaranteed
+	 * that this will only be called when it _already_ is non-NULL.
+	 */
+	phy->serving = s3cchan;
+	s3cchan->phy = phy;
+	s3cchan->state = S3C24XX_DMA_CHAN_RUNNING;
+	s3c24xx_dma_start_next_txd(s3cchan);
+}
+
+/*
+ * Free a physical DMA channel, potentially reallocating it to another
+ * virtual channel if we have any pending.
+ */
+static void s3c24xx_dma_phy_free(struct s3c24xx_dma_chan *s3cchan)
+{
+	struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
+	struct s3c24xx_dma_chan *p, *next;
+
+retry:
+	next = NULL;
+
+	/* Find a waiting virtual channel for the next transfer. */
+	list_for_each_entry(p, &s3cdma->memcpy.channels, vc.chan.device_node)
+		if (p->state == S3C24XX_DMA_CHAN_WAITING) {
+			next = p;
+			break;
+		}
+
+	if (!next) {
+		list_for_each_entry(p, &s3cdma->slave.channels,
+				    vc.chan.device_node)
+			if (p->state == S3C24XX_DMA_CHAN_WAITING &&
+				      s3c24xx_dma_phy_valid(p, s3cchan->phy)) {
+				next = p;
+				break;
+			}
+	}
+
+	/* Ensure that the physical channel is stopped */
+	s3c24xx_dma_terminate_phy(s3cchan->phy);
+
+	if (next) {
+		bool success;
+
+		/*
+		 * Eww.  We know this isn't going to deadlock
+		 * but lockdep probably doesn't.
+		 */
+		spin_lock(&next->vc.lock);
+		/* Re-check the state now that we have the lock */
+		success = next->state == S3C24XX_DMA_CHAN_WAITING;
+		if (success)
+			s3c24xx_dma_phy_reassign_start(s3cchan->phy, next);
+		spin_unlock(&next->vc.lock);
+
+		/* If the state changed, try to find another channel */
+		if (!success)
+			goto retry;
+	} else {
+		/* No more jobs, so free up the physical channel */
+		s3c24xx_dma_put_phy(s3cchan->phy);
+	}
+
+	s3cchan->phy = NULL;
+	s3cchan->state = S3C24XX_DMA_CHAN_IDLE;
+}
+
+static void s3c24xx_dma_unmap_buffers(struct s3c24xx_txd *txd)
+{
+	struct device *dev = txd->vd.tx.chan->device->dev;
+	struct s3c24xx_sg *dsg;
+
+	if (!(txd->vd.tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
+		if (txd->vd.tx.flags & DMA_COMPL_SRC_UNMAP_SINGLE)
+			list_for_each_entry(dsg, &txd->dsg_list, node)
+				dma_unmap_single(dev, dsg->src_addr, dsg->len,
+						DMA_TO_DEVICE);
+		else {
+			list_for_each_entry(dsg, &txd->dsg_list, node)
+				dma_unmap_page(dev, dsg->src_addr, dsg->len,
+						DMA_TO_DEVICE);
+		}
+	}
+
+	if (!(txd->vd.tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
+		if (txd->vd.tx.flags & DMA_COMPL_DEST_UNMAP_SINGLE)
+			list_for_each_entry(dsg, &txd->dsg_list, node)
+				dma_unmap_single(dev, dsg->dst_addr, dsg->len,
+						DMA_FROM_DEVICE);
+		else
+			list_for_each_entry(dsg, &txd->dsg_list, node)
+				dma_unmap_page(dev, dsg->dst_addr, dsg->len,
+						DMA_FROM_DEVICE);
+	}
+}
+
+static void s3c24xx_dma_desc_free(struct virt_dma_desc *vd)
+{
+	struct s3c24xx_txd *txd = to_s3c24xx_txd(&vd->tx);
+	struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(vd->tx.chan);
+
+	if (!s3cchan->slave)
+		s3c24xx_dma_unmap_buffers(txd);
+
+	s3c24xx_dma_free_txd(txd);
+}
+
+static irqreturn_t s3c24xx_dma_irq(int irq, void *data)
+{
+	struct s3c24xx_dma_phy *phy = data;
+	struct s3c24xx_dma_chan *s3cchan = phy->serving;
+	struct s3c24xx_txd *txd;
+
+	dev_dbg(&phy->host->pdev->dev, "interrupt on channel %d\n", phy->id);
+
+	/*
+	 * Interrupts happen to notify the completion of a transfer and the
+	 * channel should have moved into its stop state already on its own.
+	 * Therefore interrupts on channels not bound to a virtual channel
+	 * should never happen. Nevertheless send a terminate command to the
+	 * channel if the unlikely case happens.
+	 */
+	if (unlikely(!s3cchan)) {
+		dev_err(&phy->host->pdev->dev, "interrupt on unused channel %d\n",
+			phy->id);
+
+		s3c24xx_dma_terminate_phy(phy);
+
+		return IRQ_HANDLED;
+	}
+
+	spin_lock(&s3cchan->vc.lock);
+	txd = s3cchan->at;
+	if (txd) {
+		/* when more sg's are in this txd, start the next one */
+		if (!list_is_last(txd->at, &txd->dsg_list)) {
+			txd->at = txd->at->next;
+			s3c24xx_dma_start_next_sg(s3cchan, txd);
+		} else {
+			s3cchan->at = NULL;
+			vchan_cookie_complete(&txd->vd);
+
+			/*
+			 * And start the next descriptor (if any),
+			 * otherwise free this channel.
+			 */
+			if (vchan_next_desc(&s3cchan->vc))
+				s3c24xx_dma_start_next_txd(s3cchan);
+			else
+				s3c24xx_dma_phy_free(s3cchan);
+		}
+	}
+	spin_unlock(&s3cchan->vc.lock);
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * The DMA ENGINE API
+ */
+
+static int s3c24xx_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+			 unsigned long arg)
+{
+	struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan);
+	struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
+	unsigned long flags;
+	int ret = 0;
+
+	spin_lock_irqsave(&s3cchan->vc.lock, flags);
+
+	switch (cmd) {
+	case DMA_SLAVE_CONFIG:
+		ret = s3c24xx_dma_set_runtime_config(s3cchan,
+					      (struct dma_slave_config *)arg);
+		break;
+	case DMA_TERMINATE_ALL:
+		if (!s3cchan->phy && !s3cchan->at) {
+			dev_err(&s3cdma->pdev->dev, "trying to terminate already stopped channel %d\n",
+				s3cchan->id);
+			ret = -EINVAL;
+			break;
+		}
+
+		s3cchan->state = S3C24XX_DMA_CHAN_IDLE;
+
+		 /* Mark physical channel as free */
+		if (s3cchan->phy)
+			s3c24xx_dma_phy_free(s3cchan);
+
+		/* Dequeue current job */
+		if (s3cchan->at) {
+			s3c24xx_dma_desc_free(&s3cchan->at->vd);
+			s3cchan->at = NULL;
+		}
+
+		/* Dequeue jobs not yet fired as well */
+		s3c24xx_dma_free_txd_list(s3cdma, s3cchan);
+		break;
+	default:
+		/* Unknown command */
+		ret = -ENXIO;
+		break;
+	}
+
+	spin_unlock_irqrestore(&s3cchan->vc.lock, flags);
+
+	return ret;
+}
+
+static int s3c24xx_dma_alloc_chan_resources(struct dma_chan *chan)
+{
+	return 0;
+}
+
+static void s3c24xx_dma_free_chan_resources(struct dma_chan *chan)
+{
+	/* Ensure all queued descriptors are freed */
+	vchan_free_chan_resources(to_virt_chan(chan));
+}
+
+static enum dma_status s3c24xx_dma_tx_status(struct dma_chan *chan,
+		dma_cookie_t cookie, struct dma_tx_state *txstate)
+{
+	struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan);
+	struct s3c24xx_txd *txd;
+	struct s3c24xx_sg *dsg;
+	struct virt_dma_desc *vd;
+	unsigned long flags;
+	enum dma_status ret;
+	size_t bytes = 0;
+
+	spin_lock_irqsave(&s3cchan->vc.lock, flags);
+	ret = dma_cookie_status(chan, cookie, txstate);
+	if (ret == DMA_SUCCESS) {
+		spin_unlock_irqrestore(&s3cchan->vc.lock, flags);
+		return ret;
+	}
+
+	/*
+	 * There's no point calculating the residue if there's
+	 * no txstate to store the value.
+	 */
+	if (!txstate) {
+		spin_unlock_irqrestore(&s3cchan->vc.lock, flags);
+		return ret;
+	}
+
+	vd = vchan_find_desc(&s3cchan->vc, cookie);
+	if (vd) {
+		/* On the issued list, so hasn't been processed yet */
+		txd = to_s3c24xx_txd(&vd->tx);
+
+		list_for_each_entry(dsg, &txd->dsg_list, node)
+			bytes += dsg->len;
+	} else {
+		/*
+		 * Currently running, so sum over the pending sg's and
+		 * the currently active one.
+		 */
+		txd = s3cchan->at;
+
+		dsg = list_entry(txd->at, struct s3c24xx_sg, node);
+		list_for_each_entry_from(dsg, &txd->dsg_list, node)
+			bytes += dsg->len;
+
+		bytes += s3c24xx_dma_getbytes_chan(s3cchan);
+	}
+	spin_unlock_irqrestore(&s3cchan->vc.lock, flags);
+
+	/*
+	 * This cookie is not complete yet.
+	 * Get the number of bytes left in the active transactions and queue.
+	 */
+	dma_set_residue(txstate, bytes);
+
+	/* Whether waiting or running, we're in progress */
+	return ret;
+}
+
+/*
+ * Initialize a descriptor to be used by memcpy submit
+ */
+static struct dma_async_tx_descriptor *s3c24xx_dma_prep_memcpy(
+		struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
+		size_t len, unsigned long flags)
+{
+	struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan);
+	struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
+	struct s3c24xx_txd *txd;
+	struct s3c24xx_sg *dsg;
+	int src_mod, dest_mod;
+
+	dev_dbg(&s3cdma->pdev->dev, "prepare memcpy of %zu bytes from %s\n",
+			len, s3cchan->name);
+
+	if ((len & S3C24XX_DCON_TC_MASK) != len) {
+		dev_err(&s3cdma->pdev->dev, "memcpy size %zu too large\n", len);
+		return NULL;
+	}
+
+	txd = s3c24xx_dma_get_txd();
+	if (!txd)
+		return NULL;
+
+	dsg = kzalloc(sizeof(*dsg), GFP_NOWAIT);
+	if (!dsg) {
+		s3c24xx_dma_free_txd(txd);
+		return NULL;
+	}
+	list_add_tail(&dsg->node, &txd->dsg_list);
+
+	dsg->src_addr = src;
+	dsg->dst_addr = dest;
+	dsg->len = len;
+
+	/*
+	 * Determine a suitable transfer width.
+	 * The DMA controller cannot fetch/store information which is not
+	 * naturally aligned on the bus, i.e., a 4 byte fetch must start at
+	 * an address divisible by 4 - more generally addr % width must be 0.
+	 */
+	src_mod = src % 4;
+	dest_mod = dest % 4;
+	switch (len % 4) {
+	case 0:
+		txd->width = (src_mod == 0 && dest_mod == 0) ? 4 : 1;
+		break;
+	case 2:
+		txd->width = ((src_mod == 2 || src_mod == 0) &&
+			      (dest_mod == 2 || dest_mod == 0)) ? 2 : 1;
+		break;
+	default:
+		txd->width = 1;
+		break;
+	}
+
+	txd->disrcc = S3C24XX_DISRCC_LOC_AHB | S3C24XX_DISRCC_INC_INCREMENT;
+	txd->didstc = S3C24XX_DIDSTC_LOC_AHB | S3C24XX_DIDSTC_INC_INCREMENT;
+	txd->dcon |= S3C24XX_DCON_DEMAND | S3C24XX_DCON_SYNC_HCLK |
+		     S3C24XX_DCON_SERV_WHOLE;
+
+	return vchan_tx_prep(&s3cchan->vc, &txd->vd, flags);
+}
+
+static struct dma_async_tx_descriptor *s3c24xx_dma_prep_slave_sg(
+		struct dma_chan *chan, struct scatterlist *sgl,
+		unsigned int sg_len, enum dma_transfer_direction direction,
+		unsigned long flags, void *context)
+{
+	struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan);
+	struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
+	const struct s3c24xx_dma_platdata *pdata = s3cdma->pdata;
+	struct s3c24xx_dma_channel *cdata = &pdata->channels[s3cchan->id];
+	struct s3c24xx_txd *txd;
+	struct s3c24xx_sg *dsg;
+	struct scatterlist *sg;
+	dma_addr_t slave_addr;
+	u32 hwcfg = 0;
+	int tmp;
+
+	dev_dbg(&s3cdma->pdev->dev, "prepare transaction of %d bytes from %s\n",
+			sg_dma_len(sgl), s3cchan->name);
+
+	txd = s3c24xx_dma_get_txd();
+	if (!txd)
+		return NULL;
+
+	if (cdata->handshake)
+		txd->dcon |= S3C24XX_DCON_HANDSHAKE;
+
+	switch (cdata->bus) {
+	case S3C24XX_DMA_APB:
+		txd->dcon |= S3C24XX_DCON_SYNC_PCLK;
+		hwcfg |= S3C24XX_DISRCC_LOC_APB;
+		break;
+	case S3C24XX_DMA_AHB:
+		txd->dcon |= S3C24XX_DCON_SYNC_HCLK;
+		hwcfg |= S3C24XX_DISRCC_LOC_AHB;
+		break;
+	}
+
+	/*
+	 * Always assume our peripheral destination is a fixed
+	 * address in memory.
+	 */
+	hwcfg |= S3C24XX_DISRCC_INC_FIXED;
+
+	/*
+	 * Individual dma operations are requested by the slave,
+	 * so serve only single atomic operations (S3C24XX_DCON_SERV_SINGLE).
+	 */
+	txd->dcon |= S3C24XX_DCON_SERV_SINGLE;
+
+	if (direction == DMA_MEM_TO_DEV) {
+		txd->disrcc = S3C24XX_DISRCC_LOC_AHB |
+			      S3C24XX_DISRCC_INC_INCREMENT;
+		txd->didstc = hwcfg;
+		slave_addr = s3cchan->cfg.dst_addr;
+		txd->width = s3cchan->cfg.dst_addr_width;
+	} else if (direction == DMA_DEV_TO_MEM) {
+		txd->disrcc = hwcfg;
+		txd->didstc = S3C24XX_DIDSTC_LOC_AHB |
+			      S3C24XX_DIDSTC_INC_INCREMENT;
+		slave_addr = s3cchan->cfg.src_addr;
+		txd->width = s3cchan->cfg.src_addr_width;
+	} else {
+		s3c24xx_dma_free_txd(txd);
+		dev_err(&s3cdma->pdev->dev,
+			"direction %d unsupported\n", direction);
+		return NULL;
+	}
+
+	for_each_sg(sgl, sg, sg_len, tmp) {
+		dsg = kzalloc(sizeof(*dsg), GFP_NOWAIT);
+		if (!dsg) {
+			s3c24xx_dma_free_txd(txd);
+			return NULL;
+		}
+		list_add_tail(&dsg->node, &txd->dsg_list);
+
+		dsg->len = sg_dma_len(sg);
+		if (direction == DMA_MEM_TO_DEV) {
+			dsg->src_addr = sg_dma_address(sg);
+			dsg->dst_addr = slave_addr;
+		} else { /* DMA_DEV_TO_MEM */
+			dsg->src_addr = slave_addr;
+			dsg->dst_addr = sg_dma_address(sg);
+		}
+	}
+
+	return vchan_tx_prep(&s3cchan->vc, &txd->vd, flags);
+}
+
+/*
+ * Slave transactions call back to the slave device to allow
+ * synchronization of slave DMA signals with the DMAC enable
+ */
+static void s3c24xx_dma_issue_pending(struct dma_chan *chan)
+{
+	struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan);
+	unsigned long flags;
+
+	spin_lock_irqsave(&s3cchan->vc.lock, flags);
+	if (vchan_issue_pending(&s3cchan->vc)) {
+		if (!s3cchan->phy && s3cchan->state != S3C24XX_DMA_CHAN_WAITING)
+			s3c24xx_dma_phy_alloc_and_start(s3cchan);
+	}
+	spin_unlock_irqrestore(&s3cchan->vc.lock, flags);
+}
+
+/*
+ * Bringup and teardown
+ */
+
+/*
+ * Initialise the DMAC memcpy/slave channels.
+ * Make a local wrapper to hold required data
+ */
+static int s3c24xx_dma_init_virtual_channels(struct s3c24xx_dma_engine *s3cdma,
+		struct dma_device *dmadev, unsigned int channels, bool slave)
+{
+	struct s3c24xx_dma_chan *chan;
+	int i;
+
+	INIT_LIST_HEAD(&dmadev->channels);
+
+	/*
+	 * Register as many memcpy channels as there are physical channels;
+	 * we won't always be able to use all of them, but the code will
+	 * have to cope with that situation.
+	 */
+	for (i = 0; i < channels; i++) {
+		chan = devm_kzalloc(dmadev->dev, sizeof(*chan), GFP_KERNEL);
+		if (!chan) {
+			dev_err(dmadev->dev,
+				"%s no memory for channel\n", __func__);
+			return -ENOMEM;
+		}
+
+		chan->id = i;
+		chan->host = s3cdma;
+		chan->state = S3C24XX_DMA_CHAN_IDLE;
+
+		if (slave) {
+			chan->slave = true;
+			chan->name = kasprintf(GFP_KERNEL, "slave%d", i);
+			if (!chan->name)
+				return -ENOMEM;
+		} else {
+			chan->name = kasprintf(GFP_KERNEL, "memcpy%d", i);
+			if (!chan->name)
+				return -ENOMEM;
+		}
+		dev_dbg(dmadev->dev,
+			 "initialize virtual channel \"%s\"\n",
+			 chan->name);
+
+		chan->vc.desc_free = s3c24xx_dma_desc_free;
+		vchan_init(&chan->vc, dmadev);
+	}
+	dev_info(dmadev->dev, "initialized %d virtual %s channels\n",
+		 i, slave ? "slave" : "memcpy");
+	return i;
+}
+
+static void s3c24xx_dma_free_virtual_channels(struct dma_device *dmadev)
+{
+	struct s3c24xx_dma_chan *chan = NULL;
+	struct s3c24xx_dma_chan *next;
+
+	list_for_each_entry_safe(chan,
+				 next, &dmadev->channels, vc.chan.device_node)
+		list_del(&chan->vc.chan.device_node);
+}
+
+/* s3c2410, s3c2440 and s3c2442 have a 0x40 stride without separate clocks */
+static struct soc_data soc_s3c2410 = {
+	.stride = 0x40,
+	.has_reqsel = false,
+	.has_clocks = false,
+};
+
+/* s3c2412 and s3c2413 have a 0x40 stride and dmareqsel mechanism */
+static struct soc_data soc_s3c2412 = {
+	.stride = 0x40,
+	.has_reqsel = true,
+	.has_clocks = true,
+};
+
+/* s3c2443 and following have a 0x100 stride and dmareqsel mechanism */
+static struct soc_data soc_s3c2443 = {
+	.stride = 0x100,
+	.has_reqsel = true,
+	.has_clocks = true,
+};
+
+static struct platform_device_id s3c24xx_dma_driver_ids[] = {
+	{
+		.name		= "s3c2410-dma",
+		.driver_data	= (kernel_ulong_t)&soc_s3c2410,
+	}, {
+		.name		= "s3c2412-dma",
+		.driver_data	= (kernel_ulong_t)&soc_s3c2412,
+	}, {
+		.name		= "s3c2443-dma",
+		.driver_data	= (kernel_ulong_t)&soc_s3c2443,
+	},
+	{ },
+};
+
+static struct soc_data *s3c24xx_dma_get_soc_data(struct platform_device *pdev)
+{
+	return (struct soc_data *)
+			 platform_get_device_id(pdev)->driver_data;
+}
+
+static int s3c24xx_dma_probe(struct platform_device *pdev)
+{
+	const struct s3c24xx_dma_platdata *pdata = dev_get_platdata(&pdev->dev);
+	struct s3c24xx_dma_engine *s3cdma;
+	struct soc_data *sdata;
+	struct resource *res;
+	int ret;
+	int i;
+
+	if (!pdata) {
+		dev_err(&pdev->dev, "platform data missing\n");
+		return -ENODEV;
+	}
+
+	/* Basic sanity check */
+	if (pdata->num_phy_channels > MAX_DMA_CHANNELS) {
+		dev_err(&pdev->dev, "too many dma channels %d, max %d\n",
+			pdata->num_phy_channels, MAX_DMA_CHANNELS);
+		return -EINVAL;
+	}
+
+	sdata = s3c24xx_dma_get_soc_data(pdev);
+	if (!sdata)
+		return -EINVAL;
+
+	s3cdma = devm_kzalloc(&pdev->dev, sizeof(*s3cdma), GFP_KERNEL);
+	if (!s3cdma)
+		return -ENOMEM;
+
+	s3cdma->pdev = pdev;
+	s3cdma->pdata = pdata;
+	s3cdma->sdata = sdata;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	s3cdma->base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(s3cdma->base))
+		return PTR_ERR(s3cdma->base);
+
+	s3cdma->phy_chans = devm_kzalloc(&pdev->dev,
+					      sizeof(struct s3c24xx_dma_phy) *
+							pdata->num_phy_channels,
+					      GFP_KERNEL);
+	if (!s3cdma->phy_chans)
+		return -ENOMEM;
+
+	/* acquire irqs and clocks for all physical channels */
+	for (i = 0; i < pdata->num_phy_channels; i++) {
+		struct s3c24xx_dma_phy *phy = &s3cdma->phy_chans[i];
+		char clk_name[6];
+
+		phy->id = i;
+		phy->base = s3cdma->base + (i * sdata->stride);
+		phy->host = s3cdma;
+
+		phy->irq = platform_get_irq(pdev, i);
+		if (phy->irq < 0) {
+			dev_err(&pdev->dev, "failed to get irq %d, err %d\n",
+				i, phy->irq);
+			continue;
+		}
+
+		ret = devm_request_irq(&pdev->dev, phy->irq, s3c24xx_dma_irq,
+				       0, pdev->name, phy);
+		if (ret) {
+			dev_err(&pdev->dev, "Unable to request irq for channel %d, error %d\n",
+				i, ret);
+			continue;
+		}
+
+		if (sdata->has_clocks) {
+			sprintf(clk_name, "dma.%d", i);
+			phy->clk = devm_clk_get(&pdev->dev, clk_name);
+			if (IS_ERR(phy->clk)) {
+				dev_err(&pdev->dev, "unable to acquire clock for channel %d, error %ld\n",
+					i, PTR_ERR(phy->clk));
+				continue;
+			}
+
+			ret = clk_prepare(phy->clk);
+			if (ret) {
+				dev_err(&pdev->dev, "clock for phy %d failed, error %d\n",
+					i, ret);
+				continue;
+			}
+		}
+
+		spin_lock_init(&phy->lock);
+		phy->valid = true;
+
+		dev_dbg(&pdev->dev, "physical channel %d is %s\n",
+			i, s3c24xx_dma_phy_busy(phy) ? "BUSY" : "FREE");
+	}
+
+	/* Initialize memcpy engine */
+	dma_cap_set(DMA_MEMCPY, s3cdma->memcpy.cap_mask);
+	dma_cap_set(DMA_PRIVATE, s3cdma->memcpy.cap_mask);
+	s3cdma->memcpy.dev = &pdev->dev;
+	s3cdma->memcpy.device_alloc_chan_resources =
+					s3c24xx_dma_alloc_chan_resources;
+	s3cdma->memcpy.device_free_chan_resources =
+					s3c24xx_dma_free_chan_resources;
+	s3cdma->memcpy.device_prep_dma_memcpy = s3c24xx_dma_prep_memcpy;
+	s3cdma->memcpy.device_tx_status = s3c24xx_dma_tx_status;
+	s3cdma->memcpy.device_issue_pending = s3c24xx_dma_issue_pending;
+	s3cdma->memcpy.device_control = s3c24xx_dma_control;
+
+	/* Initialize slave engine for SoC internal dedicated peripherals */
+	dma_cap_set(DMA_SLAVE, s3cdma->slave.cap_mask);
+	dma_cap_set(DMA_PRIVATE, s3cdma->slave.cap_mask);
+	s3cdma->slave.dev = &pdev->dev;
+	s3cdma->slave.device_alloc_chan_resources =
+					s3c24xx_dma_alloc_chan_resources;
+	s3cdma->slave.device_free_chan_resources =
+					s3c24xx_dma_free_chan_resources;
+	s3cdma->slave.device_tx_status = s3c24xx_dma_tx_status;
+	s3cdma->slave.device_issue_pending = s3c24xx_dma_issue_pending;
+	s3cdma->slave.device_prep_slave_sg = s3c24xx_dma_prep_slave_sg;
+	s3cdma->slave.device_control = s3c24xx_dma_control;
+
+	/* Register as many memcpy channels as there are physical channels */
+	ret = s3c24xx_dma_init_virtual_channels(s3cdma, &s3cdma->memcpy,
+						pdata->num_phy_channels, false);
+	if (ret <= 0) {
+		dev_warn(&pdev->dev,
+			 "%s failed to enumerate memcpy channels - %d\n",
+			 __func__, ret);
+		goto err_memcpy;
+	}
+
+	/* Register slave channels */
+	ret = s3c24xx_dma_init_virtual_channels(s3cdma, &s3cdma->slave,
+				pdata->num_channels, true);
+	if (ret <= 0) {
+		dev_warn(&pdev->dev,
+			"%s failed to enumerate slave channels - %d\n",
+				__func__, ret);
+		goto err_slave;
+	}
+
+	ret = dma_async_device_register(&s3cdma->memcpy);
+	if (ret) {
+		dev_warn(&pdev->dev,
+			"%s failed to register memcpy as an async device - %d\n",
+			__func__, ret);
+		goto err_memcpy_reg;
+	}
+
+	ret = dma_async_device_register(&s3cdma->slave);
+	if (ret) {
+		dev_warn(&pdev->dev,
+			"%s failed to register slave as an async device - %d\n",
+			__func__, ret);
+		goto err_slave_reg;
+	}
+
+	platform_set_drvdata(pdev, s3cdma);
+	dev_info(&pdev->dev, "Loaded dma driver with %d physical channels\n",
+		 pdata->num_phy_channels);
+
+	return 0;
+
+err_slave_reg:
+	dma_async_device_unregister(&s3cdma->memcpy);
+err_memcpy_reg:
+	s3c24xx_dma_free_virtual_channels(&s3cdma->slave);
+err_slave:
+	s3c24xx_dma_free_virtual_channels(&s3cdma->memcpy);
+err_memcpy:
+	if (sdata->has_clocks)
+		for (i = 0; i < pdata->num_phy_channels; i++) {
+			struct s3c24xx_dma_phy *phy = &s3cdma->phy_chans[i];
+			if (phy->valid)
+				clk_unprepare(phy->clk);
+		}
+
+	return ret;
+}
+
+static int s3c24xx_dma_remove(struct platform_device *pdev)
+{
+	const struct s3c24xx_dma_platdata *pdata = dev_get_platdata(&pdev->dev);
+	struct s3c24xx_dma_engine *s3cdma = platform_get_drvdata(pdev);
+	struct soc_data *sdata = s3c24xx_dma_get_soc_data(pdev);
+	int i;
+
+	dma_async_device_unregister(&s3cdma->slave);
+	dma_async_device_unregister(&s3cdma->memcpy);
+
+	s3c24xx_dma_free_virtual_channels(&s3cdma->slave);
+	s3c24xx_dma_free_virtual_channels(&s3cdma->memcpy);
+
+	if (sdata->has_clocks)
+		for (i = 0; i < pdata->num_phy_channels; i++) {
+			struct s3c24xx_dma_phy *phy = &s3cdma->phy_chans[i];
+			if (phy->valid)
+				clk_unprepare(phy->clk);
+		}
+
+	return 0;
+}
+
+static struct platform_driver s3c24xx_dma_driver = {
+	.driver		= {
+		.name	= "s3c24xx-dma",
+		.owner	= THIS_MODULE,
+	},
+	.id_table	= s3c24xx_dma_driver_ids,
+	.probe		= s3c24xx_dma_probe,
+	.remove		= s3c24xx_dma_remove,
+};
+
+module_platform_driver(s3c24xx_dma_driver);
+
+bool s3c24xx_dma_filter(struct dma_chan *chan, void *param)
+{
+	struct s3c24xx_dma_chan *s3cchan;
+
+	if (chan->device->dev->driver != &s3c24xx_dma_driver.driver)
+		return false;
+
+	s3cchan = to_s3c24xx_dma_chan(chan);
+
+	return s3cchan->id == (int)param;
+}
+EXPORT_SYMBOL(s3c24xx_dma_filter);
+
+MODULE_DESCRIPTION("S3C24XX DMA Driver");
+MODULE_AUTHOR("Heiko Stuebner");
+MODULE_LICENSE("GPL v2");
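
For context, the sketch below shows how a client driver might claim one of these channels through the standard dmaengine API, using the s3c24xx_dma_filter() exported above. It is not part of the merge; the channel id (0) and the peripheral FIFO address are placeholder assumptions.

	#include <linux/dmaengine.h>

	/* Hypothetical client setup; the channel id and FIFO address are made up. */
	static struct dma_chan *claim_s3c24xx_slave_channel(void)
	{
		dma_cap_mask_t mask;
		struct dma_slave_config cfg = {
			.direction	= DMA_MEM_TO_DEV,
			.dst_addr	= 0x51000030,	/* assumed peripheral FIFO */
			.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		};
		struct dma_chan *chan;

		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);

		/* The filter matches on the virtual channel id passed as param. */
		chan = dma_request_channel(mask, s3c24xx_dma_filter, (void *)0);
		if (!chan)
			return NULL;

		if (dmaengine_slave_config(chan, &cfg)) {
			dma_release_channel(chan);
			return NULL;
		}

		return chan;
	}

Transfers would then be prepared and launched with the usual dmaengine_prep_slave_single()/dmaengine_submit()/dma_async_issue_pending() sequence, which ends up in the issue_pending hook above.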

+ 1 - 2
drivers/gpu/drm/i2c/tda998x_drv.c

@@ -707,8 +707,7 @@ tda998x_encoder_dpms(struct drm_encoder *encoder, int mode)
 		reg_write(encoder, REG_VIP_CNTRL_2, priv->vip_cntrl_2);
 		break;
 	case DRM_MODE_DPMS_OFF:
-		/* disable audio and video ports */
-		reg_write(encoder, REG_ENA_AP, 0x00);
+		/* disable video ports */
 		reg_write(encoder, REG_ENA_VP_0, 0x00);
 		reg_write(encoder, REG_ENA_VP_1, 0x00);
 		reg_write(encoder, REG_ENA_VP_2, 0x00);

+ 4 - 4
drivers/gpu/drm/i915/i915_gem.c

@@ -4800,10 +4800,10 @@ i915_gem_inactive_count(struct shrinker *shrinker, struct shrink_control *sc)
 
 	if (!mutex_trylock(&dev->struct_mutex)) {
 		if (!mutex_is_locked_by(&dev->struct_mutex, current))
-			return SHRINK_STOP;
+			return 0;
 
 		if (dev_priv->mm.shrinker_no_lock_stealing)
-			return SHRINK_STOP;
+			return 0;
 
 		unlock = false;
 	}
@@ -4901,10 +4901,10 @@ i915_gem_inactive_scan(struct shrinker *shrinker, struct shrink_control *sc)
 
 	if (!mutex_trylock(&dev->struct_mutex)) {
 		if (!mutex_is_locked_by(&dev->struct_mutex, current))
-			return 0;
+			return SHRINK_STOP;
 
 		if (dev_priv->mm.shrinker_no_lock_stealing)
-			return 0;
+			return SHRINK_STOP;
 
 		unlock = false;
 	}

+ 4 - 2
drivers/gpu/drm/i915/i915_gpu_error.c

@@ -143,8 +143,10 @@ static void i915_error_vprintf(struct drm_i915_error_state_buf *e,
 
 	/* Seek the first printf which is hits start position */
 	if (e->pos < e->start) {
-		len = vsnprintf(NULL, 0, f, args);
-		if (!__i915_error_seek(e, len))
+		va_list tmp;
+
+		va_copy(tmp, args);
+		if (!__i915_error_seek(e, vsnprintf(NULL, 0, f, tmp)))
 			return;
 	}
 

+ 4 - 0
drivers/gpu/drm/i915/intel_display.c

@@ -4775,6 +4775,10 @@ static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
 
 	pipeconf = 0;
 
+	if (dev_priv->quirks & QUIRK_PIPEA_FORCE &&
+	    I915_READ(PIPECONF(intel_crtc->pipe)) & PIPECONF_ENABLE)
+		pipeconf |= PIPECONF_ENABLE;
+
 	if (intel_crtc->pipe == 0 && INTEL_INFO(dev)->gen < 4) {
 		/* Enable pixel doubling when the dot clock is > 90% of the (display)
 		 * core speed.

+ 12 - 1
drivers/gpu/drm/i915/intel_dp.c

@@ -588,7 +588,18 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
 			DRM_DEBUG_KMS("aux_ch native nack\n");
 			return -EREMOTEIO;
 		case AUX_NATIVE_REPLY_DEFER:
-			udelay(100);
+			/*
+			 * For now, just give more slack to branch devices. We
+			 * could check the DPCD for I2C bit rate capabilities,
+			 * and if available, adjust the interval. We could also
+			 * be more careful with DP-to-Legacy adapters where a
+			 * long legacy cable may force very low I2C bit rates.
+			 */
+			if (intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
+			    DP_DWN_STRM_PORT_PRESENT)
+				usleep_range(500, 600);
+			else
+				usleep_range(300, 400);
 			continue;
 		default:
 			DRM_ERROR("aux_ch invalid native reply 0x%02x\n",

+ 8 - 0
drivers/gpu/drm/i915/intel_tv.c

@@ -916,6 +916,14 @@ intel_tv_compute_config(struct intel_encoder *encoder,
 	DRM_DEBUG_KMS("forcing bpc to 8 for TV\n");
 	pipe_config->pipe_bpp = 8*3;
 
+	/* TV has it's own notion of sync and other mode flags, so clear them. */
+	pipe_config->adjusted_mode.flags = 0;
+
+	/*
+	 * FIXME: We don't check whether the input mode is actually what we want
+	 * or whether userspace is doing something stupid.
+	 */
+
 	return true;
 }
 

+ 0 - 2
drivers/gpu/drm/msm/mdp4/mdp4_kms.c

@@ -19,8 +19,6 @@
 #include "msm_drv.h"
 #include "mdp4_kms.h"
 
-#include <mach/iommu.h>
-
 static struct mdp4_platform_config *mdp4_get_config(struct platform_device *dev);
 
 static int mdp4_hw_init(struct msm_kms *kms)

+ 4 - 4
drivers/gpu/drm/msm/msm_drv.c

@@ -18,8 +18,6 @@
 #include "msm_drv.h"
 #include "msm_gpu.h"
 
-#include <mach/iommu.h>
-
 static void msm_fb_output_poll_changed(struct drm_device *dev)
 {
 	struct msm_drm_private *priv = dev->dev_private;
@@ -62,6 +60,8 @@ int msm_iommu_attach(struct drm_device *dev, struct iommu_domain *iommu,
 	int i, ret;
 
 	for (i = 0; i < cnt; i++) {
+		/* TODO maybe some day msm iommu won't require this hack: */
+		struct device *msm_iommu_get_ctx(const char *ctx_name);
 		struct device *ctx = msm_iommu_get_ctx(names[i]);
 		if (!ctx)
 			continue;
@@ -199,7 +199,7 @@ static int msm_load(struct drm_device *dev, unsigned long flags)
 		 * imx drm driver on iMX5
 		 */
 		dev_err(dev->dev, "failed to load kms\n");
-		ret = PTR_ERR(priv->kms);
+		ret = PTR_ERR(kms);
 		goto fail;
 	}
 
@@ -697,7 +697,7 @@ static struct drm_driver msm_driver = {
 	.gem_vm_ops         = &vm_ops,
 	.dumb_create        = msm_gem_dumb_create,
 	.dumb_map_offset    = msm_gem_dumb_map_offset,
-	.dumb_destroy       = msm_gem_dumb_destroy,
+	.dumb_destroy       = drm_gem_dumb_destroy,
 #ifdef CONFIG_DEBUG_FS
 	.debugfs_init       = msm_debugfs_init,
 	.debugfs_cleanup    = msm_debugfs_cleanup,

+ 0 - 7
drivers/gpu/drm/msm/msm_gem.c

@@ -319,13 +319,6 @@ int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
 			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle);
 }
 
-int msm_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
-		uint32_t handle)
-{
-	/* No special work needed, drop the reference and see what falls out */
-	return drm_gem_handle_delete(file, handle);
-}
-
 int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
 		uint32_t handle, uint64_t *offset)
 {

+ 51 - 0
drivers/gpu/drm/radeon/btc_dpm.c

@@ -1168,6 +1168,23 @@ static const struct radeon_blacklist_clocks btc_blacklist_clocks[] =
         { 25000, 30000, RADEON_SCLK_UP }
 };
 
+void btc_get_max_clock_from_voltage_dependency_table(struct radeon_clock_voltage_dependency_table *table,
+						     u32 *max_clock)
+{
+	u32 i, clock = 0;
+
+	if ((table == NULL) || (table->count == 0)) {
+		*max_clock = clock;
+		return;
+	}
+
+	for (i = 0; i < table->count; i++) {
+		if (clock < table->entries[i].clk)
+			clock = table->entries[i].clk;
+	}
+	*max_clock = clock;
+}
+
 void btc_apply_voltage_dependency_rules(struct radeon_clock_voltage_dependency_table *table,
 					u32 clock, u16 max_voltage, u16 *voltage)
 {
@@ -2080,6 +2097,7 @@ static void btc_apply_state_adjust_rules(struct radeon_device *rdev,
 	bool disable_mclk_switching;
 	u32 mclk, sclk;
 	u16 vddc, vddci;
+	u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc;
 
 	if ((rdev->pm.dpm.new_active_crtc_count > 1) ||
 	    btc_dpm_vblank_too_short(rdev))
@@ -2121,6 +2139,39 @@ static void btc_apply_state_adjust_rules(struct radeon_device *rdev,
 			ps->low.vddci = max_limits->vddci;
 	}
 
+	/* limit clocks to max supported clocks based on voltage dependency tables */
+	btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
+							&max_sclk_vddc);
+	btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
+							&max_mclk_vddci);
+	btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
+							&max_mclk_vddc);
+
+	if (max_sclk_vddc) {
+		if (ps->low.sclk > max_sclk_vddc)
+			ps->low.sclk = max_sclk_vddc;
+		if (ps->medium.sclk > max_sclk_vddc)
+			ps->medium.sclk = max_sclk_vddc;
+		if (ps->high.sclk > max_sclk_vddc)
+			ps->high.sclk = max_sclk_vddc;
+	}
+	if (max_mclk_vddci) {
+		if (ps->low.mclk > max_mclk_vddci)
+			ps->low.mclk = max_mclk_vddci;
+		if (ps->medium.mclk > max_mclk_vddci)
+			ps->medium.mclk = max_mclk_vddci;
+		if (ps->high.mclk > max_mclk_vddci)
+			ps->high.mclk = max_mclk_vddci;
+	}
+	if (max_mclk_vddc) {
+		if (ps->low.mclk > max_mclk_vddc)
+			ps->low.mclk = max_mclk_vddc;
+		if (ps->medium.mclk > max_mclk_vddc)
+			ps->medium.mclk = max_mclk_vddc;
+		if (ps->high.mclk > max_mclk_vddc)
+			ps->high.mclk = max_mclk_vddc;
+	}
+
 	/* XXX validate the min clocks required for display */
 
 	if (disable_mclk_switching) {

+ 2 - 0
drivers/gpu/drm/radeon/btc_dpm.h

@@ -46,6 +46,8 @@ void btc_adjust_clock_combinations(struct radeon_device *rdev,
 				   struct rv7xx_pl *pl);
 void btc_apply_voltage_dependency_rules(struct radeon_clock_voltage_dependency_table *table,
 					u32 clock, u16 max_voltage, u16 *voltage);
+void btc_get_max_clock_from_voltage_dependency_table(struct radeon_clock_voltage_dependency_table *table,
+						     u32 *max_clock);
 void btc_apply_voltage_delta_rules(struct radeon_device *rdev,
 				   u16 max_vddc, u16 max_vddci,
 				   u16 *vddc, u16 *vddci);

+ 26 - 0
drivers/gpu/drm/radeon/ci_dpm.c

@@ -146,6 +146,8 @@ static const struct ci_pt_config_reg didt_config_ci[] =
 };
 
 extern u8 rv770_get_memory_module_index(struct radeon_device *rdev);
+extern void btc_get_max_clock_from_voltage_dependency_table(struct radeon_clock_voltage_dependency_table *table,
+							    u32 *max_clock);
 extern int ni_copy_and_switch_arb_sets(struct radeon_device *rdev,
 				       u32 arb_freq_src, u32 arb_freq_dest);
 extern u8 si_get_ddr3_mclk_frequency_ratio(u32 memory_clock);
@@ -712,6 +714,7 @@ static void ci_apply_state_adjust_rules(struct radeon_device *rdev,
 	struct radeon_clock_and_voltage_limits *max_limits;
 	bool disable_mclk_switching;
 	u32 sclk, mclk;
+	u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc;
 	int i;
 
 	if ((rdev->pm.dpm.new_active_crtc_count > 1) ||
@@ -739,6 +742,29 @@ static void ci_apply_state_adjust_rules(struct radeon_device *rdev,
 		}
 	}
 
+	/* limit clocks to max supported clocks based on voltage dependency tables */
+	btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
+							&max_sclk_vddc);
+	btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
+							&max_mclk_vddci);
+	btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
+							&max_mclk_vddc);
+
+	for (i = 0; i < ps->performance_level_count; i++) {
+		if (max_sclk_vddc) {
+			if (ps->performance_levels[i].sclk > max_sclk_vddc)
+				ps->performance_levels[i].sclk = max_sclk_vddc;
+		}
+		if (max_mclk_vddci) {
+			if (ps->performance_levels[i].mclk > max_mclk_vddci)
+				ps->performance_levels[i].mclk = max_mclk_vddci;
+		}
+		if (max_mclk_vddc) {
+			if (ps->performance_levels[i].mclk > max_mclk_vddc)
+				ps->performance_levels[i].mclk = max_mclk_vddc;
+		}
+	}
+
 	/* XXX validate the min clocks required for display */
 
 	if (disable_mclk_switching) {

+ 8 - 9
drivers/gpu/drm/radeon/cik.c

@@ -2845,10 +2845,8 @@ static void cik_gpu_init(struct radeon_device *rdev)
 		rdev->config.cik.tile_config |= (3 << 0);
 		break;
 	}
-	if ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT)
-		rdev->config.cik.tile_config |= 1 << 4;
-	else
-		rdev->config.cik.tile_config |= 0 << 4;
+	rdev->config.cik.tile_config |=
+		((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) << 4;
 	rdev->config.cik.tile_config |=
 		((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8;
 	rdev->config.cik.tile_config |=
@@ -4456,8 +4454,8 @@ static int cik_mc_init(struct radeon_device *rdev)
 	rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
 	rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
 	/* size in MB on si */
-	rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
-	rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
+	rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL;
+	rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL;
 	rdev->mc.visible_vram_size = rdev->mc.aper_size;
 	si_vram_gtt_location(rdev, &rdev->mc);
 	radeon_update_bandwidth_info(rdev);
@@ -4735,12 +4733,13 @@ static void cik_vm_decode_fault(struct radeon_device *rdev,
 	u32 mc_id = (status & MEMORY_CLIENT_ID_MASK) >> MEMORY_CLIENT_ID_SHIFT;
 	u32 vmid = (status & FAULT_VMID_MASK) >> FAULT_VMID_SHIFT;
 	u32 protections = (status & PROTECTIONS_MASK) >> PROTECTIONS_SHIFT;
-	char *block = (char *)&mc_client;
+	char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff,
+		(mc_client >> 8) & 0xff, mc_client & 0xff, 0 };
 
-	printk("VM fault (0x%02x, vmid %d) at page %u, %s from %s (%d)\n",
+	printk("VM fault (0x%02x, vmid %d) at page %u, %s from '%s' (0x%08x) (%d)\n",
 	       protections, vmid, addr,
 	       (status & MEMORY_CLIENT_RW_MASK) ? "write" : "read",
-	       block, mc_id);
+	       block, mc_client, mc_id);
 }
 
 /**

+ 24 - 0
drivers/gpu/drm/radeon/ni_dpm.c

@@ -787,6 +787,7 @@ static void ni_apply_state_adjust_rules(struct radeon_device *rdev,
 	bool disable_mclk_switching;
 	u32 mclk, sclk;
 	u16 vddc, vddci;
+	u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc;
 	int i;
 
 	if ((rdev->pm.dpm.new_active_crtc_count > 1) ||
@@ -813,6 +814,29 @@ static void ni_apply_state_adjust_rules(struct radeon_device *rdev,
 		}
 	}
 
+	/* limit clocks to max supported clocks based on voltage dependency tables */
+	btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
+							&max_sclk_vddc);
+	btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
+							&max_mclk_vddci);
+	btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
+							&max_mclk_vddc);
+
+	for (i = 0; i < ps->performance_level_count; i++) {
+		if (max_sclk_vddc) {
+			if (ps->performance_levels[i].sclk > max_sclk_vddc)
+				ps->performance_levels[i].sclk = max_sclk_vddc;
+		}
+		if (max_mclk_vddci) {
+			if (ps->performance_levels[i].mclk > max_mclk_vddci)
+				ps->performance_levels[i].mclk = max_mclk_vddci;
+		}
+		if (max_mclk_vddc) {
+			if (ps->performance_levels[i].mclk > max_mclk_vddc)
+				ps->performance_levels[i].mclk = max_mclk_vddc;
+		}
+	}
+
 	/* XXX validate the min clocks required for display */
 
 	if (disable_mclk_switching) {

+ 5 - 3
drivers/gpu/drm/radeon/r100.c

@@ -2933,9 +2933,11 @@ static int r100_debugfs_cp_ring_info(struct seq_file *m, void *data)
 	seq_printf(m, "CP_RB_RPTR 0x%08x\n", rdp);
 	seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw);
 	seq_printf(m, "%u dwords in ring\n", count);
-	for (j = 0; j <= count; j++) {
-		i = (rdp + j) & ring->ptr_mask;
-		seq_printf(m, "r[%04d]=0x%08x\n", i, ring->ring[i]);
+	if (ring->ready) {
+		for (j = 0; j <= count; j++) {
+			i = (rdp + j) & ring->ptr_mask;
+			seq_printf(m, "r[%04d]=0x%08x\n", i, ring->ring[i]);
+		}
 	}
 	return 0;
 }

+ 1 - 1
drivers/gpu/drm/radeon/r600_dpm.c

@@ -1084,7 +1084,7 @@ int r600_parse_extended_power_table(struct radeon_device *rdev)
 				rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].dclk =
 					le16_to_cpu(uvd_clk->usDClkLow) | (uvd_clk->ucDClkHigh << 16);
 				rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v =
-					le16_to_cpu(limits->entries[i].usVoltage);
+					le16_to_cpu(entry->usVoltage);
 				entry = (ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *)
 					((u8 *)entry + sizeof(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record));
 			}

+ 15 - 5
drivers/gpu/drm/radeon/r600_hdmi.c

@@ -257,10 +257,7 @@ void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock)
 	 * number (coefficient of two integer numbers.  DCCG_AUDIO_DTOx_PHASE
 	 * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator
 	 */
-	if (ASIC_IS_DCE3(rdev)) {
-		/* according to the reg specs, this should DCE3.2 only, but in
-		 * practice it seems to cover DCE3.0 as well.
-		 */
+	if (ASIC_IS_DCE32(rdev)) {
 		if (dig->dig_encoder == 0) {
 			dto_cntl = RREG32(DCCG_AUDIO_DTO0_CNTL) & ~DCCG_AUDIO_DTO_WALLCLOCK_RATIO_MASK;
 			dto_cntl |= DCCG_AUDIO_DTO_WALLCLOCK_RATIO(wallclock_ratio);
@@ -276,8 +273,21 @@ void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock)
 			WREG32(DCCG_AUDIO_DTO1_MODULE, dto_modulo);
 			WREG32(DCCG_AUDIO_DTO_SELECT, 1); /* select DTO1 */
 		}
+	} else if (ASIC_IS_DCE3(rdev)) {
+		/* according to the reg specs, this should DCE3.2 only, but in
+		 * practice it seems to cover DCE3.0/3.1 as well.
+		 */
+		if (dig->dig_encoder == 0) {
+			WREG32(DCCG_AUDIO_DTO0_PHASE, base_rate * 100);
+			WREG32(DCCG_AUDIO_DTO0_MODULE, clock * 100);
+			WREG32(DCCG_AUDIO_DTO_SELECT, 0); /* select DTO0 */
+		} else {
+			WREG32(DCCG_AUDIO_DTO1_PHASE, base_rate * 100);
+			WREG32(DCCG_AUDIO_DTO1_MODULE, clock * 100);
+			WREG32(DCCG_AUDIO_DTO_SELECT, 1); /* select DTO1 */
+		}
 	} else {
-		/* according to the reg specs, this should be DCE2.0 and DCE3.0 */
+		/* according to the reg specs, this should be DCE2.0 and DCE3.0/3.1 */
 		WREG32(AUDIO_DTO, AUDIO_DTO_PHASE(base_rate / 10) |
 		       AUDIO_DTO_MODULE(clock / 10));
 	}

+ 2 - 0
drivers/gpu/drm/radeon/radeon_asic.c

@@ -1004,6 +1004,8 @@ static struct radeon_asic rv6xx_asic = {
 		.wait_for_vblank = &avivo_wait_for_vblank,
 		.set_backlight_level = &atombios_set_backlight_level,
 		.get_backlight_level = &atombios_get_backlight_level,
+		.hdmi_enable = &r600_hdmi_enable,
+		.hdmi_setmode = &r600_hdmi_setmode,
 	},
 	.copy = {
 		.blit = &r600_copy_cpdma,

+ 43 - 23
drivers/gpu/drm/radeon/radeon_atombios.c

@@ -1367,6 +1367,7 @@ bool radeon_atombios_get_ppll_ss_info(struct radeon_device *rdev,
 	int index = GetIndexIntoMasterTable(DATA, PPLL_SS_Info);
 	uint16_t data_offset, size;
 	struct _ATOM_SPREAD_SPECTRUM_INFO *ss_info;
+	struct _ATOM_SPREAD_SPECTRUM_ASSIGNMENT *ss_assign;
 	uint8_t frev, crev;
 	int i, num_indices;
 
@@ -1378,18 +1379,21 @@
 
 		num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
 			sizeof(ATOM_SPREAD_SPECTRUM_ASSIGNMENT);
-
+		ss_assign = (struct _ATOM_SPREAD_SPECTRUM_ASSIGNMENT*)
+			((u8 *)&ss_info->asSS_Info[0]);
 		for (i = 0; i < num_indices; i++) {
-			if (ss_info->asSS_Info[i].ucSS_Id == id) {
+			if (ss_assign->ucSS_Id == id) {
 				ss->percentage =
-					le16_to_cpu(ss_info->asSS_Info[i].usSpreadSpectrumPercentage);
-				ss->type = ss_info->asSS_Info[i].ucSpreadSpectrumType;
-				ss->step = ss_info->asSS_Info[i].ucSS_Step;
-				ss->delay = ss_info->asSS_Info[i].ucSS_Delay;
-				ss->range = ss_info->asSS_Info[i].ucSS_Range;
-				ss->refdiv = ss_info->asSS_Info[i].ucRecommendedRef_Div;
+					le16_to_cpu(ss_assign->usSpreadSpectrumPercentage);
+				ss->type = ss_assign->ucSpreadSpectrumType;
+				ss->step = ss_assign->ucSS_Step;
+				ss->delay = ss_assign->ucSS_Delay;
+				ss->range = ss_assign->ucSS_Range;
+				ss->refdiv = ss_assign->ucRecommendedRef_Div;
 				return true;
 			}
+			ss_assign = (struct _ATOM_SPREAD_SPECTRUM_ASSIGNMENT*)
+				((u8 *)ss_assign + sizeof(struct _ATOM_SPREAD_SPECTRUM_ASSIGNMENT));
 		}
 	}
 	return false;
@@ -1477,6 +1481,12 @@ union asic_ss_info {
 	struct _ATOM_ASIC_INTERNAL_SS_INFO_V3 info_3;
 };
 
+union asic_ss_assignment {
+	struct _ATOM_ASIC_SS_ASSIGNMENT v1;
+	struct _ATOM_ASIC_SS_ASSIGNMENT_V2 v2;
+	struct _ATOM_ASIC_SS_ASSIGNMENT_V3 v3;
+};
+
 bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev,
 				      struct radeon_atom_ss *ss,
 				      int id, u32 clock)
@@ -1485,6 +1495,7 @@ bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev,
 	int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
 	uint16_t data_offset, size;
 	union asic_ss_info *ss_info;
+	union asic_ss_assignment *ss_assign;
 	uint8_t frev, crev;
 	int i, num_indices;
 
@@ -1509,45 +1520,52 @@ bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev,
 			num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
 				sizeof(ATOM_ASIC_SS_ASSIGNMENT);
 
+			ss_assign = (union asic_ss_assignment *)((u8 *)&ss_info->info.asSpreadSpectrum[0]);
 			for (i = 0; i < num_indices; i++) {
-				if ((ss_info->info.asSpreadSpectrum[i].ucClockIndication == id) &&
-				    (clock <= le32_to_cpu(ss_info->info.asSpreadSpectrum[i].ulTargetClockRange))) {
+				if ((ss_assign->v1.ucClockIndication == id) &&
+				    (clock <= le32_to_cpu(ss_assign->v1.ulTargetClockRange))) {
 					ss->percentage =
-						le16_to_cpu(ss_info->info.asSpreadSpectrum[i].usSpreadSpectrumPercentage);
-					ss->type = ss_info->info.asSpreadSpectrum[i].ucSpreadSpectrumMode;
-					ss->rate = le16_to_cpu(ss_info->info.asSpreadSpectrum[i].usSpreadRateInKhz);
+						le16_to_cpu(ss_assign->v1.usSpreadSpectrumPercentage);
+					ss->type = ss_assign->v1.ucSpreadSpectrumMode;
+					ss->rate = le16_to_cpu(ss_assign->v1.usSpreadRateInKhz);
 					return true;
 				}
+				ss_assign = (union asic_ss_assignment *)
+					((u8 *)ss_assign + sizeof(ATOM_ASIC_SS_ASSIGNMENT));
 			}
 			break;
 		case 2:
 			num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
 				sizeof(ATOM_ASIC_SS_ASSIGNMENT_V2);
+			ss_assign = (union asic_ss_assignment *)((u8 *)&ss_info->info_2.asSpreadSpectrum[0]);
 			for (i = 0; i < num_indices; i++) {
-				if ((ss_info->info_2.asSpreadSpectrum[i].ucClockIndication == id) &&
-				    (clock <= le32_to_cpu(ss_info->info_2.asSpreadSpectrum[i].ulTargetClockRange))) {
+				if ((ss_assign->v2.ucClockIndication == id) &&
+				    (clock <= le32_to_cpu(ss_assign->v2.ulTargetClockRange))) {
 					ss->percentage =
-						le16_to_cpu(ss_info->info_2.asSpreadSpectrum[i].usSpreadSpectrumPercentage);
-					ss->type = ss_info->info_2.asSpreadSpectrum[i].ucSpreadSpectrumMode;
-					ss->rate = le16_to_cpu(ss_info->info_2.asSpreadSpectrum[i].usSpreadRateIn10Hz);
+						le16_to_cpu(ss_assign->v2.usSpreadSpectrumPercentage);
+					ss->type = ss_assign->v2.ucSpreadSpectrumMode;
+					ss->rate = le16_to_cpu(ss_assign->v2.usSpreadRateIn10Hz);
 					if ((crev == 2) &&
 					    ((id == ASIC_INTERNAL_ENGINE_SS) ||
 					     (id == ASIC_INTERNAL_MEMORY_SS)))
 						ss->rate /= 100;
 					return true;
 				}
+				ss_assign = (union asic_ss_assignment *)
+					((u8 *)ss_assign + sizeof(ATOM_ASIC_SS_ASSIGNMENT_V2));
 			}
 			break;
 		case 3:
 			num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
 				sizeof(ATOM_ASIC_SS_ASSIGNMENT_V3);
+			ss_assign = (union asic_ss_assignment *)((u8 *)&ss_info->info_3.asSpreadSpectrum[0]);
 			for (i = 0; i < num_indices; i++) {
-				if ((ss_info->info_3.asSpreadSpectrum[i].ucClockIndication == id) &&
-				    (clock <= le32_to_cpu(ss_info->info_3.asSpreadSpectrum[i].ulTargetClockRange))) {
+				if ((ss_assign->v3.ucClockIndication == id) &&
+				    (clock <= le32_to_cpu(ss_assign->v3.ulTargetClockRange))) {
 					ss->percentage =
-						le16_to_cpu(ss_info->info_3.asSpreadSpectrum[i].usSpreadSpectrumPercentage);
-					ss->type = ss_info->info_3.asSpreadSpectrum[i].ucSpreadSpectrumMode;
-					ss->rate = le16_to_cpu(ss_info->info_3.asSpreadSpectrum[i].usSpreadRateIn10Hz);
+						le16_to_cpu(ss_assign->v3.usSpreadSpectrumPercentage);
+					ss->type = ss_assign->v3.ucSpreadSpectrumMode;
+					ss->rate = le16_to_cpu(ss_assign->v3.usSpreadRateIn10Hz);
 					if ((id == ASIC_INTERNAL_ENGINE_SS) ||
 					    (id == ASIC_INTERNAL_MEMORY_SS))
 						ss->rate /= 100;
@@ -1555,6 +1573,8 @@ bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev,
 						radeon_atombios_get_igp_ss_overrides(rdev, ss, id);
 					return true;
 				}
+				ss_assign = (union asic_ss_assignment *)
+					((u8 *)ss_assign + sizeof(ATOM_ASIC_SS_ASSIGNMENT_V3));
 			}
 			break;
 		default:

+ 3 - 2
drivers/gpu/drm/radeon/radeon_cs.c

@@ -85,8 +85,9 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
 		   VRAM, also but everything into VRAM on AGP cards to avoid
 		   image corruptions */
 		if (p->ring == R600_RING_TYPE_UVD_INDEX &&
-		    (i == 0 || p->rdev->flags & RADEON_IS_AGP)) {
-			/* TODO: is this still needed for NI+ ? */
+		    p->rdev->family < CHIP_PALM &&
+		    (i == 0 || drm_pci_device_is_agp(p->rdev->ddev))) {
+
 			p->relocs[i].lobj.domain =
 				RADEON_GEM_DOMAIN_VRAM;
 

+ 12 - 3
drivers/gpu/drm/radeon/radeon_device.c

@@ -1320,13 +1320,22 @@ int radeon_device_init(struct radeon_device *rdev,
 			return r;
 	}
 	if ((radeon_testing & 1)) {
-		radeon_test_moves(rdev);
+		if (rdev->accel_working)
+			radeon_test_moves(rdev);
+		else
+			DRM_INFO("radeon: acceleration disabled, skipping move tests\n");
 	}
 	if ((radeon_testing & 2)) {
-		radeon_test_syncing(rdev);
+		if (rdev->accel_working)
+			radeon_test_syncing(rdev);
+		else
+			DRM_INFO("radeon: acceleration disabled, skipping sync tests\n");
 	}
 	if (radeon_benchmarking) {
-		radeon_benchmark(rdev, radeon_benchmarking);
+		if (rdev->accel_working)
+			radeon_benchmark(rdev, radeon_benchmarking);
+		else
+			DRM_INFO("radeon: acceleration disabled, skipping benchmarks\n");
 	}
 	return 0;
 }

+ 4 - 4
drivers/gpu/drm/radeon/radeon_pm.c

@@ -1002,7 +1002,7 @@ static void radeon_pm_resume_old(struct radeon_device *rdev)
 {
 	/* set up the default clocks if the MC ucode is loaded */
 	if ((rdev->family >= CHIP_BARTS) &&
-	    (rdev->family <= CHIP_HAINAN) &&
+	    (rdev->family <= CHIP_CAYMAN) &&
 	    rdev->mc_fw) {
 		if (rdev->pm.default_vddc)
 			radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
@@ -1046,7 +1046,7 @@ static void radeon_pm_resume_dpm(struct radeon_device *rdev)
 	if (ret) {
 		DRM_ERROR("radeon: dpm resume failed\n");
 		if ((rdev->family >= CHIP_BARTS) &&
-		    (rdev->family <= CHIP_HAINAN) &&
+		    (rdev->family <= CHIP_CAYMAN) &&
 		    rdev->mc_fw) {
 			if (rdev->pm.default_vddc)
 				radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
@@ -1097,7 +1097,7 @@ static int radeon_pm_init_old(struct radeon_device *rdev)
 		radeon_pm_init_profile(rdev);
 		/* set up the default clocks if the MC ucode is loaded */
 		if ((rdev->family >= CHIP_BARTS) &&
-		    (rdev->family <= CHIP_HAINAN) &&
+		    (rdev->family <= CHIP_CAYMAN) &&
 		    rdev->mc_fw) {
 			if (rdev->pm.default_vddc)
 				radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
@@ -1183,7 +1183,7 @@ static int radeon_pm_init_dpm(struct radeon_device *rdev)
 	if (ret) {
 		rdev->pm.dpm_enabled = false;
 		if ((rdev->family >= CHIP_BARTS) &&
-		    (rdev->family <= CHIP_HAINAN) &&
+		    (rdev->family <= CHIP_CAYMAN) &&
 		    rdev->mc_fw) {
 			if (rdev->pm.default_vddc)
 				radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,

+ 5 - 3
drivers/gpu/drm/radeon/radeon_ring.c

@@ -839,9 +839,11 @@ static int radeon_debugfs_ring_info(struct seq_file *m, void *data)
 	 * packet that is the root issue
 	 */
 	i = (ring->rptr + ring->ptr_mask + 1 - 32) & ring->ptr_mask;
-	for (j = 0; j <= (count + 32); j++) {
-		seq_printf(m, "r[%5d]=0x%08x\n", i, ring->ring[i]);
-		i = (i + 1) & ring->ptr_mask;
+	if (ring->ready) {
+		for (j = 0; j <= (count + 32); j++) {
+			seq_printf(m, "r[%5d]=0x%08x\n", i, ring->ring[i]);
+			i = (i + 1) & ring->ptr_mask;
+		}
 	}
 	return 0;
 }

+ 1 - 2
drivers/gpu/drm/radeon/radeon_uvd.c

@@ -476,8 +476,7 @@ static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p,
 		return -EINVAL;
 	}
 
-	/* TODO: is this still necessary on NI+ ? */
-	if ((cmd == 0 || cmd == 0x3) &&
+	if (p->rdev->family < CHIP_PALM && (cmd == 0 || cmd == 0x3) &&
 	    (start >> 28) != (p->rdev->uvd.gpu_addr >> 28)) {
 		DRM_ERROR("msg/fb buffer %LX-%LX out of 256MB segment!\n",
 			  start, end);

+ 24 - 0
drivers/gpu/drm/radeon/si_dpm.c

@@ -2910,6 +2910,7 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
 	bool disable_sclk_switching = false;
 	u32 mclk, sclk;
 	u16 vddc, vddci;
+	u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc;
 	int i;
 
 	if ((rdev->pm.dpm.new_active_crtc_count > 1) ||
@@ -2943,6 +2944,29 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
 		}
 	}
 
+	/* limit clocks to max supported clocks based on voltage dependency tables */
+	btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
+							&max_sclk_vddc);
+	btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
+							&max_mclk_vddci);
+	btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
+							&max_mclk_vddc);
+
+	for (i = 0; i < ps->performance_level_count; i++) {
+		if (max_sclk_vddc) {
+			if (ps->performance_levels[i].sclk > max_sclk_vddc)
+				ps->performance_levels[i].sclk = max_sclk_vddc;
+		}
+		if (max_mclk_vddci) {
+			if (ps->performance_levels[i].mclk > max_mclk_vddci)
+				ps->performance_levels[i].mclk = max_mclk_vddci;
+		}
+		if (max_mclk_vddc) {
+			if (ps->performance_levels[i].mclk > max_mclk_vddc)
+				ps->performance_levels[i].mclk = max_mclk_vddc;
+		}
+	}
+
 	/* XXX validate the min clocks required for display */
 
 	if (disable_mclk_switching) {

Some files were not shown because too many files changed in this diff.