
Merge branch 'drm-intel-next' of git://people.freedesktop.org/~danvet/drm-intel into drm-next

drm-intel-next-2014-01-10:
- final bits for runtime D3 on Haswell from Paul (now enabled fully)
- parse the backlight modulation freq information in the VBT from Jani
  (but not yet used)
- more watermark improvements from Ville for ilk-ivb and bdw
- bugfixes for fastboot from Jesse
- watermark fix for i830M (but not yet everything)
- vlv vga hotplug w/a (Imre)
- piles of other small improvements, cleanups and fixes all over

Note that the pull request includes a backmerge of the last drm-fixes
pulled into Linus' tree - things were getting a bit too messy. So the
shortlog also contains a bunch of patches from Linus tree. Please yell if
you want me to frob it for you a bit.

* 'drm-intel-next' of git://people.freedesktop.org/~danvet/drm-intel: (609 commits)
  drm/i915/bdw: make sure south port interrupts are enabled properly v2
  drm/i915: Include more information in disabled hotplug interrupt warning
  drm/i915: Only complain about a rogue hotplug IRQ after disabling
  drm/i915: Only WARN about a stuck hotplug irq ONCE
  drm/i915: s/hotplugt_status_gen4/hotplug_status_g4x/
Dave Airlie committed this merge 11 years ago (commit cfd72a4c20).
100 changed files with 901 additions and 362 deletions (per-file additions and deletions listed below):
  1. 72 0
      Documentation/block/null_blk.txt
  2. 2 0
      Documentation/devicetree/bindings/clock/exynos5250-clock.txt
  3. 2 0
      Documentation/kernel-parameters.txt
  4. 240 0
      Documentation/module-signing.txt
  5. 6 2
      Documentation/networking/ip-sysctl.txt
  6. 31 7
      MAINTAINERS
  7. 10 14
      Makefile
  8. 7 1
      arch/arc/include/uapi/asm/unistd.h
  9. 1 1
      arch/arm/boot/dts/exynos5250.dtsi
  10. 14 14
      arch/arm/boot/dts/r8a7790.dtsi
  11. 1 1
      arch/arm/crypto/aesbs-core.S_shipped
  12. 1 1
      arch/arm/crypto/bsaes-armv7.pl
  13. 1 1
      arch/arm/include/asm/io.h
  14. 2 1
      arch/arm/include/asm/memory.h
  15. 1 1
      arch/arm/include/asm/xen/page.h
  16. 7 1
      arch/arm/kernel/traps.c
  17. 3 2
      arch/arm/mach-footbridge/dc21285-timer.c
  18. 6 1
      arch/arm/mach-omap2/board-ldp.c
  19. 38 0
      arch/arm/mach-omap2/display.c
  20. 2 2
      arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c
  21. 3 3
      arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
  22. 1 1
      arch/arm/mach-omap2/omap_hwmod_7xx_data.c
  23. 2 0
      arch/arm/mach-pxa/include/mach/lubbock.h
  24. 1 10
      arch/arm/mach-s3c64xx/mach-s3c64xx-dt.c
  25. 9 2
      arch/arm/mach-shmobile/board-armadillo800eva.c
  26. 1 1
      arch/arm/mach-shmobile/board-bockw.c
  27. 1 1
      arch/arm/mach-shmobile/board-kzm9g.c
  28. 3 1
      arch/arm/mach-shmobile/board-lager.c
  29. 2 2
      arch/arm/mach-shmobile/board-mackerel.c
  30. 3 3
      arch/arm/mm/flush.c
  31. 3 3
      arch/arm/xen/enlighten.c
  32. 0 4
      arch/arm64/include/asm/xen/page-coherent.h
  33. 18 20
      arch/arm64/kernel/ptrace.c
  34. 4 8
      arch/parisc/include/asm/cacheflush.h
  35. 2 3
      arch/parisc/include/asm/page.h
  36. 0 35
      arch/parisc/kernel/cache.c
  37. 5 1
      arch/powerpc/boot/dts/mpc5125twr.dts
  38. 1 1
      arch/powerpc/include/asm/exception-64s.h
  39. 4 0
      arch/powerpc/include/asm/kvm_book3s.h
  40. 2 0
      arch/powerpc/include/asm/kvm_book3s_asm.h
  41. 2 2
      arch/powerpc/include/asm/opal.h
  42. 1 1
      arch/powerpc/include/asm/switch_to.h
  43. 6 1
      arch/powerpc/include/asm/unaligned.h
  44. 1 0
      arch/powerpc/kernel/asm-offsets.c
  45. 3 3
      arch/powerpc/kernel/crash_dump.c
  46. 2 0
      arch/powerpc/kernel/head_64.S
  47. 16 16
      arch/powerpc/kernel/process.c
  48. 13 9
      arch/powerpc/kernel/prom_init.c
  49. 2 2
      arch/powerpc/kernel/ptrace.c
  50. 2 2
      arch/powerpc/kernel/setup-common.c
  51. 2 2
      arch/powerpc/kernel/smp.c
  52. 14 4
      arch/powerpc/kvm/book3s_64_mmu_hv.c
  53. 14 10
      arch/powerpc/kvm/book3s_hv.c
  54. 7 2
      arch/powerpc/kvm/book3s_hv_rm_mmu.c
  55. 13 10
      arch/powerpc/kvm/book3s_hv_rmhandlers.S
  56. 11 8
      arch/powerpc/kvm/book3s_interrupts.S
  57. 22 0
      arch/powerpc/kvm/book3s_pr.c
  58. 1 5
      arch/powerpc/kvm/book3s_rmhandlers.S
  59. 6 6
      arch/powerpc/kvm/booke.c
  60. 38 15
      arch/powerpc/lib/copyuser_64.S
  61. 5 15
      arch/powerpc/platforms/powernv/eeh-ioda.c
  62. 6 6
      arch/powerpc/platforms/powernv/opal-lpc.c
  63. 3 1
      arch/powerpc/platforms/powernv/opal-xscom.c
  64. 3 1
      arch/powerpc/platforms/powernv/pci.h
  65. 6 6
      arch/powerpc/platforms/pseries/lparcfg.c
  66. 15 13
      arch/powerpc/platforms/pseries/msi.c
  67. 23 23
      arch/powerpc/platforms/pseries/nvram.c
  68. 4 4
      arch/powerpc/platforms/pseries/pci.c
  69. 0 1
      arch/s390/Kconfig
  70. 2 0
      arch/s390/include/asm/smp.h
  71. 1 0
      arch/s390/kernel/setup.c
  72. 16 9
      arch/s390/kernel/smp.c
  73. 2 0
      arch/s390/pci/pci_event.c
  74. 5 0
      arch/sh/kernel/sh_ksyms_32.c
  75. 1 1
      arch/sh/lib/Makefile
  76. 2 2
      arch/sparc/include/asm/pgtable_64.h
  77. 2 2
      arch/sparc/include/asm/uaccess_64.h
  78. 1 1
      arch/sparc/kernel/iommu.c
  79. 2 3
      arch/sparc/kernel/ioport.c
  80. 1 0
      arch/sparc/kernel/kgdb_64.c
  81. 2 1
      arch/sparc/kernel/smp_64.c
  82. 1 0
      arch/x86/Kconfig
  83. 7 6
      arch/x86/include/asm/fpu-internal.h
  84. 9 2
      arch/x86/include/asm/pgtable.h
  85. 11 0
      arch/x86/include/asm/preempt.h
  86. 2 1
      arch/x86/kernel/cpu/intel.c
  87. 12 3
      arch/x86/kernel/cpu/perf_event.h
  88. 2 2
      arch/x86/kernel/entry_32.S
  89. 1 1
      arch/x86/kernel/entry_64.S
  90. 4 4
      arch/x86/kvm/lapic.c
  91. 1 2
      arch/x86/kvm/vmx.c
  92. 13 0
      arch/x86/mm/gup.c
  93. 13 0
      block/blk-mq-sysfs.c
  94. 0 1
      drivers/acpi/Kconfig
  95. 2 2
      drivers/acpi/ac.c
  96. 1 0
      drivers/acpi/acpi_lpss.c
  97. 0 1
      drivers/acpi/apei/Kconfig
  98. 1 0
      drivers/acpi/apei/erst.c
  99. 21 1
      drivers/acpi/battery.c
  100. 10 0
      drivers/acpi/bus.c

+ 72 - 0
Documentation/block/null_blk.txt

@@ -0,0 +1,72 @@
+Null block device driver
+================================================================================
+
+I. Overview
+
+The null block device (/dev/nullb*) is used for benchmarking the various
+block-layer implementations. It emulates a block device of X gigabytes in size.
+The following instances are possible:
+
+  Single-queue block-layer
+    - Request-based.
+    - Single submission queue per device.
+    - Implements IO scheduling algorithms (CFQ, Deadline, noop).
+  Multi-queue block-layer
+    - Request-based.
+    - Configurable submission queues per device.
+  No block-layer (Known as bio-based)
+    - Bio-based. IO requests are submitted directly to the device driver.
+    - Directly accepts bio data structure and returns them.
+
+All of them have a completion queue for each core in the system.
+
+II. Module parameters applicable for all instances:
+
+queue_mode=[0-2]: Default: 2-Multi-queue
+  Selects which block-layer the module should instantiate with.
+
+  0: Bio-based.
+  1: Single-queue.
+  2: Multi-queue.
+
+home_node=[0--nr_nodes]: Default: NUMA_NO_NODE
+  Selects what CPU node the data structures are allocated from.
+
+gb=[Size in GB]: Default: 250GB
+  The size of the device reported to the system.
+
+bs=[Block size (in bytes)]: Default: 512 bytes
+  The block size reported to the system.
+
+nr_devices=[Number of devices]: Default: 2
+  Number of block devices instantiated. They are instantiated as /dev/nullb0,
+  etc.
+
+irq_mode=[0-2]: Default: 1-Soft-irq
+  The completion mode used for completing IOs to the block-layer.
+
+  0: None.
+  1: Soft-irq. Uses IPI to complete IOs across CPU nodes. Simulates the overhead
+     when IOs are issued from another CPU node than the home the device is
+     connected to.
+  2: Timer: Waits a specific period (completion_nsec) for each IO before
+     completion.
+
+completion_nsec=[ns]: Default: 10.000ns
+  Combined with irq_mode=2 (timer). The time each completion event must wait.
+
+submit_queues=[0..nr_cpus]:
+  The number of submission queues attached to the device driver. If unset, it
+  defaults to 1 on single-queue and bio-based instances. For multi-queue,
+  it is ignored when use_per_node_hctx module parameter is 1.
+
+hw_queue_depth=[0..qdepth]: Default: 64
+  The hardware queue depth of the device.
+
+III: Multi-queue specific parameters
+
+use_per_node_hctx=[0/1]: Default: 0
+  0: The number of submit queues are set to the value of the submit_queues
+     parameter.
+  1: The multi-queue block layer is instantiated with a hardware dispatch
+     queue for each CPU node in the system.

+ 2 - 0
Documentation/devicetree/bindings/clock/exynos5250-clock.txt

@@ -159,6 +159,8 @@ clock which they consume.
  mixer			343
  hdmi			344
  g2d			345
+  mdma0			346
+  smmu_mdma0		347


   [Clock Muxes]

+ 2 - 0
Documentation/kernel-parameters.txt

@@ -1529,6 +1529,8 @@ bytes respectively. Such letter suffixes can also be entirely omitted.

			* atapi_dmadir: Enable ATAPI DMADIR bridge support

+			* disable: Disable this device.
+
			If there are multiple matching configurations changing
			the same attribute, the last one is used.


+ 240 - 0
Documentation/module-signing.txt

@@ -0,0 +1,240 @@
+			==============================
+			KERNEL MODULE SIGNING FACILITY
+			==============================
+
+CONTENTS
+
+ - Overview.
+ - Configuring module signing.
+ - Generating signing keys.
+ - Public keys in the kernel.
+ - Manually signing modules.
+ - Signed modules and stripping.
+ - Loading signed modules.
+ - Non-valid signatures and unsigned modules.
+ - Administering/protecting the private key.
+
+
+========
+OVERVIEW
+========
+
+The kernel module signing facility cryptographically signs modules during
+installation and then checks the signature upon loading the module.  This
+allows increased kernel security by disallowing the loading of unsigned modules
+or modules signed with an invalid key.  Module signing increases security by
+making it harder to load a malicious module into the kernel.  The module
+signature checking is done by the kernel so that it is not necessary to have
+trusted userspace bits.
+
+This facility uses X.509 ITU-T standard certificates to encode the public keys
+involved.  The signatures are not themselves encoded in any industrial standard
+type.  The facility currently only supports the RSA public key encryption
+standard (though it is pluggable and permits others to be used).  The possible
+hash algorithms that can be used are SHA-1, SHA-224, SHA-256, SHA-384, and
+SHA-512 (the algorithm is selected by data in the signature).
+
+
+==========================
+CONFIGURING MODULE SIGNING
+==========================
+
+The module signing facility is enabled by going to the "Enable Loadable Module
+Support" section of the kernel configuration and turning on
+
+	CONFIG_MODULE_SIG	"Module signature verification"
+
+This has a number of options available:
+
+ (1) "Require modules to be validly signed" (CONFIG_MODULE_SIG_FORCE)
+
+     This specifies how the kernel should deal with a module that has a
+     signature for which the key is not known or a module that is unsigned.
+
+     If this is off (ie. "permissive"), then modules for which the key is not
+     available and modules that are unsigned are permitted, but the kernel will
+     be marked as being tainted.
+
+     If this is on (ie. "restrictive"), only modules that have a valid
+     signature that can be verified by a public key in the kernel's possession
+     will be loaded.  All other modules will generate an error.
+
+     Irrespective of the setting here, if the module has a signature block that
+     cannot be parsed, it will be rejected out of hand.
+
+
+ (2) "Automatically sign all modules" (CONFIG_MODULE_SIG_ALL)
+
+     If this is on then modules will be automatically signed during the
+     modules_install phase of a build.  If this is off, then the modules must
+     be signed manually using:
+
+	scripts/sign-file
+
+
+ (3) "Which hash algorithm should modules be signed with?"
+
+     This presents a choice of which hash algorithm the installation phase will
+     sign the modules with:
+
+	CONFIG_SIG_SHA1		"Sign modules with SHA-1"
+	CONFIG_SIG_SHA224	"Sign modules with SHA-224"
+	CONFIG_SIG_SHA256	"Sign modules with SHA-256"
+	CONFIG_SIG_SHA384	"Sign modules with SHA-384"
+	CONFIG_SIG_SHA512	"Sign modules with SHA-512"
+
+     The algorithm selected here will also be built into the kernel (rather
+     than being a module) so that modules signed with that algorithm can have
+     their signatures checked without causing a dependency loop.
+
+
+=======================
+GENERATING SIGNING KEYS
+=======================
+
+Cryptographic keypairs are required to generate and check signatures.  A
+private key is used to generate a signature and the corresponding public key is
+used to check it.  The private key is only needed during the build, after which
+it can be deleted or stored securely.  The public key gets built into the
+kernel so that it can be used to check the signatures as the modules are
+loaded.
+
+Under normal conditions, the kernel build will automatically generate a new
+keypair using openssl if one does not exist in the files:
+
+	signing_key.priv
+	signing_key.x509
+
+during the building of vmlinux (the public part of the key needs to be built
+into vmlinux) using parameters in the:
+
+	x509.genkey
+
+file (which is also generated if it does not already exist).
+
+It is strongly recommended that you provide your own x509.genkey file.
+
+Most notably, in the x509.genkey file, the req_distinguished_name section
+should be altered from the default:
+
+	[ req_distinguished_name ]
+	O = Magrathea
+	CN = Glacier signing key
+	emailAddress = slartibartfast@magrathea.h2g2
+
+The generated RSA key size can also be set with:
+
+	[ req ]
+	default_bits = 4096
+
+
+It is also possible to manually generate the key private/public files using the
+x509.genkey key generation configuration file in the root node of the Linux
+kernel sources tree and the openssl command.  The following is an example to
+generate the public/private key files:
+
+	openssl req -new -nodes -utf8 -sha256 -days 36500 -batch -x509 \
+	   -config x509.genkey -outform DER -out signing_key.x509 \
+	   -keyout signing_key.priv
+
+
+=========================
+PUBLIC KEYS IN THE KERNEL
+=========================
+
+The kernel contains a ring of public keys that can be viewed by root.  They're
+in a keyring called ".system_keyring" that can be seen by:
+
+	[root@deneb ~]# cat /proc/keys
+	...
+	223c7853 I------     1 perm 1f030000     0     0 keyring   .system_keyring: 1
+	302d2d52 I------     1 perm 1f010000     0     0 asymmetri Fedora kernel signing key: d69a84e6bce3d216b979e9505b3e3ef9a7118079: X509.RSA a7118079 []
+	...
+
+Beyond the public key generated specifically for module signing, any file
+placed in the kernel source root directory or the kernel build root directory
+whose name is suffixed with ".x509" will be assumed to be an X.509 public key
+and will be added to the keyring.
+
+Further, the architecture code may take public keys from a hardware store and
+add those in also (e.g. from the UEFI key database).
+
+Finally, it is possible to add additional public keys by doing:
+
+	keyctl padd asymmetric "" [.system_keyring-ID] <[key-file]
+
+e.g.:
+
+	keyctl padd asymmetric "" 0x223c7853 <my_public_key.x509
+
+Note, however, that the kernel will only permit keys to be added to
+.system_keyring _if_ the new key's X.509 wrapper is validly signed by a key
+that is already resident in the .system_keyring at the time the key was added.
+
+
+=========================
+MANUALLY SIGNING MODULES
+=========================
+
+To manually sign a module, use the scripts/sign-file tool available in
+the Linux kernel source tree.  The script requires 4 arguments:
+
+	1.  The hash algorithm (e.g., sha256)
+	2.  The private key filename
+	3.  The public key filename
+	4.  The kernel module to be signed
+
+The following is an example to sign a kernel module:
+
+	scripts/sign-file sha512 kernel-signkey.priv \
+		kernel-signkey.x509 module.ko
+
+The hash algorithm used does not have to match the one configured, but if it
+doesn't, you should make sure that hash algorithm is either built into the
+kernel or can be loaded without requiring itself.
+
+
+============================
+SIGNED MODULES AND STRIPPING
+============================
+
+A signed module has a digital signature simply appended at the end.  The string
+"~Module signature appended~." at the end of the module's file confirms that a
+signature is present but it does not confirm that the signature is valid!
+
+Signed modules are BRITTLE as the signature is outside of the defined ELF
+container.  Thus they MAY NOT be stripped once the signature is computed and
+attached.  Note the entire module is the signed payload, including any and all
+debug information present at the time of signing.
+
+
+======================
+LOADING SIGNED MODULES
+======================
+
+Modules are loaded with insmod, modprobe, init_module() or finit_module(),
+exactly as for unsigned modules as no processing is done in userspace.  The
+signature checking is all done within the kernel.
+
+
+=========================================
+NON-VALID SIGNATURES AND UNSIGNED MODULES
+=========================================
+
+If CONFIG_MODULE_SIG_FORCE is enabled or enforcemodulesig=1 is supplied on
+the kernel command line, the kernel will only load validly signed modules
+for which it has a public key.   Otherwise, it will also load modules that are
+unsigned.   Any module for which the kernel has a key, but which proves to have
+a signature mismatch will not be permitted to load.
+
+Any module that has an unparseable signature will be rejected.
+
+
+=========================================
+ADMINISTERING/PROTECTING THE PRIVATE KEY
+=========================================
+
+Since the private key is used to sign modules, viruses and malware could use
+the private key to sign modules and compromise the operating system.  The
+private key must be either destroyed or moved to a secure location and not kept
+in the root node of the kernel source tree.

+ 6 - 2
Documentation/networking/ip-sysctl.txt

@@ -16,8 +16,12 @@ ip_default_ttl - INTEGER
	Default: 64 (as recommended by RFC1700)

ip_no_pmtu_disc - BOOLEAN
-	Disable Path MTU Discovery.
-	default FALSE
+	Disable Path MTU Discovery. If enabled and a
+	fragmentation-required ICMP is received, the PMTU to this
+	destination will be set to min_pmtu (see below). You will need
+	to raise min_pmtu to the smallest interface MTU on your system
+	manually if you want to avoid locally generated fragments.
+	Default: FALSE

min_pmtu - INTEGER
	default 552 - minimum discovered Path MTU
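(Illustrative only, not part of the patch: how the two settings described above are typically inspected and changed at runtime; the sysctl paths assumed here are net.ipv4.ip_no_pmtu_disc and net.ipv4.route.min_pmtu, and 1500 is just an example interface MTU.)

	# read the current values
	sysctl net.ipv4.ip_no_pmtu_disc net.ipv4.route.min_pmtu

	# disable PMTU discovery and raise min_pmtu to the smallest
	# interface MTU, as the updated text recommends
	sysctl -w net.ipv4.ip_no_pmtu_disc=1
	sysctl -w net.ipv4.route.min_pmtu=1500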

+ 31 - 7
MAINTAINERS

@@ -783,7 +783,7 @@ F:	arch/arm/boot/dts/sama*.dts
F:	arch/arm/boot/dts/sama*.dtsi

ARM/CALXEDA HIGHBANK ARCHITECTURE
-M:	Rob Herring <rob.herring@calxeda.com>
+M:	Rob Herring <robh@kernel.org>
L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S:	Maintained
F:	arch/arm/mach-highbank/
@@ -1008,6 +1008,8 @@ M:	Santosh Shilimkar <santosh.shilimkar@ti.com>
L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S:	Maintained
F:	arch/arm/mach-keystone/
+F:	drivers/clk/keystone/
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/ssantosh/linux-keystone.git

ARM/LOGICPD PXA270 MACHINE SUPPORT
M:	Lennert Buytenhek <kernel@wantstofly.org>
@@ -1366,6 +1368,9 @@ T:	git git://git.xilinx.com/linux-xlnx.git
S:	Supported
F:	arch/arm/mach-zynq/
F:	drivers/cpuidle/cpuidle-zynq.c
+N:	zynq
+N:	xilinx
+F:	drivers/clocksource/cadence_ttc_timer.c

ARM SMMU DRIVER
M:	Will Deacon <will.deacon@arm.com>
@@ -2823,8 +2828,10 @@ F:	include/uapi/drm/

INTEL DRM DRIVERS (excluding Poulsbo, Moorestown and derivative chipsets)
M:	Daniel Vetter <daniel.vetter@ffwll.ch>
+M:	Jani Nikula <jani.nikula@linux.intel.com>
L:	intel-gfx@lists.freedesktop.org
L:	dri-devel@lists.freedesktop.org
+Q:	http://patchwork.freedesktop.org/project/intel-gfx/
T:	git git://people.freedesktop.org/~danvet/drm-intel
S:	Supported
F:	drivers/gpu/drm/i915/
@@ -3761,9 +3768,11 @@ F:	include/uapi/linux/gigaset_dev.h

GPIO SUBSYSTEM
M:	Linus Walleij <linus.walleij@linaro.org>
-S:	Maintained
+M:	Alexandre Courbot <gnurou@gmail.com>
L:	linux-gpio@vger.kernel.org
-F:	Documentation/gpio.txt
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-gpio.git
+S:	Maintained
+F:	Documentation/gpio/
F:	drivers/gpio/
F:	include/linux/gpio*
F:	include/asm-generic/gpio.h
@@ -3831,6 +3840,12 @@ T:	git git://linuxtv.org/media_tree.git
S:	Maintained
F:	drivers/media/usb/gspca/

+GUID PARTITION TABLE (GPT)
+M:	Davidlohr Bueso <davidlohr@hp.com>
+L:	linux-efi@vger.kernel.org
+S:	Maintained
+F:	block/partitions/efi.*
+
STK1160 USB VIDEO CAPTURE DRIVER
M:	Ezequiel Garcia <elezegarcia@gmail.com>
L:	linux-media@vger.kernel.org
@@ -5911,12 +5926,21 @@ M:	Steffen Klassert <steffen.klassert@secunet.com>
M:	Herbert Xu <herbert@gondor.apana.org.au>
M:	"David S. Miller" <davem@davemloft.net>
L:	netdev@vger.kernel.org
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net.git
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/klassert/ipsec.git
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/klassert/ipsec-next.git
S:	Maintained
F:	net/xfrm/
F:	net/key/
F:	net/ipv4/xfrm*
+F:	net/ipv4/esp4.c
+F:	net/ipv4/ah4.c
+F:	net/ipv4/ipcomp.c
+F:	net/ipv4/ip_vti.c
F:	net/ipv6/xfrm*
+F:	net/ipv6/esp6.c
+F:	net/ipv6/ah6.c
+F:	net/ipv6/ipcomp6.c
+F:	net/ipv6/ip6_vti.c
F:	include/uapi/linux/xfrm.h
F:	include/net/xfrm.h

@@ -6237,7 +6261,7 @@ F:	drivers/i2c/busses/i2c-ocores.c

OPEN FIRMWARE AND FLATTENED DEVICE TREE
M:	Grant Likely <grant.likely@linaro.org>
-M:	Rob Herring <rob.herring@calxeda.com>
+M:	Rob Herring <robh+dt@kernel.org>
L:	devicetree@vger.kernel.org
W:	http://fdt.secretlab.ca
T:	git git://git.secretlab.ca/git/linux-2.6.git
@@ -6249,7 +6273,7 @@ K:	of_get_property
K:	of_match_table

OPEN FIRMWARE AND FLATTENED DEVICE TREE BINDINGS
-M:	Rob Herring <rob.herring@calxeda.com>
+M:	Rob Herring <robh+dt@kernel.org>
M:	Pawel Moll <pawel.moll@arm.com>
M:	Mark Rutland <mark.rutland@arm.com>
M:	Ian Campbell <ijc+devicetree@hellion.org.uk>
@@ -9571,7 +9595,7 @@ F:	drivers/xen/*swiotlb*

XFS FILESYSTEM
P:	Silicon Graphics Inc
-M:	Dave Chinner <dchinner@fromorbit.com>
+M:	Dave Chinner <david@fromorbit.com>
M:	Ben Myers <bpm@sgi.com>
M:	xfs@oss.sgi.com
L:	xfs@oss.sgi.com

+ 10 - 14
Makefile

@@ -1,7 +1,7 @@
VERSION = 3
PATCHLEVEL = 13
SUBLEVEL = 0
-EXTRAVERSION = -rc4
+EXTRAVERSION = -rc8
NAME = One Giant Leap for Frogkind

# *DOCUMENTATION*
@@ -732,19 +732,15 @@ export mod_strip_cmd
# Select initial ramdisk compression format, default is gzip(1).
# This shall be used by the dracut(8) tool while creating an initramfs image.
#
-INITRD_COMPRESS=gzip
-ifeq ($(CONFIG_RD_BZIP2), y)
-        INITRD_COMPRESS=bzip2
-else ifeq ($(CONFIG_RD_LZMA), y)
-        INITRD_COMPRESS=lzma
-else ifeq ($(CONFIG_RD_XZ), y)
-        INITRD_COMPRESS=xz
-else ifeq ($(CONFIG_RD_LZO), y)
-        INITRD_COMPRESS=lzo
-else ifeq ($(CONFIG_RD_LZ4), y)
-        INITRD_COMPRESS=lz4
-endif
-export INITRD_COMPRESS
+INITRD_COMPRESS-y                  := gzip
+INITRD_COMPRESS-$(CONFIG_RD_BZIP2) := bzip2
+INITRD_COMPRESS-$(CONFIG_RD_LZMA)  := lzma
+INITRD_COMPRESS-$(CONFIG_RD_XZ)    := xz
+INITRD_COMPRESS-$(CONFIG_RD_LZO)   := lzo
+INITRD_COMPRESS-$(CONFIG_RD_LZ4)   := lz4
+# do not export INITRD_COMPRESS, since we didn't actually
+# choose a sane default compression above.
+# export INITRD_COMPRESS := $(INITRD_COMPRESS-y)

ifdef CONFIG_MODULE_SIG_ALL
MODSECKEY = ./signing_key.priv

+ 7 - 1
arch/arc/include/uapi/asm/unistd.h

@@ -8,7 +8,11 @@

/******** no-legacy-syscalls-ABI *******/

-#ifndef _UAPI_ASM_ARC_UNISTD_H
+/*
+ * Non-typical guard macro to enable inclusion twice in ARCH sys.c
+ * That is how the Generic syscall wrapper generator works
+ */
+#if !defined(_UAPI_ASM_ARC_UNISTD_H) || defined(__SYSCALL)
#define _UAPI_ASM_ARC_UNISTD_H

#define __ARCH_WANT_SYS_EXECVE
@@ -36,4 +40,6 @@ __SYSCALL(__NR_arc_gettls, sys_arc_gettls)
#define __NR_sysfs		(__NR_arch_specific_syscall + 3)
__SYSCALL(__NR_sysfs, sys_sysfs)

+#undef __SYSCALL
+
#endif

+ 1 - 1
arch/arm/boot/dts/exynos5250.dtsi

@@ -559,7 +559,7 @@
			compatible = "arm,pl330", "arm,primecell";
			reg = <0x10800000 0x1000>;
			interrupts = <0 33 0>;
-			clocks = <&clock 271>;
+			clocks = <&clock 346>;
			clock-names = "apb_pclk";
			#dma-cells = <1>;
			#dma-channels = <8>;

+ 14 - 14
arch/arm/boot/dts/r8a7790.dtsi

@@ -87,9 +87,9 @@
		interrupts = <1 9 0xf04>;
	};

-	gpio0: gpio@ffc40000 {
+	gpio0: gpio@e6050000 {
		compatible = "renesas,gpio-r8a7790", "renesas,gpio-rcar";
-		reg = <0 0xffc40000 0 0x2c>;
+		reg = <0 0xe6050000 0 0x50>;
		interrupt-parent = <&gic>;
		interrupts = <0 4 0x4>;
		#gpio-cells = <2>;
@@ -99,9 +99,9 @@
		interrupt-controller;
	};

-	gpio1: gpio@ffc41000 {
+	gpio1: gpio@e6051000 {
		compatible = "renesas,gpio-r8a7790", "renesas,gpio-rcar";
-		reg = <0 0xffc41000 0 0x2c>;
+		reg = <0 0xe6051000 0 0x50>;
		interrupt-parent = <&gic>;
		interrupts = <0 5 0x4>;
		#gpio-cells = <2>;
@@ -111,9 +111,9 @@
		interrupt-controller;
	};

-	gpio2: gpio@ffc42000 {
+	gpio2: gpio@e6052000 {
		compatible = "renesas,gpio-r8a7790", "renesas,gpio-rcar";
-		reg = <0 0xffc42000 0 0x2c>;
+		reg = <0 0xe6052000 0 0x50>;
		interrupt-parent = <&gic>;
		interrupts = <0 6 0x4>;
		#gpio-cells = <2>;
@@ -123,9 +123,9 @@
		interrupt-controller;
	};

-	gpio3: gpio@ffc43000 {
+	gpio3: gpio@e6053000 {
		compatible = "renesas,gpio-r8a7790", "renesas,gpio-rcar";
-		reg = <0 0xffc43000 0 0x2c>;
+		reg = <0 0xe6053000 0 0x50>;
		interrupt-parent = <&gic>;
		interrupts = <0 7 0x4>;
		#gpio-cells = <2>;
@@ -135,9 +135,9 @@
		interrupt-controller;
	};

-	gpio4: gpio@ffc44000 {
+	gpio4: gpio@e6054000 {
		compatible = "renesas,gpio-r8a7790", "renesas,gpio-rcar";
-		reg = <0 0xffc44000 0 0x2c>;
+		reg = <0 0xe6054000 0 0x50>;
		interrupt-parent = <&gic>;
		interrupts = <0 8 0x4>;
		#gpio-cells = <2>;
@@ -147,9 +147,9 @@
		interrupt-controller;
	};

-	gpio5: gpio@ffc45000 {
+	gpio5: gpio@e6055000 {
		compatible = "renesas,gpio-r8a7790", "renesas,gpio-rcar";
-		reg = <0 0xffc45000 0 0x2c>;
+		reg = <0 0xe6055000 0 0x50>;
		interrupt-parent = <&gic>;
		interrupts = <0 9 0x4>;
		#gpio-cells = <2>;
@@ -241,7 +241,7 @@

	sdhi0: sdhi@ee100000 {
		compatible = "renesas,sdhi-r8a7790";
-		reg = <0 0xee100000 0 0x100>;
+		reg = <0 0xee100000 0 0x200>;
		interrupt-parent = <&gic>;
		interrupts = <0 165 4>;
		cap-sd-highspeed;
@@ -250,7 +250,7 @@

	sdhi1: sdhi@ee120000 {
		compatible = "renesas,sdhi-r8a7790";
-		reg = <0 0xee120000 0 0x100>;
+		reg = <0 0xee120000 0 0x200>;
		interrupt-parent = <&gic>;
		interrupts = <0 166 4>;
		cap-sd-highspeed;

+ 1 - 1
arch/arm/crypto/aesbs-core.S_shipped

@@ -58,7 +58,7 @@
# define VFP_ABI_FRAME	0
# define BSAES_ASM_EXTENDED_KEY
# define XTS_CHAIN_TWEAK
-# define __ARM_ARCH__ __LINUX_ARM_ARCH__
+# define __ARM_ARCH__	7
#endif

#ifdef __thumb__

+ 1 - 1
arch/arm/crypto/bsaes-armv7.pl

@@ -701,7 +701,7 @@ $code.=<<___;
# define VFP_ABI_FRAME	0
# define BSAES_ASM_EXTENDED_KEY
# define XTS_CHAIN_TWEAK
-# define __ARM_ARCH__ __LINUX_ARM_ARCH__
+# define __ARM_ARCH__	7
#endif

#ifdef __thumb__

+ 1 - 1
arch/arm/include/asm/io.h

@@ -329,7 +329,7 @@ extern void _memset_io(volatile void __iomem *, int, size_t);
 */
#define ioremap(cookie,size)		__arm_ioremap((cookie), (size), MT_DEVICE)
#define ioremap_nocache(cookie,size)	__arm_ioremap((cookie), (size), MT_DEVICE)
-#define ioremap_cached(cookie,size)	__arm_ioremap((cookie), (size), MT_DEVICE_CACHED)
+#define ioremap_cache(cookie,size)	__arm_ioremap((cookie), (size), MT_DEVICE_CACHED)
#define ioremap_wc(cookie,size)		__arm_ioremap((cookie), (size), MT_DEVICE_WC)
#define iounmap				__arm_iounmap


+ 2 - 1
arch/arm/include/asm/memory.h

@@ -347,7 +347,8 @@ static inline __deprecated void *bus_to_virt(unsigned long x)
#define ARCH_PFN_OFFSET		PHYS_PFN_OFFSET

#define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
-#define virt_addr_valid(kaddr)	((unsigned long)(kaddr) >= PAGE_OFFSET && (unsigned long)(kaddr) < (unsigned long)high_memory)
+#define virt_addr_valid(kaddr)	(((unsigned long)(kaddr) >= PAGE_OFFSET && (unsigned long)(kaddr) < (unsigned long)high_memory) \
+					&& pfn_valid(__pa(kaddr) >> PAGE_SHIFT) )

#endif


+ 1 - 1
arch/arm/include/asm/xen/page.h

@@ -117,6 +117,6 @@ static inline bool set_phys_to_machine(unsigned long pfn, unsigned long mfn)
	return __set_phys_to_machine(pfn, mfn);
}

-#define xen_remap(cookie, size) ioremap_cached((cookie), (size));
+#define xen_remap(cookie, size) ioremap_cache((cookie), (size));

#endif /* _ASM_ARM_XEN_PAGE_H */

+ 7 - 1
arch/arm/kernel/traps.c

@@ -36,7 +36,13 @@
#include <asm/system_misc.h>
#include <asm/opcodes.h>

-static const char *handler[]= { "prefetch abort", "data abort", "address exception", "interrupt" };
+static const char *handler[]= {
+	"prefetch abort",
+	"data abort",
+	"address exception",
+	"interrupt",
+	"undefined instruction",
+};

void *vectors_page;


+ 3 - 2
arch/arm/mach-footbridge/dc21285-timer.c

@@ -96,11 +96,12 @@ static struct irqaction footbridge_timer_irq = {
void __init footbridge_timer_init(void)
{
	struct clock_event_device *ce = &ckevt_dc21285;
+	unsigned rate = DIV_ROUND_CLOSEST(mem_fclk_21285, 16);

-	clocksource_register_hz(&cksrc_dc21285, (mem_fclk_21285 + 8) / 16);
+	clocksource_register_hz(&cksrc_dc21285, rate);

	setup_irq(ce->irq, &footbridge_timer_irq);

	ce->cpumask = cpumask_of(smp_processor_id());
-	clockevents_config_and_register(ce, mem_fclk_21285, 0x4, 0xffffff);
+	clockevents_config_and_register(ce, rate, 0x4, 0xffffff);
}

+ 6 - 1
arch/arm/mach-omap2/board-ldp.c

@@ -242,12 +242,18 @@ static void __init ldp_display_init(void)

static int ldp_twl_gpio_setup(struct device *dev, unsigned gpio, unsigned ngpio)
{
+	int res;
+
	/* LCD enable GPIO */
	ldp_lcd_pdata.enable_gpio = gpio + 7;

	/* Backlight enable GPIO */
	ldp_lcd_pdata.backlight_gpio = gpio + 15;

+	res = platform_device_register(&ldp_lcd_device);
+	if (res)
+		pr_err("Unable to register LCD: %d\n", res);
+
	return 0;
}

@@ -346,7 +352,6 @@ static struct omap2_hsmmc_info mmc[] __initdata = {

static struct platform_device *ldp_devices[] __initdata = {
	&ldp_gpio_keys_device,
-	&ldp_lcd_device,
};

#ifdef CONFIG_OMAP_MUX

+ 38 - 0
arch/arm/mach-omap2/display.c

@@ -101,13 +101,51 @@ static const struct omap_dss_hwmod_data omap4_dss_hwmod_data[] __initconst = {
	{ "dss_hdmi", "omapdss_hdmi", -1 },
};

+static int omap4_dsi_mux_pads(int dsi_id, unsigned lanes)
+{
+	u32 enable_mask, enable_shift;
+	u32 pipd_mask, pipd_shift;
+	u32 reg;
+
+	if (dsi_id == 0) {
+		enable_mask = OMAP4_DSI1_LANEENABLE_MASK;
+		enable_shift = OMAP4_DSI1_LANEENABLE_SHIFT;
+		pipd_mask = OMAP4_DSI1_PIPD_MASK;
+		pipd_shift = OMAP4_DSI1_PIPD_SHIFT;
+	} else if (dsi_id == 1) {
+		enable_mask = OMAP4_DSI2_LANEENABLE_MASK;
+		enable_shift = OMAP4_DSI2_LANEENABLE_SHIFT;
+		pipd_mask = OMAP4_DSI2_PIPD_MASK;
+		pipd_shift = OMAP4_DSI2_PIPD_SHIFT;
+	} else {
+		return -ENODEV;
+	}
+
+	reg = omap4_ctrl_pad_readl(OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_DSIPHY);
+
+	reg &= ~enable_mask;
+	reg &= ~pipd_mask;
+
+	reg |= (lanes << enable_shift) & enable_mask;
+	reg |= (lanes << pipd_shift) & pipd_mask;
+
+	omap4_ctrl_pad_writel(reg, OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_DSIPHY);
+
+	return 0;
+}
+
static int omap_dsi_enable_pads(int dsi_id, unsigned lane_mask)
{
+	if (cpu_is_omap44xx())
+		return omap4_dsi_mux_pads(dsi_id, lane_mask);
+
	return 0;
}

static void omap_dsi_disable_pads(int dsi_id, unsigned lane_mask)
{
+	if (cpu_is_omap44xx())
+		omap4_dsi_mux_pads(dsi_id, 0);
}

static int omap_dss_set_min_bus_tput(struct device *dev, unsigned long tput)

+ 2 - 2
arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c

@@ -796,7 +796,7 @@ struct omap_hwmod omap2xxx_counter_32k_hwmod = {

/* gpmc */
static struct omap_hwmod_irq_info omap2xxx_gpmc_irqs[] = {
-	{ .irq = 20 },
+	{ .irq = 20 + OMAP_INTC_START, },
	{ .irq = -1 }
};

@@ -841,7 +841,7 @@ static struct omap_hwmod_class omap2_rng_hwmod_class = {
};

static struct omap_hwmod_irq_info omap2_rng_mpu_irqs[] = {
-	{ .irq = 52 },
+	{ .irq = 52 + OMAP_INTC_START, },
	{ .irq = -1 }
};


+ 3 - 3
arch/arm/mach-omap2/omap_hwmod_3xxx_data.c

@@ -2165,7 +2165,7 @@ static struct omap_hwmod_class omap3xxx_gpmc_hwmod_class = {
};

static struct omap_hwmod_irq_info omap3xxx_gpmc_irqs[] = {
-	{ .irq = 20 },
+	{ .irq = 20 + OMAP_INTC_START, },
	{ .irq = -1 }
};

@@ -2999,7 +2999,7 @@ static struct omap_mmu_dev_attr mmu_isp_dev_attr = {

static struct omap_hwmod omap3xxx_mmu_isp_hwmod;
static struct omap_hwmod_irq_info omap3xxx_mmu_isp_irqs[] = {
-	{ .irq = 24 },
+	{ .irq = 24 + OMAP_INTC_START, },
	{ .irq = -1 }
};

@@ -3041,7 +3041,7 @@ static struct omap_mmu_dev_attr mmu_iva_dev_attr = {

static struct omap_hwmod omap3xxx_mmu_iva_hwmod;
static struct omap_hwmod_irq_info omap3xxx_mmu_iva_irqs[] = {
-	{ .irq = 28 },
+	{ .irq = 28 + OMAP_INTC_START, },
	{ .irq = -1 }
};


+ 1 - 1
arch/arm/mach-omap2/omap_hwmod_7xx_data.c

@@ -1637,7 +1637,7 @@ static struct omap_hwmod dra7xx_uart1_hwmod = {
	.class		= &dra7xx_uart_hwmod_class,
	.clkdm_name	= "l4per_clkdm",
	.main_clk	= "uart1_gfclk_mux",
-	.flags		= HWMOD_SWSUP_SIDLE_ACT,
+	.flags		= HWMOD_SWSUP_SIDLE_ACT | DEBUG_OMAP2UART1_FLAGS,
	.prcm = {
		.omap4 = {
			.clkctrl_offs = DRA7XX_CM_L4PER_UART1_CLKCTRL_OFFSET,

+ 2 - 0
arch/arm/mach-pxa/include/mach/lubbock.h

@@ -10,6 +10,8 @@
 * published by the Free Software Foundation.
 */

+#include <mach/irqs.h>
+
#define LUBBOCK_ETH_PHYS	PXA_CS3_PHYS

#define LUBBOCK_FPGA_PHYS	PXA_CS2_PHYS

+ 1 - 10
arch/arm/mach-s3c64xx/mach-s3c64xx-dt.c

@@ -8,8 +8,6 @@
 * published by the Free Software Foundation.
*/

-#include <linux/clk-provider.h>
-#include <linux/irqchip.h>
#include <linux/of_platform.h>

#include <asm/mach/arch.h>
@@ -48,15 +46,9 @@ static void __init s3c64xx_dt_map_io(void)
		panic("SoC is not S3C64xx!");
}

-static void __init s3c64xx_dt_init_irq(void)
-{
-	of_clk_init(NULL);
-	samsung_wdt_reset_of_init();
-	irqchip_init();
-};
-
static void __init s3c64xx_dt_init_machine(void)
{
+	samsung_wdt_reset_of_init();
	of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
}

@@ -79,7 +71,6 @@ DT_MACHINE_START(S3C6400_DT, "Samsung S3C64xx (Flattened Device Tree)")
	/* Maintainer: Tomasz Figa <tomasz.figa@gmail.com> */
	.dt_compat	= s3c64xx_dt_compat,
	.map_io		= s3c64xx_dt_map_io,
-	.init_irq	= s3c64xx_dt_init_irq,
	.init_machine	= s3c64xx_dt_init_machine,
	.restart        = s3c64xx_dt_restart,
MACHINE_END

+ 9 - 2
arch/arm/mach-shmobile/board-armadillo800eva.c

@@ -483,7 +483,7 @@ static struct platform_device lcdc0_device = {
	.id		= 0,
	.dev	= {
		.platform_data	= &lcdc0_info,
-		.coherent_dma_mask = ~0,
+		.coherent_dma_mask = DMA_BIT_MASK(32),
	},
};

@@ -580,7 +580,7 @@ static struct platform_device hdmi_lcdc_device = {
	.id		= 1,
	.dev	= {
		.platform_data	= &hdmi_lcdc_info,
-		.coherent_dma_mask = ~0,
+		.coherent_dma_mask = DMA_BIT_MASK(32),
	},
};

@@ -614,6 +614,11 @@ static struct regulator_consumer_supply fixed3v3_power_consumers[] = {
	REGULATOR_SUPPLY("vqmmc", "sh_mmcif"),
};

+/* Fixed 3.3V regulator used by LCD backlight */
+static struct regulator_consumer_supply fixed5v0_power_consumers[] = {
+	REGULATOR_SUPPLY("power", "pwm-backlight.0"),
+};
+
/* Fixed 3.3V regulator to be used by SDHI0 */
static struct regulator_consumer_supply vcc_sdhi0_consumers[] = {
	REGULATOR_SUPPLY("vmmc", "sh_mobile_sdhi.0"),
@@ -1196,6 +1201,8 @@ static void __init eva_init(void)

	regulator_register_always_on(0, "fixed-3.3V", fixed3v3_power_consumers,
				     ARRAY_SIZE(fixed3v3_power_consumers), 3300000);
+	regulator_register_always_on(3, "fixed-5.0V", fixed5v0_power_consumers,
+				     ARRAY_SIZE(fixed5v0_power_consumers), 5000000);

	pinctrl_register_mappings(eva_pinctrl_map, ARRAY_SIZE(eva_pinctrl_map));
	pwm_add_table(pwm_lookup, ARRAY_SIZE(pwm_lookup));

+ 1 - 1
arch/arm/mach-shmobile/board-bockw.c

@@ -679,7 +679,7 @@ static void __init bockw_init(void)
			.id             = i,
			.data           = &rsnd_card_info[i],
			.size_data      = sizeof(struct asoc_simple_card_info),
-			.dma_mask       = ~0,
+			.dma_mask	= DMA_BIT_MASK(32),
		};

		platform_device_register_full(&cardinfo);

+ 1 - 1
arch/arm/mach-shmobile/board-kzm9g.c

@@ -334,7 +334,7 @@ static struct platform_device lcdc_device = {
	.resource	= lcdc_resources,
	.dev	= {
		.platform_data	= &lcdc_info,
-		.coherent_dma_mask = ~0,
+		.coherent_dma_mask = DMA_BIT_MASK(32),
	},
};


+ 3 - 1
arch/arm/mach-shmobile/board-lager.c

@@ -245,7 +245,9 @@ static void __init lager_init(void)
{
	lager_add_standard_devices();

-	phy_register_fixup_for_id("r8a7790-ether-ff:01", lager_ksz8041_fixup);
+	if (IS_ENABLED(CONFIG_PHYLIB))
+		phy_register_fixup_for_id("r8a7790-ether-ff:01",
+					  lager_ksz8041_fixup);
}

static const char * const lager_boards_compat_dt[] __initconst = {

+ 2 - 2
arch/arm/mach-shmobile/board-mackerel.c

@@ -409,7 +409,7 @@ static struct platform_device lcdc_device = {
	.resource	= lcdc_resources,
	.dev	= {
		.platform_data	= &lcdc_info,
-		.coherent_dma_mask = ~0,
+		.coherent_dma_mask = DMA_BIT_MASK(32),
	},
};

@@ -499,7 +499,7 @@ static struct platform_device hdmi_lcdc_device = {
	.id		= 1,
	.dev	= {
		.platform_data	= &hdmi_lcdc_info,
-		.coherent_dma_mask = ~0,
+		.coherent_dma_mask = DMA_BIT_MASK(32),
	},
};


+ 3 - 3
arch/arm/mm/flush.c

@@ -175,16 +175,16 @@ void __flush_dcache_page(struct address_space *mapping, struct page *page)
		unsigned long i;
		if (cache_is_vipt_nonaliasing()) {
			for (i = 0; i < (1 << compound_order(page)); i++) {
-				void *addr = kmap_atomic(page);
+				void *addr = kmap_atomic(page + i);
				__cpuc_flush_dcache_area(addr, PAGE_SIZE);
				kunmap_atomic(addr);
			}
		} else {
			for (i = 0; i < (1 << compound_order(page)); i++) {
-				void *addr = kmap_high_get(page);
+				void *addr = kmap_high_get(page + i);
				if (addr) {
					__cpuc_flush_dcache_area(addr, PAGE_SIZE);
-					kunmap_high(page);
+					kunmap_high(page + i);
				}
			}
		}

+ 3 - 3
arch/arm/xen/enlighten.c

@@ -96,7 +96,7 @@ static int remap_pte_fn(pte_t *ptep, pgtable_t token, unsigned long addr,
	struct remap_data *info = data;
	struct page *page = info->pages[info->index++];
	unsigned long pfn = page_to_pfn(page);
-	pte_t pte = pfn_pte(pfn, info->prot);
+	pte_t pte = pte_mkspecial(pfn_pte(pfn, info->prot));

	if (map_foreign_page(pfn, info->fgmfn, info->domid))
		return -EFAULT;
@@ -224,10 +224,10 @@ static int __init xen_guest_init(void)
	}
	if (of_address_to_resource(node, GRANT_TABLE_PHYSADDR, &res))
		return 0;
-	xen_hvm_resume_frames = res.start >> PAGE_SHIFT;
+	xen_hvm_resume_frames = res.start;
	xen_events_irq = irq_of_parse_and_map(node, 0);
	pr_info("Xen %s support found, events_irq=%d gnttab_frame_pfn=%lx\n",
-			version, xen_events_irq, xen_hvm_resume_frames);
+			version, xen_events_irq, (xen_hvm_resume_frames >> PAGE_SHIFT));
	xen_domain_type = XEN_HVM_DOMAIN;

	xen_setup_features();

+ 0 - 4
arch/arm64/include/asm/xen/page-coherent.h

@@ -23,25 +23,21 @@ static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
	     unsigned long offset, size_t size, enum dma_data_direction dir,
	     struct dma_attrs *attrs)
{
-	__generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs);
}

static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir,
		struct dma_attrs *attrs)
{
-	__generic_dma_ops(hwdev)->unmap_page(hwdev, handle, size, dir, attrs);
}

static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
-	__generic_dma_ops(hwdev)->sync_single_for_cpu(hwdev, handle, size, dir);
}

static inline void xen_dma_sync_single_for_device(struct device *hwdev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
-	__generic_dma_ops(hwdev)->sync_single_for_device(hwdev, handle, size, dir);
}
#endif /* _ASM_ARM64_XEN_PAGE_COHERENT_H */

+ 18 - 20
arch/arm64/kernel/ptrace.c

@@ -214,31 +214,29 @@ static int ptrace_hbp_fill_attr_ctrl(unsigned int note_type,
{
	int err, len, type, disabled = !ctrl.enabled;

-	if (disabled) {
-		len = 0;
-		type = HW_BREAKPOINT_EMPTY;
-	} else {
-		err = arch_bp_generic_fields(ctrl, &len, &type);
-		if (err)
-			return err;
-
-		switch (note_type) {
-		case NT_ARM_HW_BREAK:
-			if ((type & HW_BREAKPOINT_X) != type)
-				return -EINVAL;
-			break;
-		case NT_ARM_HW_WATCH:
-			if ((type & HW_BREAKPOINT_RW) != type)
-				return -EINVAL;
-			break;
-		default:
+	attr->disabled = disabled;
+	if (disabled)
+		return 0;
+
+	err = arch_bp_generic_fields(ctrl, &len, &type);
+	if (err)
+		return err;
+
+	switch (note_type) {
+	case NT_ARM_HW_BREAK:
+		if ((type & HW_BREAKPOINT_X) != type)
			return -EINVAL;
-		}
+		break;
+	case NT_ARM_HW_WATCH:
+		if ((type & HW_BREAKPOINT_RW) != type)
+			return -EINVAL;
+		break;
+	default:
+		return -EINVAL;
	}

	attr->bp_len	= len;
	attr->bp_type	= type;
-	attr->disabled	= disabled;

	return 0;
}

+ 4 - 8
arch/parisc/include/asm/cacheflush.h

@@ -125,42 +125,38 @@ flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vma
void mark_rodata_ro(void);
#endif

-#ifdef CONFIG_PA8X00
-/* Only pa8800, pa8900 needs this */
-
#include <asm/kmap_types.h>

#define ARCH_HAS_KMAP

-void kunmap_parisc(void *addr);
-
static inline void *kmap(struct page *page)
{
	might_sleep();
+	flush_dcache_page(page);
	return page_address(page);
}

static inline void kunmap(struct page *page)
{
-	kunmap_parisc(page_address(page));
+	flush_kernel_dcache_page_addr(page_address(page));
}

static inline void *kmap_atomic(struct page *page)
{
	pagefault_disable();
+	flush_dcache_page(page);
	return page_address(page);
}

static inline void __kunmap_atomic(void *addr)
{
-	kunmap_parisc(addr);
+	flush_kernel_dcache_page_addr(addr);
	pagefault_enable();
}

#define kmap_atomic_prot(page, prot)	kmap_atomic(page)
#define kmap_atomic_pfn(pfn)	kmap_atomic(pfn_to_page(pfn))
#define kmap_atomic_to_page(ptr)	virt_to_page(ptr)
-#endif

#endif /* _PARISC_CACHEFLUSH_H */


+ 2 - 3
arch/parisc/include/asm/page.h

@@ -28,9 +28,8 @@ struct page;

void clear_page_asm(void *page);
void copy_page_asm(void *to, void *from);
-void clear_user_page(void *vto, unsigned long vaddr, struct page *pg);
-void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
-			   struct page *pg);
+#define clear_user_page(vto, vaddr, page) clear_page_asm(vto)
+#define copy_user_page(vto, vfrom, vaddr, page) copy_page_asm(vto, vfrom)

/* #define CONFIG_PARISC_TMPALIAS */


+ 0 - 35
arch/parisc/kernel/cache.c

@@ -388,41 +388,6 @@ void flush_kernel_dcache_page_addr(void *addr)
}
EXPORT_SYMBOL(flush_kernel_dcache_page_addr);

-void clear_user_page(void *vto, unsigned long vaddr, struct page *page)
-{
-	clear_page_asm(vto);
-	if (!parisc_requires_coherency())
-		flush_kernel_dcache_page_asm(vto);
-}
-EXPORT_SYMBOL(clear_user_page);
-
-void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
-	struct page *pg)
-{
-	/* Copy using kernel mapping.  No coherency is needed
-	   (all in kmap/kunmap) on machines that don't support
-	   non-equivalent aliasing.  However, the `from' page
-	   needs to be flushed before it can be accessed through
-	   the kernel mapping. */
-	preempt_disable();
-	flush_dcache_page_asm(__pa(vfrom), vaddr);
-	preempt_enable();
-	copy_page_asm(vto, vfrom);
-	if (!parisc_requires_coherency())
-		flush_kernel_dcache_page_asm(vto);
-}
-EXPORT_SYMBOL(copy_user_page);
-
-#ifdef CONFIG_PA8X00
-
-void kunmap_parisc(void *addr)
-{
-	if (parisc_requires_coherency())
-		flush_kernel_dcache_page_addr(addr);
-}
-EXPORT_SYMBOL(kunmap_parisc);
-#endif
-
void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
{
	unsigned long flags;

+ 5 - 1
arch/powerpc/boot/dts/mpc5125twr.dts

@@ -58,7 +58,6 @@
		compatible = "fsl,mpc5121-immr";
		#address-cells = <1>;
		#size-cells = <1>;
-		#interrupt-cells = <2>;
		ranges = <0x0 0x80000000 0x400000>;
		reg = <0x80000000 0x400000>;
		bus-frequency = <66000000>;	// 66 MHz ips bus
@@ -189,6 +188,10 @@
			reg = <0xA000 0x1000>;
		};

+		// disable USB1 port
+		// TODO:
+		// correct pinmux config and fix USB3320 ulpi dependency
+		// before re-enabling it
		usb@3000 {
			compatible = "fsl,mpc5121-usb2-dr";
			reg = <0x3000 0x400>;
@@ -197,6 +200,7 @@
			interrupts = <43 0x8>;
			dr_mode = "host";
			phy_type = "ulpi";
+			status = "disabled";
		};

		// 5125 PSCs are not 52xx or 5121 PSC compatible

+ 1 - 1
arch/powerpc/include/asm/exception-64s.h

@@ -284,7 +284,7 @@ do_kvm_##n:								\
	subi	r1,r1,INT_FRAME_SIZE;	/* alloc frame on kernel stack	*/ \
	beq-	1f;							   \
	ld	r1,PACAKSAVE(r13);	/* kernel stack to use		*/ \
-1:	cmpdi	cr1,r1,0;		/* check if r1 is in userspace	*/ \
+1:	cmpdi	cr1,r1,-INT_FRAME_SIZE;	/* check if r1 is in userspace	*/ \
	blt+	cr1,3f;			/* abort if it is		*/ \
	li	r1,(n);			/* will be reloaded later	*/ \
	sth	r1,PACA_TRAP_SAVE(r13);					   \

+ 4 - 0
arch/powerpc/include/asm/kvm_book3s.h

@@ -192,6 +192,10 @@ extern void kvmppc_load_up_vsx(void);
 extern u32 kvmppc_alignment_dsisr(struct kvm_vcpu *vcpu, unsigned int inst);
 extern u32 kvmppc_alignment_dsisr(struct kvm_vcpu *vcpu, unsigned int inst);
 extern ulong kvmppc_alignment_dar(struct kvm_vcpu *vcpu, unsigned int inst);
 extern ulong kvmppc_alignment_dar(struct kvm_vcpu *vcpu, unsigned int inst);
 extern int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd);
 extern int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd);
+extern void kvmppc_copy_to_svcpu(struct kvmppc_book3s_shadow_vcpu *svcpu,
+				 struct kvm_vcpu *vcpu);
+extern void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu,
+				   struct kvmppc_book3s_shadow_vcpu *svcpu);
 
 
 static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu)
 static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu)
 {
 {

+ 2 - 0
arch/powerpc/include/asm/kvm_book3s_asm.h

@@ -79,6 +79,7 @@ struct kvmppc_host_state {
 	ulong vmhandler;
 	ulong vmhandler;
 	ulong scratch0;
 	ulong scratch0;
 	ulong scratch1;
 	ulong scratch1;
+	ulong scratch2;
 	u8 in_guest;
 	u8 in_guest;
 	u8 restore_hid5;
 	u8 restore_hid5;
 	u8 napping;
 	u8 napping;
@@ -106,6 +107,7 @@ struct kvmppc_host_state {
 };
 };
 
 
 struct kvmppc_book3s_shadow_vcpu {
 struct kvmppc_book3s_shadow_vcpu {
+	bool in_use;
 	ulong gpr[14];
 	ulong gpr[14];
 	u32 cr;
 	u32 cr;
 	u32 xer;
 	u32 xer;

+ 2 - 2
arch/powerpc/include/asm/opal.h

@@ -720,13 +720,13 @@ int64_t opal_pci_next_error(uint64_t phb_id, uint64_t *first_frozen_pe,
 int64_t opal_pci_poll(uint64_t phb_id);
 int64_t opal_pci_poll(uint64_t phb_id);
 int64_t opal_return_cpu(void);
 int64_t opal_return_cpu(void);
 
 
-int64_t opal_xscom_read(uint32_t gcid, uint32_t pcb_addr, uint64_t *val);
+int64_t opal_xscom_read(uint32_t gcid, uint32_t pcb_addr, __be64 *val);
 int64_t opal_xscom_write(uint32_t gcid, uint32_t pcb_addr, uint64_t val);
 int64_t opal_xscom_write(uint32_t gcid, uint32_t pcb_addr, uint64_t val);
 
 
 int64_t opal_lpc_write(uint32_t chip_id, enum OpalLPCAddressType addr_type,
 int64_t opal_lpc_write(uint32_t chip_id, enum OpalLPCAddressType addr_type,
 		       uint32_t addr, uint32_t data, uint32_t sz);
 		       uint32_t addr, uint32_t data, uint32_t sz);
 int64_t opal_lpc_read(uint32_t chip_id, enum OpalLPCAddressType addr_type,
 int64_t opal_lpc_read(uint32_t chip_id, enum OpalLPCAddressType addr_type,
-		      uint32_t addr, uint32_t *data, uint32_t sz);
+		      uint32_t addr, __be32 *data, uint32_t sz);
 int64_t opal_validate_flash(uint64_t buffer, uint32_t *size, uint32_t *result);
 int64_t opal_validate_flash(uint64_t buffer, uint32_t *size, uint32_t *result);
 int64_t opal_manage_flash(uint8_t op);
 int64_t opal_manage_flash(uint8_t op);
 int64_t opal_update_flash(uint64_t blk_list);
 int64_t opal_update_flash(uint64_t blk_list);

+ 1 - 1
arch/powerpc/include/asm/switch_to.h

@@ -35,7 +35,7 @@ extern void giveup_vsx(struct task_struct *);
 extern void enable_kernel_spe(void);
 extern void enable_kernel_spe(void);
 extern void giveup_spe(struct task_struct *);
 extern void giveup_spe(struct task_struct *);
 extern void load_up_spe(struct task_struct *);
 extern void load_up_spe(struct task_struct *);
-extern void switch_booke_debug_regs(struct thread_struct *new_thread);
+extern void switch_booke_debug_regs(struct debug_reg *new_debug);
 
 
 #ifndef CONFIG_SMP
 #ifndef CONFIG_SMP
 extern void discard_lazy_cpu_state(void);
 extern void discard_lazy_cpu_state(void);

+ 6 - 1
arch/powerpc/include/asm/unaligned.h

@@ -4,13 +4,18 @@
 #ifdef __KERNEL__
 #ifdef __KERNEL__
 
 
 /*
 /*
- * The PowerPC can do unaligned accesses itself in big endian mode.
+ * The PowerPC can do unaligned accesses itself based on its endian mode.
  */
  */
 #include <linux/unaligned/access_ok.h>
 #include <linux/unaligned/access_ok.h>
 #include <linux/unaligned/generic.h>
 #include <linux/unaligned/generic.h>
 
 
+#ifdef __LITTLE_ENDIAN__
+#define get_unaligned	__get_unaligned_le
+#define put_unaligned	__put_unaligned_le
+#else
 #define get_unaligned	__get_unaligned_be
 #define get_unaligned	__get_unaligned_be
 #define put_unaligned	__put_unaligned_be
 #define put_unaligned	__put_unaligned_be
+#endif
 
 
 #endif	/* __KERNEL__ */
 #endif	/* __KERNEL__ */
 #endif	/* _ASM_POWERPC_UNALIGNED_H */
 #endif	/* _ASM_POWERPC_UNALIGNED_H */
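
The #ifdef above simply points get_unaligned()/put_unaligned() at the little-endian helpers when the kernel itself is built little-endian. As a rough userspace illustration of what the byte-order-specific accessors do (this is not the kernel's <linux/unaligned/...> implementation, just a model of it):

#include <stdint.h>
#include <stdio.h>

/* Byte-wise unaligned 32-bit loads, one per byte order.  Which of the
 * two get_unaligned() should resolve to depends on the endianness the
 * kernel was built for -- the choice the #ifdef above makes. */
static uint32_t sketch_get_unaligned_le32(const void *p)
{
	const uint8_t *b = p;

	return (uint32_t)b[0] | (uint32_t)b[1] << 8 |
	       (uint32_t)b[2] << 16 | (uint32_t)b[3] << 24;
}

static uint32_t sketch_get_unaligned_be32(const void *p)
{
	const uint8_t *b = p;

	return (uint32_t)b[0] << 24 | (uint32_t)b[1] << 16 |
	       (uint32_t)b[2] << 8 | (uint32_t)b[3];
}

int main(void)
{
	uint8_t buf[] = { 0x12, 0x34, 0x56, 0x78 };

	printf("le=%08x be=%08x\n",
	       (unsigned)sketch_get_unaligned_le32(buf),
	       (unsigned)sketch_get_unaligned_be32(buf));
	return 0;
}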

+ 1 - 0
arch/powerpc/kernel/asm-offsets.c

@@ -576,6 +576,7 @@ int main(void)
 	HSTATE_FIELD(HSTATE_VMHANDLER, vmhandler);
 	HSTATE_FIELD(HSTATE_VMHANDLER, vmhandler);
 	HSTATE_FIELD(HSTATE_SCRATCH0, scratch0);
 	HSTATE_FIELD(HSTATE_SCRATCH0, scratch0);
 	HSTATE_FIELD(HSTATE_SCRATCH1, scratch1);
 	HSTATE_FIELD(HSTATE_SCRATCH1, scratch1);
+	HSTATE_FIELD(HSTATE_SCRATCH2, scratch2);
 	HSTATE_FIELD(HSTATE_IN_GUEST, in_guest);
 	HSTATE_FIELD(HSTATE_IN_GUEST, in_guest);
 	HSTATE_FIELD(HSTATE_RESTORE_HID5, restore_hid5);
 	HSTATE_FIELD(HSTATE_RESTORE_HID5, restore_hid5);
 	HSTATE_FIELD(HSTATE_NAPPING, napping);
 	HSTATE_FIELD(HSTATE_NAPPING, napping);

+ 3 - 3
arch/powerpc/kernel/crash_dump.c

@@ -124,15 +124,15 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
 void crash_free_reserved_phys_range(unsigned long begin, unsigned long end)
 void crash_free_reserved_phys_range(unsigned long begin, unsigned long end)
 {
 {
 	unsigned long addr;
 	unsigned long addr;
-	const u32 *basep, *sizep;
+	const __be32 *basep, *sizep;
 	unsigned int rtas_start = 0, rtas_end = 0;
 	unsigned int rtas_start = 0, rtas_end = 0;
 
 
 	basep = of_get_property(rtas.dev, "linux,rtas-base", NULL);
 	basep = of_get_property(rtas.dev, "linux,rtas-base", NULL);
 	sizep = of_get_property(rtas.dev, "rtas-size", NULL);
 	sizep = of_get_property(rtas.dev, "rtas-size", NULL);
 
 
 	if (basep && sizep) {
 	if (basep && sizep) {
-		rtas_start = *basep;
-		rtas_end = *basep + *sizep;
+		rtas_start = be32_to_cpup(basep);
+		rtas_end = rtas_start + be32_to_cpup(sizep);
 	}
 	}
 
 
 	for (addr = begin; addr < end; addr += PAGE_SIZE) {
 	for (addr = begin; addr < end; addr += PAGE_SIZE) {
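
This hunk and several later ones follow the same recipe: flattened-device-tree cells are always stored big-endian, so a little-endian kernel must convert them instead of dereferencing the raw pointer. A condensed, hedged sketch of the resulting pattern (the helper name is illustrative; of_get_property() and be32_to_cpup() are the real interfaces, and the property names mirror the hunk above):

#include <linux/of.h>
#include <asm/byteorder.h>

/* of_get_property() hands back raw big-endian cells (__be32); convert
 * with be32_to_cpup() before doing arithmetic on them. */
static unsigned long rtas_region_end(struct device_node *dev)
{
	const __be32 *basep, *sizep;

	basep = of_get_property(dev, "linux,rtas-base", NULL);
	sizep = of_get_property(dev, "rtas-size", NULL);
	if (!basep || !sizep)
		return 0;

	return be32_to_cpup(basep) + be32_to_cpup(sizep);
}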

+ 2 - 0
arch/powerpc/kernel/head_64.S

@@ -80,6 +80,7 @@ END_FTR_SECTION(0, 1)
 	 * of the function that the cpu should jump to to continue
 	 * of the function that the cpu should jump to to continue
 	 * initialization.
 	 * initialization.
 	 */
 	 */
+	.balign 8
 	.globl  __secondary_hold_spinloop
 	.globl  __secondary_hold_spinloop
 __secondary_hold_spinloop:
 __secondary_hold_spinloop:
 	.llong	0x0
 	.llong	0x0
@@ -470,6 +471,7 @@ _STATIC(__after_prom_start)
 	mtctr	r8
 	mtctr	r8
 	bctr
 	bctr
 
 
+.balign 8
 p_end:	.llong	_end - _stext
 p_end:	.llong	_end - _stext
 
 
 4:	/* Now copy the rest of the kernel up to _end */
 4:	/* Now copy the rest of the kernel up to _end */

+ 16 - 16
arch/powerpc/kernel/process.c

@@ -339,7 +339,7 @@ static void set_debug_reg_defaults(struct thread_struct *thread)
 #endif
 #endif
 }
 }
 
 
-static void prime_debug_regs(struct thread_struct *thread)
+static void prime_debug_regs(struct debug_reg *debug)
 {
 {
 	/*
 	/*
 	 * We could have inherited MSR_DE from userspace, since
 	 * We could have inherited MSR_DE from userspace, since
@@ -348,22 +348,22 @@ static void prime_debug_regs(struct thread_struct *thread)
 	 */
 	 */
 	mtmsr(mfmsr() & ~MSR_DE);
 	mtmsr(mfmsr() & ~MSR_DE);
 
 
-	mtspr(SPRN_IAC1, thread->debug.iac1);
-	mtspr(SPRN_IAC2, thread->debug.iac2);
+	mtspr(SPRN_IAC1, debug->iac1);
+	mtspr(SPRN_IAC2, debug->iac2);
 #if CONFIG_PPC_ADV_DEBUG_IACS > 2
 #if CONFIG_PPC_ADV_DEBUG_IACS > 2
-	mtspr(SPRN_IAC3, thread->debug.iac3);
-	mtspr(SPRN_IAC4, thread->debug.iac4);
+	mtspr(SPRN_IAC3, debug->iac3);
+	mtspr(SPRN_IAC4, debug->iac4);
 #endif
 #endif
-	mtspr(SPRN_DAC1, thread->debug.dac1);
-	mtspr(SPRN_DAC2, thread->debug.dac2);
+	mtspr(SPRN_DAC1, debug->dac1);
+	mtspr(SPRN_DAC2, debug->dac2);
 #if CONFIG_PPC_ADV_DEBUG_DVCS > 0
 #if CONFIG_PPC_ADV_DEBUG_DVCS > 0
-	mtspr(SPRN_DVC1, thread->debug.dvc1);
-	mtspr(SPRN_DVC2, thread->debug.dvc2);
+	mtspr(SPRN_DVC1, debug->dvc1);
+	mtspr(SPRN_DVC2, debug->dvc2);
 #endif
 #endif
-	mtspr(SPRN_DBCR0, thread->debug.dbcr0);
-	mtspr(SPRN_DBCR1, thread->debug.dbcr1);
+	mtspr(SPRN_DBCR0, debug->dbcr0);
+	mtspr(SPRN_DBCR1, debug->dbcr1);
 #ifdef CONFIG_BOOKE
 #ifdef CONFIG_BOOKE
-	mtspr(SPRN_DBCR2, thread->debug.dbcr2);
+	mtspr(SPRN_DBCR2, debug->dbcr2);
 #endif
 #endif
 }
 }
 /*
 /*
@@ -371,11 +371,11 @@ static void prime_debug_regs(struct thread_struct *thread)
  * debug registers, set the debug registers from the values
  * debug registers, set the debug registers from the values
  * stored in the new thread.
  * stored in the new thread.
  */
  */
-void switch_booke_debug_regs(struct thread_struct *new_thread)
+void switch_booke_debug_regs(struct debug_reg *new_debug)
 {
 {
 	if ((current->thread.debug.dbcr0 & DBCR0_IDM)
 	if ((current->thread.debug.dbcr0 & DBCR0_IDM)
-		|| (new_thread->debug.dbcr0 & DBCR0_IDM))
-			prime_debug_regs(new_thread);
+		|| (new_debug->dbcr0 & DBCR0_IDM))
+			prime_debug_regs(new_debug);
 }
 }
 EXPORT_SYMBOL_GPL(switch_booke_debug_regs);
 EXPORT_SYMBOL_GPL(switch_booke_debug_regs);
 #else	/* !CONFIG_PPC_ADV_DEBUG_REGS */
 #else	/* !CONFIG_PPC_ADV_DEBUG_REGS */
@@ -683,7 +683,7 @@ struct task_struct *__switch_to(struct task_struct *prev,
 #endif /* CONFIG_SMP */
 #endif /* CONFIG_SMP */
 
 
 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
-	switch_booke_debug_regs(&new->thread);
+	switch_booke_debug_regs(&new->thread.debug);
 #else
 #else
 /*
 /*
  * For PPC_BOOK3S_64, we use the hw-breakpoint interfaces that would
  * For PPC_BOOK3S_64, we use the hw-breakpoint interfaces that would

+ 13 - 9
arch/powerpc/kernel/prom_init.c

@@ -1986,19 +1986,23 @@ static void __init prom_init_stdout(void)
 	/* Get the full OF pathname of the stdout device */
 	/* Get the full OF pathname of the stdout device */
 	memset(path, 0, 256);
 	memset(path, 0, 256);
 	call_prom("instance-to-path", 3, 1, prom.stdout, path, 255);
 	call_prom("instance-to-path", 3, 1, prom.stdout, path, 255);
-	stdout_node = call_prom("instance-to-package", 1, 1, prom.stdout);
-	val = cpu_to_be32(stdout_node);
-	prom_setprop(prom.chosen, "/chosen", "linux,stdout-package",
-		     &val, sizeof(val));
 	prom_printf("OF stdout device is: %s\n", of_stdout_device);
 	prom_printf("OF stdout device is: %s\n", of_stdout_device);
 	prom_setprop(prom.chosen, "/chosen", "linux,stdout-path",
 	prom_setprop(prom.chosen, "/chosen", "linux,stdout-path",
 		     path, strlen(path) + 1);
 		     path, strlen(path) + 1);
 
 
-	/* If it's a display, note it */
-	memset(type, 0, sizeof(type));
-	prom_getprop(stdout_node, "device_type", type, sizeof(type));
-	if (strcmp(type, "display") == 0)
-		prom_setprop(stdout_node, path, "linux,boot-display", NULL, 0);
+	/* instance-to-package fails on PA-Semi */
+	stdout_node = call_prom("instance-to-package", 1, 1, prom.stdout);
+	if (stdout_node != PROM_ERROR) {
+		val = cpu_to_be32(stdout_node);
+		prom_setprop(prom.chosen, "/chosen", "linux,stdout-package",
+			     &val, sizeof(val));
+
+		/* If it's a display, note it */
+		memset(type, 0, sizeof(type));
+		prom_getprop(stdout_node, "device_type", type, sizeof(type));
+		if (strcmp(type, "display") == 0)
+			prom_setprop(stdout_node, path, "linux,boot-display", NULL, 0);
+	}
 }
 }
 
 
 static int __init prom_find_machine_type(void)
 static int __init prom_find_machine_type(void)

+ 2 - 2
arch/powerpc/kernel/ptrace.c

@@ -1555,7 +1555,7 @@ long arch_ptrace(struct task_struct *child, long request,
 
 
 			flush_fp_to_thread(child);
 			flush_fp_to_thread(child);
 			if (fpidx < (PT_FPSCR - PT_FPR0))
 			if (fpidx < (PT_FPSCR - PT_FPR0))
-				memcpy(&tmp, &child->thread.fp_state.fpr,
+				memcpy(&tmp, &child->thread.TS_FPR(fpidx),
 				       sizeof(long));
 				       sizeof(long));
 			else
 			else
 				tmp = child->thread.fp_state.fpscr;
 				tmp = child->thread.fp_state.fpscr;
@@ -1588,7 +1588,7 @@ long arch_ptrace(struct task_struct *child, long request,
 
 
 			flush_fp_to_thread(child);
 			flush_fp_to_thread(child);
 			if (fpidx < (PT_FPSCR - PT_FPR0))
 			if (fpidx < (PT_FPSCR - PT_FPR0))
-				memcpy(&child->thread.fp_state.fpr, &data,
+				memcpy(&child->thread.TS_FPR(fpidx), &data,
 				       sizeof(long));
 				       sizeof(long));
 			else
 			else
 				child->thread.fp_state.fpscr = data;
 				child->thread.fp_state.fpscr = data;

+ 2 - 2
arch/powerpc/kernel/setup-common.c

@@ -479,7 +479,7 @@ void __init smp_setup_cpu_maps(void)
 	if (machine_is(pseries) && firmware_has_feature(FW_FEATURE_LPAR) &&
 	if (machine_is(pseries) && firmware_has_feature(FW_FEATURE_LPAR) &&
 	    (dn = of_find_node_by_path("/rtas"))) {
 	    (dn = of_find_node_by_path("/rtas"))) {
 		int num_addr_cell, num_size_cell, maxcpus;
 		int num_addr_cell, num_size_cell, maxcpus;
-		const unsigned int *ireg;
+		const __be32 *ireg;
 
 
 		num_addr_cell = of_n_addr_cells(dn);
 		num_addr_cell = of_n_addr_cells(dn);
 		num_size_cell = of_n_size_cells(dn);
 		num_size_cell = of_n_size_cells(dn);
@@ -489,7 +489,7 @@ void __init smp_setup_cpu_maps(void)
 		if (!ireg)
 		if (!ireg)
 			goto out;
 			goto out;
 
 
-		maxcpus = ireg[num_addr_cell + num_size_cell];
+		maxcpus = be32_to_cpup(ireg + num_addr_cell + num_size_cell);
 
 
 		/* Double maxcpus for processors which have SMT capability */
 		/* Double maxcpus for processors which have SMT capability */
 		if (cpu_has_feature(CPU_FTR_SMT))
 		if (cpu_has_feature(CPU_FTR_SMT))

+ 2 - 2
arch/powerpc/kernel/smp.c

@@ -580,7 +580,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle)
 int cpu_to_core_id(int cpu)
 int cpu_to_core_id(int cpu)
 {
 {
 	struct device_node *np;
 	struct device_node *np;
-	const int *reg;
+	const __be32 *reg;
 	int id = -1;
 	int id = -1;
 
 
 	np = of_get_cpu_node(cpu, NULL);
 	np = of_get_cpu_node(cpu, NULL);
@@ -591,7 +591,7 @@ int cpu_to_core_id(int cpu)
 	if (!reg)
 	if (!reg)
 		goto out;
 		goto out;
 
 
-	id = *reg;
+	id = be32_to_cpup(reg);
 out:
 out:
 	of_node_put(np);
 	of_node_put(np);
 	return id;
 	return id;

+ 14 - 4
arch/powerpc/kvm/book3s_64_mmu_hv.c

@@ -469,11 +469,14 @@ static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
 		slb_v = vcpu->kvm->arch.vrma_slb_v;
 		slb_v = vcpu->kvm->arch.vrma_slb_v;
 	}
 	}
 
 
+	preempt_disable();
 	/* Find the HPTE in the hash table */
 	/* Find the HPTE in the hash table */
 	index = kvmppc_hv_find_lock_hpte(kvm, eaddr, slb_v,
 	index = kvmppc_hv_find_lock_hpte(kvm, eaddr, slb_v,
 					 HPTE_V_VALID | HPTE_V_ABSENT);
 					 HPTE_V_VALID | HPTE_V_ABSENT);
-	if (index < 0)
+	if (index < 0) {
+		preempt_enable();
 		return -ENOENT;
 		return -ENOENT;
+	}
 	hptep = (unsigned long *)(kvm->arch.hpt_virt + (index << 4));
 	hptep = (unsigned long *)(kvm->arch.hpt_virt + (index << 4));
 	v = hptep[0] & ~HPTE_V_HVLOCK;
 	v = hptep[0] & ~HPTE_V_HVLOCK;
 	gr = kvm->arch.revmap[index].guest_rpte;
 	gr = kvm->arch.revmap[index].guest_rpte;
@@ -481,6 +484,7 @@ static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
 	/* Unlock the HPTE */
 	/* Unlock the HPTE */
 	asm volatile("lwsync" : : : "memory");
 	asm volatile("lwsync" : : : "memory");
 	hptep[0] = v;
 	hptep[0] = v;
+	preempt_enable();
 
 
 	gpte->eaddr = eaddr;
 	gpte->eaddr = eaddr;
 	gpte->vpage = ((v & HPTE_V_AVPN) << 4) | ((eaddr >> 12) & 0xfff);
 	gpte->vpage = ((v & HPTE_V_AVPN) << 4) | ((eaddr >> 12) & 0xfff);
@@ -665,6 +669,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 			return -EFAULT;
 			return -EFAULT;
 	} else {
 	} else {
 		page = pages[0];
 		page = pages[0];
+		pfn = page_to_pfn(page);
 		if (PageHuge(page)) {
 		if (PageHuge(page)) {
 			page = compound_head(page);
 			page = compound_head(page);
 			pte_size <<= compound_order(page);
 			pte_size <<= compound_order(page);
@@ -689,7 +694,6 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 			}
 			}
 			rcu_read_unlock_sched();
 			rcu_read_unlock_sched();
 		}
 		}
-		pfn = page_to_pfn(page);
 	}
 	}
 
 
 	ret = -EFAULT;
 	ret = -EFAULT;
@@ -707,8 +711,14 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		r = (r & ~(HPTE_R_W|HPTE_R_I|HPTE_R_G)) | HPTE_R_M;
 		r = (r & ~(HPTE_R_W|HPTE_R_I|HPTE_R_G)) | HPTE_R_M;
 	}
 	}
 
 
-	/* Set the HPTE to point to pfn */
-	r = (r & ~(HPTE_R_PP0 - pte_size)) | (pfn << PAGE_SHIFT);
+	/*
+	 * Set the HPTE to point to pfn.
+	 * Since the pfn is at PAGE_SIZE granularity, make sure we
+	 * don't mask out lower-order bits if psize < PAGE_SIZE.
+	 */
+	if (psize < PAGE_SIZE)
+		psize = PAGE_SIZE;
+	r = (r & ~(HPTE_R_PP0 - psize)) | ((pfn << PAGE_SHIFT) & ~(psize - 1));
 	if (hpte_is_writable(r) && !write_ok)
 	if (hpte_is_writable(r) && !write_ok)
 		r = hpte_make_readonly(r);
 		r = hpte_make_readonly(r);
 	ret = RESUME_GUEST;
 	ret = RESUME_GUEST;
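
The preempt_disable()/preempt_enable() pair added above exists because kvmppc_hv_find_lock_hpte() returns with the HPTE_V_HVLOCK bit held; being preempted while that lock bit is set can deadlock (see the comment added to book3s_hv_rm_mmu.c further down). Condensed, the virtual-mode call site now reads roughly as follows (surrounding declarations are as in the patch; this is a restatement, not new code):

	preempt_disable();
	index = kvmppc_hv_find_lock_hpte(kvm, eaddr, slb_v,
					 HPTE_V_VALID | HPTE_V_ABSENT);
	if (index < 0) {
		preempt_enable();
		return -ENOENT;
	}
	/* ... read out the entry while it is locked ... */
	hptep[0] = v;		/* unlock: clears HPTE_V_HVLOCK */
	preempt_enable();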

+ 14 - 10
arch/powerpc/kvm/book3s_hv.c

@@ -131,8 +131,9 @@ static void kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu)
 static void kvmppc_core_vcpu_load_hv(struct kvm_vcpu *vcpu, int cpu)
 static void kvmppc_core_vcpu_load_hv(struct kvm_vcpu *vcpu, int cpu)
 {
 {
 	struct kvmppc_vcore *vc = vcpu->arch.vcore;
 	struct kvmppc_vcore *vc = vcpu->arch.vcore;
+	unsigned long flags;
 
 
-	spin_lock(&vcpu->arch.tbacct_lock);
+	spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags);
 	if (vc->runner == vcpu && vc->vcore_state != VCORE_INACTIVE &&
 	if (vc->runner == vcpu && vc->vcore_state != VCORE_INACTIVE &&
 	    vc->preempt_tb != TB_NIL) {
 	    vc->preempt_tb != TB_NIL) {
 		vc->stolen_tb += mftb() - vc->preempt_tb;
 		vc->stolen_tb += mftb() - vc->preempt_tb;
@@ -143,19 +144,20 @@ static void kvmppc_core_vcpu_load_hv(struct kvm_vcpu *vcpu, int cpu)
 		vcpu->arch.busy_stolen += mftb() - vcpu->arch.busy_preempt;
 		vcpu->arch.busy_stolen += mftb() - vcpu->arch.busy_preempt;
 		vcpu->arch.busy_preempt = TB_NIL;
 		vcpu->arch.busy_preempt = TB_NIL;
 	}
 	}
-	spin_unlock(&vcpu->arch.tbacct_lock);
+	spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags);
 }
 }
 
 
 static void kvmppc_core_vcpu_put_hv(struct kvm_vcpu *vcpu)
 static void kvmppc_core_vcpu_put_hv(struct kvm_vcpu *vcpu)
 {
 {
 	struct kvmppc_vcore *vc = vcpu->arch.vcore;
 	struct kvmppc_vcore *vc = vcpu->arch.vcore;
+	unsigned long flags;
 
 
-	spin_lock(&vcpu->arch.tbacct_lock);
+	spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags);
 	if (vc->runner == vcpu && vc->vcore_state != VCORE_INACTIVE)
 	if (vc->runner == vcpu && vc->vcore_state != VCORE_INACTIVE)
 		vc->preempt_tb = mftb();
 		vc->preempt_tb = mftb();
 	if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST)
 	if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST)
 		vcpu->arch.busy_preempt = mftb();
 		vcpu->arch.busy_preempt = mftb();
-	spin_unlock(&vcpu->arch.tbacct_lock);
+	spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags);
 }
 }
 
 
 static void kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 msr)
 static void kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 msr)
@@ -486,11 +488,11 @@ static u64 vcore_stolen_time(struct kvmppc_vcore *vc, u64 now)
 	 */
 	 */
 	if (vc->vcore_state != VCORE_INACTIVE &&
 	if (vc->vcore_state != VCORE_INACTIVE &&
 	    vc->runner->arch.run_task != current) {
 	    vc->runner->arch.run_task != current) {
-		spin_lock(&vc->runner->arch.tbacct_lock);
+		spin_lock_irq(&vc->runner->arch.tbacct_lock);
 		p = vc->stolen_tb;
 		p = vc->stolen_tb;
 		if (vc->preempt_tb != TB_NIL)
 		if (vc->preempt_tb != TB_NIL)
 			p += now - vc->preempt_tb;
 			p += now - vc->preempt_tb;
-		spin_unlock(&vc->runner->arch.tbacct_lock);
+		spin_unlock_irq(&vc->runner->arch.tbacct_lock);
 	} else {
 	} else {
 		p = vc->stolen_tb;
 		p = vc->stolen_tb;
 	}
 	}
@@ -512,10 +514,10 @@ static void kvmppc_create_dtl_entry(struct kvm_vcpu *vcpu,
 	core_stolen = vcore_stolen_time(vc, now);
 	core_stolen = vcore_stolen_time(vc, now);
 	stolen = core_stolen - vcpu->arch.stolen_logged;
 	stolen = core_stolen - vcpu->arch.stolen_logged;
 	vcpu->arch.stolen_logged = core_stolen;
 	vcpu->arch.stolen_logged = core_stolen;
-	spin_lock(&vcpu->arch.tbacct_lock);
+	spin_lock_irq(&vcpu->arch.tbacct_lock);
 	stolen += vcpu->arch.busy_stolen;
 	stolen += vcpu->arch.busy_stolen;
 	vcpu->arch.busy_stolen = 0;
 	vcpu->arch.busy_stolen = 0;
-	spin_unlock(&vcpu->arch.tbacct_lock);
+	spin_unlock_irq(&vcpu->arch.tbacct_lock);
 	if (!dt || !vpa)
 	if (!dt || !vpa)
 		return;
 		return;
 	memset(dt, 0, sizeof(struct dtl_entry));
 	memset(dt, 0, sizeof(struct dtl_entry));
@@ -589,7 +591,9 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
 		if (list_empty(&vcpu->kvm->arch.rtas_tokens))
 		if (list_empty(&vcpu->kvm->arch.rtas_tokens))
 			return RESUME_HOST;
 			return RESUME_HOST;
 
 
+		idx = srcu_read_lock(&vcpu->kvm->srcu);
 		rc = kvmppc_rtas_hcall(vcpu);
 		rc = kvmppc_rtas_hcall(vcpu);
+		srcu_read_unlock(&vcpu->kvm->srcu, idx);
 
 
 		if (rc == -ENOENT)
 		if (rc == -ENOENT)
 			return RESUME_HOST;
 			return RESUME_HOST;
@@ -1115,13 +1119,13 @@ static void kvmppc_remove_runnable(struct kvmppc_vcore *vc,
 
 
 	if (vcpu->arch.state != KVMPPC_VCPU_RUNNABLE)
 	if (vcpu->arch.state != KVMPPC_VCPU_RUNNABLE)
 		return;
 		return;
-	spin_lock(&vcpu->arch.tbacct_lock);
+	spin_lock_irq(&vcpu->arch.tbacct_lock);
 	now = mftb();
 	now = mftb();
 	vcpu->arch.busy_stolen += vcore_stolen_time(vc, now) -
 	vcpu->arch.busy_stolen += vcore_stolen_time(vc, now) -
 		vcpu->arch.stolen_logged;
 		vcpu->arch.stolen_logged;
 	vcpu->arch.busy_preempt = now;
 	vcpu->arch.busy_preempt = now;
 	vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
 	vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
-	spin_unlock(&vcpu->arch.tbacct_lock);
+	spin_unlock_irq(&vcpu->arch.tbacct_lock);
 	--vc->n_runnable;
 	--vc->n_runnable;
 	list_del(&vcpu->arch.run_list);
 	list_del(&vcpu->arch.run_list);
 }
 }
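
The tbacct_lock changes above follow the usual locking rule: once a spinlock can be taken from a context that runs with interrupts disabled (or from interrupt context), every acquisition must disable interrupts as well, or an interrupt arriving while the lock is held can deadlock on it. A generic, hedged sketch of the two forms used in the hunk (the lock and function names here are illustrative, not the patch's):

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(acct_lock);

/* Caller may already be running with IRQs off (e.g. a sched hook), so
 * save and restore the previous interrupt state. */
static void update_from_sched_hook(void)
{
	unsigned long flags;

	spin_lock_irqsave(&acct_lock, flags);
	/* ... update accounting ... */
	spin_unlock_irqrestore(&acct_lock, flags);
}

/* Caller is known to have IRQs enabled, so the cheaper _irq form is
 * enough here. */
static void update_from_task_context(void)
{
	spin_lock_irq(&acct_lock);
	/* ... update accounting ... */
	spin_unlock_irq(&acct_lock);
}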

+ 7 - 2
arch/powerpc/kvm/book3s_hv_rm_mmu.c

@@ -225,6 +225,7 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
 		is_io = pa & (HPTE_R_I | HPTE_R_W);
 		is_io = pa & (HPTE_R_I | HPTE_R_W);
 		pte_size = PAGE_SIZE << (pa & KVMPPC_PAGE_ORDER_MASK);
 		pte_size = PAGE_SIZE << (pa & KVMPPC_PAGE_ORDER_MASK);
 		pa &= PAGE_MASK;
 		pa &= PAGE_MASK;
+		pa |= gpa & ~PAGE_MASK;
 	} else {
 	} else {
 		/* Translate to host virtual address */
 		/* Translate to host virtual address */
 		hva = __gfn_to_hva_memslot(memslot, gfn);
 		hva = __gfn_to_hva_memslot(memslot, gfn);
@@ -238,13 +239,13 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
 				ptel = hpte_make_readonly(ptel);
 				ptel = hpte_make_readonly(ptel);
 			is_io = hpte_cache_bits(pte_val(pte));
 			is_io = hpte_cache_bits(pte_val(pte));
 			pa = pte_pfn(pte) << PAGE_SHIFT;
 			pa = pte_pfn(pte) << PAGE_SHIFT;
+			pa |= hva & (pte_size - 1);
+			pa |= gpa & ~PAGE_MASK;
 		}
 		}
 	}
 	}
 
 
 	if (pte_size < psize)
 	if (pte_size < psize)
 		return H_PARAMETER;
 		return H_PARAMETER;
-	if (pa && pte_size > psize)
-		pa |= gpa & (pte_size - 1);
 
 
 	ptel &= ~(HPTE_R_PP0 - psize);
 	ptel &= ~(HPTE_R_PP0 - psize);
 	ptel |= pa;
 	ptel |= pa;
@@ -749,6 +750,10 @@ static int slb_base_page_shift[4] = {
 	20,	/* 1M, unsupported */
 	20,	/* 1M, unsupported */
 };
 };
 
 
+/* When called from virtmode, this func should be protected by
+ * preempt_disable(), otherwise, the holding of HPTE_V_HVLOCK
+ * can trigger deadlock issue.
+ */
 long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v,
 long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v,
 			      unsigned long valid)
 			      unsigned long valid)
 {
 {

+ 13 - 10
arch/powerpc/kvm/book3s_hv_rmhandlers.S

@@ -153,7 +153,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
 
 
 13:	b	machine_check_fwnmi
 13:	b	machine_check_fwnmi
 
 
-
 /*
 /*
  * We come in here when wakened from nap mode on a secondary hw thread.
  * We come in here when wakened from nap mode on a secondary hw thread.
  * Relocation is off and most register values are lost.
  * Relocation is off and most register values are lost.
@@ -224,6 +223,11 @@ kvm_start_guest:
 	/* Clear our vcpu pointer so we don't come back in early */
 	/* Clear our vcpu pointer so we don't come back in early */
 	li	r0, 0
 	li	r0, 0
 	std	r0, HSTATE_KVM_VCPU(r13)
 	std	r0, HSTATE_KVM_VCPU(r13)
+	/*
+	 * Make sure we clear HSTATE_KVM_VCPU(r13) before incrementing
+	 * the nap_count, because once the increment to nap_count is
+	 * visible we could be given another vcpu.
+	 */
 	lwsync
 	lwsync
 	/* Clear any pending IPI - we're an offline thread */
 	/* Clear any pending IPI - we're an offline thread */
 	ld	r5, HSTATE_XICS_PHYS(r13)
 	ld	r5, HSTATE_XICS_PHYS(r13)
@@ -241,7 +245,6 @@ kvm_start_guest:
 	/* increment the nap count and then go to nap mode */
 	/* increment the nap count and then go to nap mode */
 	ld	r4, HSTATE_KVM_VCORE(r13)
 	ld	r4, HSTATE_KVM_VCORE(r13)
 	addi	r4, r4, VCORE_NAP_COUNT
 	addi	r4, r4, VCORE_NAP_COUNT
-	lwsync				/* make previous updates visible */
 51:	lwarx	r3, 0, r4
 51:	lwarx	r3, 0, r4
 	addi	r3, r3, 1
 	addi	r3, r3, 1
 	stwcx.	r3, 0, r4
 	stwcx.	r3, 0, r4
@@ -751,15 +754,14 @@ kvmppc_interrupt_hv:
 	 * guest CR, R12 saved in shadow VCPU SCRATCH1/0
 	 * guest CR, R12 saved in shadow VCPU SCRATCH1/0
 	 * guest R13 saved in SPRN_SCRATCH0
 	 * guest R13 saved in SPRN_SCRATCH0
 	 */
 	 */
-	/* abuse host_r2 as third scratch area; we get r2 from PACATOC(r13) */
-	std	r9, HSTATE_HOST_R2(r13)
+	std	r9, HSTATE_SCRATCH2(r13)
 
 
 	lbz	r9, HSTATE_IN_GUEST(r13)
 	lbz	r9, HSTATE_IN_GUEST(r13)
 	cmpwi	r9, KVM_GUEST_MODE_HOST_HV
 	cmpwi	r9, KVM_GUEST_MODE_HOST_HV
 	beq	kvmppc_bad_host_intr
 	beq	kvmppc_bad_host_intr
 #ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
 #ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
 	cmpwi	r9, KVM_GUEST_MODE_GUEST
 	cmpwi	r9, KVM_GUEST_MODE_GUEST
-	ld	r9, HSTATE_HOST_R2(r13)
+	ld	r9, HSTATE_SCRATCH2(r13)
 	beq	kvmppc_interrupt_pr
 	beq	kvmppc_interrupt_pr
 #endif
 #endif
 	/* We're now back in the host but in guest MMU context */
 	/* We're now back in the host but in guest MMU context */
@@ -779,7 +781,7 @@ kvmppc_interrupt_hv:
 	std	r6, VCPU_GPR(R6)(r9)
 	std	r6, VCPU_GPR(R6)(r9)
 	std	r7, VCPU_GPR(R7)(r9)
 	std	r7, VCPU_GPR(R7)(r9)
 	std	r8, VCPU_GPR(R8)(r9)
 	std	r8, VCPU_GPR(R8)(r9)
-	ld	r0, HSTATE_HOST_R2(r13)
+	ld	r0, HSTATE_SCRATCH2(r13)
 	std	r0, VCPU_GPR(R9)(r9)
 	std	r0, VCPU_GPR(R9)(r9)
 	std	r10, VCPU_GPR(R10)(r9)
 	std	r10, VCPU_GPR(R10)(r9)
 	std	r11, VCPU_GPR(R11)(r9)
 	std	r11, VCPU_GPR(R11)(r9)
@@ -990,14 +992,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
 	 */
 	 */
 	/* Increment the threads-exiting-guest count in the 0xff00
 	/* Increment the threads-exiting-guest count in the 0xff00
 	   bits of vcore->entry_exit_count */
 	   bits of vcore->entry_exit_count */
-	lwsync
 	ld	r5,HSTATE_KVM_VCORE(r13)
 	ld	r5,HSTATE_KVM_VCORE(r13)
 	addi	r6,r5,VCORE_ENTRY_EXIT
 	addi	r6,r5,VCORE_ENTRY_EXIT
 41:	lwarx	r3,0,r6
 41:	lwarx	r3,0,r6
 	addi	r0,r3,0x100
 	addi	r0,r3,0x100
 	stwcx.	r0,0,r6
 	stwcx.	r0,0,r6
 	bne	41b
 	bne	41b
-	lwsync
+	isync		/* order stwcx. vs. reading napping_threads */
 
 
 	/*
 	/*
 	 * At this point we have an interrupt that we have to pass
 	 * At this point we have an interrupt that we have to pass
@@ -1030,6 +1031,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
 	sld	r0,r0,r4
 	sld	r0,r0,r4
 	andc.	r3,r3,r0		/* no sense IPI'ing ourselves */
 	andc.	r3,r3,r0		/* no sense IPI'ing ourselves */
 	beq	43f
 	beq	43f
+	/* Order entry/exit update vs. IPIs */
+	sync
 	mulli	r4,r4,PACA_SIZE		/* get paca for thread 0 */
 	mulli	r4,r4,PACA_SIZE		/* get paca for thread 0 */
 	subf	r6,r4,r13
 	subf	r6,r4,r13
 42:	andi.	r0,r3,1
 42:	andi.	r0,r3,1
@@ -1638,10 +1641,10 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)
 	bge	kvm_cede_exit
 	bge	kvm_cede_exit
 	stwcx.	r4,0,r6
 	stwcx.	r4,0,r6
 	bne	31b
 	bne	31b
+	/* order napping_threads update vs testing entry_exit_count */
+	isync
 	li	r0,1
 	li	r0,1
 	stb	r0,HSTATE_NAPPING(r13)
 	stb	r0,HSTATE_NAPPING(r13)
-	/* order napping_threads update vs testing entry_exit_count */
-	lwsync
 	mr	r4,r3
 	mr	r4,r3
 	lwz	r7,VCORE_ENTRY_EXIT(r5)
 	lwz	r7,VCORE_ENTRY_EXIT(r5)
 	cmpwi	r7,0x100
 	cmpwi	r7,0x100

+ 11 - 8
arch/powerpc/kvm/book3s_interrupts.S

@@ -129,29 +129,32 @@ kvm_start_lightweight:
 	 * R12      = exit handler id
 	 * R12      = exit handler id
 	 * R13      = PACA
 	 * R13      = PACA
 	 * SVCPU.*  = guest *
 	 * SVCPU.*  = guest *
+	 * MSR.EE   = 1
 	 *
 	 *
 	 */
 	 */
 
 
+	PPC_LL	r3, GPR4(r1)		/* vcpu pointer */
+
+	/*
+	 * kvmppc_copy_from_svcpu can clobber volatile registers, save
+	 * the exit handler id to the vcpu and restore it from there later.
+	 */
+	stw	r12, VCPU_TRAP(r3)
+
 	/* Transfer reg values from shadow vcpu back to vcpu struct */
 	/* Transfer reg values from shadow vcpu back to vcpu struct */
 	/* On 64-bit, interrupts are still off at this point */
 	/* On 64-bit, interrupts are still off at this point */
-	PPC_LL	r3, GPR4(r1)		/* vcpu pointer */
+
 	GET_SHADOW_VCPU(r4)
 	GET_SHADOW_VCPU(r4)
 	bl	FUNC(kvmppc_copy_from_svcpu)
 	bl	FUNC(kvmppc_copy_from_svcpu)
 	nop
 	nop
 
 
 #ifdef CONFIG_PPC_BOOK3S_64
 #ifdef CONFIG_PPC_BOOK3S_64
-	/* Re-enable interrupts */
-	ld	r3, HSTATE_HOST_MSR(r13)
-	ori	r3, r3, MSR_EE
-	MTMSR_EERI(r3)
-
 	/*
 	/*
 	 * Reload kernel SPRG3 value.
 	 * Reload kernel SPRG3 value.
 	 * No need to save guest value as usermode can't modify SPRG3.
 	 * No need to save guest value as usermode can't modify SPRG3.
 	 */
 	 */
 	ld	r3, PACA_SPRG3(r13)
 	ld	r3, PACA_SPRG3(r13)
 	mtspr	SPRN_SPRG3, r3
 	mtspr	SPRN_SPRG3, r3
-
 #endif /* CONFIG_PPC_BOOK3S_64 */
 #endif /* CONFIG_PPC_BOOK3S_64 */
 
 
 	/* R7 = vcpu */
 	/* R7 = vcpu */
@@ -177,7 +180,7 @@ kvm_start_lightweight:
 	PPC_STL	r31, VCPU_GPR(R31)(r7)
 	PPC_STL	r31, VCPU_GPR(R31)(r7)
 
 
 	/* Pass the exit number as 3rd argument to kvmppc_handle_exit */
 	/* Pass the exit number as 3rd argument to kvmppc_handle_exit */
-	mr	r5, r12
+	lwz	r5, VCPU_TRAP(r7)
 
 
 	/* Restore r3 (kvm_run) and r4 (vcpu) */
 	/* Restore r3 (kvm_run) and r4 (vcpu) */
 	REST_2GPRS(3, r1)
 	REST_2GPRS(3, r1)

+ 22 - 0
arch/powerpc/kvm/book3s_pr.c

@@ -66,6 +66,7 @@ static void kvmppc_core_vcpu_load_pr(struct kvm_vcpu *vcpu, int cpu)
 	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
 	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
 	memcpy(svcpu->slb, to_book3s(vcpu)->slb_shadow, sizeof(svcpu->slb));
 	memcpy(svcpu->slb, to_book3s(vcpu)->slb_shadow, sizeof(svcpu->slb));
 	svcpu->slb_max = to_book3s(vcpu)->slb_shadow_max;
 	svcpu->slb_max = to_book3s(vcpu)->slb_shadow_max;
+	svcpu->in_use = 0;
 	svcpu_put(svcpu);
 	svcpu_put(svcpu);
 #endif
 #endif
 	vcpu->cpu = smp_processor_id();
 	vcpu->cpu = smp_processor_id();
@@ -78,6 +79,9 @@ static void kvmppc_core_vcpu_put_pr(struct kvm_vcpu *vcpu)
 {
 {
 #ifdef CONFIG_PPC_BOOK3S_64
 #ifdef CONFIG_PPC_BOOK3S_64
 	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
 	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+	if (svcpu->in_use) {
+		kvmppc_copy_from_svcpu(vcpu, svcpu);
+	}
 	memcpy(to_book3s(vcpu)->slb_shadow, svcpu->slb, sizeof(svcpu->slb));
 	memcpy(to_book3s(vcpu)->slb_shadow, svcpu->slb, sizeof(svcpu->slb));
 	to_book3s(vcpu)->slb_shadow_max = svcpu->slb_max;
 	to_book3s(vcpu)->slb_shadow_max = svcpu->slb_max;
 	svcpu_put(svcpu);
 	svcpu_put(svcpu);
@@ -110,12 +114,26 @@ void kvmppc_copy_to_svcpu(struct kvmppc_book3s_shadow_vcpu *svcpu,
 	svcpu->ctr = vcpu->arch.ctr;
 	svcpu->ctr = vcpu->arch.ctr;
 	svcpu->lr  = vcpu->arch.lr;
 	svcpu->lr  = vcpu->arch.lr;
 	svcpu->pc  = vcpu->arch.pc;
 	svcpu->pc  = vcpu->arch.pc;
+	svcpu->in_use = true;
 }
 }
 
 
 /* Copy data touched by real-mode code from shadow vcpu back to vcpu */
 /* Copy data touched by real-mode code from shadow vcpu back to vcpu */
 void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu,
 void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu,
 			    struct kvmppc_book3s_shadow_vcpu *svcpu)
 			    struct kvmppc_book3s_shadow_vcpu *svcpu)
 {
 {
+	/*
+	 * vcpu_put would just call us again because in_use hasn't
+	 * been updated yet.
+	 */
+	preempt_disable();
+
+	/*
+	 * Maybe we were already preempted and synced the svcpu from
+	 * our preempt notifiers. Don't bother touching this svcpu then.
+	 */
+	if (!svcpu->in_use)
+		goto out;
+
 	vcpu->arch.gpr[0] = svcpu->gpr[0];
 	vcpu->arch.gpr[0] = svcpu->gpr[0];
 	vcpu->arch.gpr[1] = svcpu->gpr[1];
 	vcpu->arch.gpr[1] = svcpu->gpr[1];
 	vcpu->arch.gpr[2] = svcpu->gpr[2];
 	vcpu->arch.gpr[2] = svcpu->gpr[2];
@@ -139,6 +157,10 @@ void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu,
 	vcpu->arch.fault_dar   = svcpu->fault_dar;
 	vcpu->arch.fault_dar   = svcpu->fault_dar;
 	vcpu->arch.fault_dsisr = svcpu->fault_dsisr;
 	vcpu->arch.fault_dsisr = svcpu->fault_dsisr;
 	vcpu->arch.last_inst   = svcpu->last_inst;
 	vcpu->arch.last_inst   = svcpu->last_inst;
+	svcpu->in_use = false;
+
+out:
+	preempt_enable();
 }
 }
 
 
 static int kvmppc_core_check_requests_pr(struct kvm_vcpu *vcpu)
 static int kvmppc_core_check_requests_pr(struct kvm_vcpu *vcpu)
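
The in_use flag introduced above makes the shadow-vcpu copy-back idempotent: vcpu_put only copies if a preempt notifier has not already done so, and kvmppc_copy_from_svcpu() disables preemption while it checks and clears the flag. Stripped of the register copies, the control flow is roughly the following (a condensed restatement of the patch, not a complete function):

	preempt_disable();		/* vcpu_put could otherwise re-enter us */

	if (!svcpu->in_use)		/* already synced by a preempt notifier */
		goto out;

	/* ... copy registers from svcpu back into vcpu->arch ... */

	svcpu->in_use = false;		/* shadow state has been consumed */
out:
	preempt_enable();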

+ 1 - 5
arch/powerpc/kvm/book3s_rmhandlers.S

@@ -153,15 +153,11 @@ _GLOBAL(kvmppc_entry_trampoline)
 
 
 	li	r6, MSR_IR | MSR_DR
 	li	r6, MSR_IR | MSR_DR
 	andc	r6, r5, r6	/* Clear DR and IR in MSR value */
 	andc	r6, r5, r6	/* Clear DR and IR in MSR value */
-#ifdef CONFIG_PPC_BOOK3S_32
 	/*
 	/*
 	 * Set EE in HOST_MSR so that it's enabled when we get into our
 	 * Set EE in HOST_MSR so that it's enabled when we get into our
-	 * C exit handler function.  On 64-bit we delay enabling
-	 * interrupts until we have finished transferring stuff
-	 * to or from the PACA.
+	 * C exit handler function.
 	 */
 	 */
 	ori	r5, r5, MSR_EE
 	ori	r5, r5, MSR_EE
-#endif
 	mtsrr0	r7
 	mtsrr0	r7
 	mtsrr1	r6
 	mtsrr1	r6
 	RFI
 	RFI

+ 6 - 6
arch/powerpc/kvm/booke.c

@@ -681,7 +681,7 @@ int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
 int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 {
 {
 	int ret, s;
 	int ret, s;
-	struct thread_struct thread;
+	struct debug_reg debug;
 #ifdef CONFIG_PPC_FPU
 #ifdef CONFIG_PPC_FPU
 	struct thread_fp_state fp;
 	struct thread_fp_state fp;
 	int fpexc_mode;
 	int fpexc_mode;
@@ -723,9 +723,9 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 #endif
 #endif
 
 
 	/* Switch to guest debug context */
 	/* Switch to guest debug context */
-	thread.debug = vcpu->arch.shadow_dbg_reg;
-	switch_booke_debug_regs(&thread);
-	thread.debug = current->thread.debug;
+	debug = vcpu->arch.shadow_dbg_reg;
+	switch_booke_debug_regs(&debug);
+	debug = current->thread.debug;
 	current->thread.debug = vcpu->arch.shadow_dbg_reg;
 	current->thread.debug = vcpu->arch.shadow_dbg_reg;
 
 
 	kvmppc_fix_ee_before_entry();
 	kvmppc_fix_ee_before_entry();
@@ -736,8 +736,8 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 	   We also get here with interrupts enabled. */
 	   We also get here with interrupts enabled. */
 
 
 	/* Switch back to user space debug context */
 	/* Switch back to user space debug context */
-	switch_booke_debug_regs(&thread);
-	current->thread.debug = thread.debug;
+	switch_booke_debug_regs(&debug);
+	current->thread.debug = debug;
 
 
 #ifdef CONFIG_PPC_FPU
 #ifdef CONFIG_PPC_FPU
 	kvmppc_save_guest_fp(vcpu);
 	kvmppc_save_guest_fp(vcpu);

+ 38 - 15
arch/powerpc/lib/copyuser_64.S

@@ -9,6 +9,14 @@
 #include <asm/processor.h>
 #include <asm/processor.h>
 #include <asm/ppc_asm.h>
 #include <asm/ppc_asm.h>
 
 
+#ifdef __BIG_ENDIAN__
+#define sLd sld		/* Shift towards low-numbered address. */
+#define sHd srd		/* Shift towards high-numbered address. */
+#else
+#define sLd srd		/* Shift towards low-numbered address. */
+#define sHd sld		/* Shift towards high-numbered address. */
+#endif
+
 	.align	7
 	.align	7
 _GLOBAL(__copy_tofrom_user)
 _GLOBAL(__copy_tofrom_user)
 BEGIN_FTR_SECTION
 BEGIN_FTR_SECTION
@@ -118,10 +126,10 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
 
 
 24:	ld	r9,0(r4)	/* 3+2n loads, 2+2n stores */
 24:	ld	r9,0(r4)	/* 3+2n loads, 2+2n stores */
 25:	ld	r0,8(r4)
 25:	ld	r0,8(r4)
-	sld	r6,r9,r10
+	sLd	r6,r9,r10
 26:	ldu	r9,16(r4)
 26:	ldu	r9,16(r4)
-	srd	r7,r0,r11
-	sld	r8,r0,r10
+	sHd	r7,r0,r11
+	sLd	r8,r0,r10
 	or	r7,r7,r6
 	or	r7,r7,r6
 	blt	cr6,79f
 	blt	cr6,79f
 27:	ld	r0,8(r4)
 27:	ld	r0,8(r4)
@@ -129,35 +137,35 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
 
 
 28:	ld	r0,0(r4)	/* 4+2n loads, 3+2n stores */
 28:	ld	r0,0(r4)	/* 4+2n loads, 3+2n stores */
 29:	ldu	r9,8(r4)
 29:	ldu	r9,8(r4)
-	sld	r8,r0,r10
+	sLd	r8,r0,r10
 	addi	r3,r3,-8
 	addi	r3,r3,-8
 	blt	cr6,5f
 	blt	cr6,5f
 30:	ld	r0,8(r4)
 30:	ld	r0,8(r4)
-	srd	r12,r9,r11
-	sld	r6,r9,r10
+	sHd	r12,r9,r11
+	sLd	r6,r9,r10
 31:	ldu	r9,16(r4)
 31:	ldu	r9,16(r4)
 	or	r12,r8,r12
 	or	r12,r8,r12
-	srd	r7,r0,r11
-	sld	r8,r0,r10
+	sHd	r7,r0,r11
+	sLd	r8,r0,r10
 	addi	r3,r3,16
 	addi	r3,r3,16
 	beq	cr6,78f
 	beq	cr6,78f
 
 
 1:	or	r7,r7,r6
 1:	or	r7,r7,r6
 32:	ld	r0,8(r4)
 32:	ld	r0,8(r4)
 76:	std	r12,8(r3)
 76:	std	r12,8(r3)
-2:	srd	r12,r9,r11
-	sld	r6,r9,r10
+2:	sHd	r12,r9,r11
+	sLd	r6,r9,r10
 33:	ldu	r9,16(r4)
 33:	ldu	r9,16(r4)
 	or	r12,r8,r12
 	or	r12,r8,r12
 77:	stdu	r7,16(r3)
 77:	stdu	r7,16(r3)
-	srd	r7,r0,r11
-	sld	r8,r0,r10
+	sHd	r7,r0,r11
+	sLd	r8,r0,r10
 	bdnz	1b
 	bdnz	1b
 
 
 78:	std	r12,8(r3)
 78:	std	r12,8(r3)
 	or	r7,r7,r6
 	or	r7,r7,r6
 79:	std	r7,16(r3)
 79:	std	r7,16(r3)
-5:	srd	r12,r9,r11
+5:	sHd	r12,r9,r11
 	or	r12,r8,r12
 	or	r12,r8,r12
 80:	std	r12,24(r3)
 80:	std	r12,24(r3)
 	bne	6f
 	bne	6f
@@ -165,23 +173,38 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
 	blr
 	blr
 6:	cmpwi	cr1,r5,8
 6:	cmpwi	cr1,r5,8
 	addi	r3,r3,32
 	addi	r3,r3,32
-	sld	r9,r9,r10
+	sLd	r9,r9,r10
 	ble	cr1,7f
 	ble	cr1,7f
 34:	ld	r0,8(r4)
 34:	ld	r0,8(r4)
-	srd	r7,r0,r11
+	sHd	r7,r0,r11
 	or	r9,r7,r9
 	or	r9,r7,r9
 7:
 7:
 	bf	cr7*4+1,1f
 	bf	cr7*4+1,1f
+#ifdef __BIG_ENDIAN__
 	rotldi	r9,r9,32
 	rotldi	r9,r9,32
+#endif
 94:	stw	r9,0(r3)
 94:	stw	r9,0(r3)
+#ifdef __LITTLE_ENDIAN__
+	rotrdi	r9,r9,32
+#endif
 	addi	r3,r3,4
 	addi	r3,r3,4
 1:	bf	cr7*4+2,2f
 1:	bf	cr7*4+2,2f
+#ifdef __BIG_ENDIAN__
 	rotldi	r9,r9,16
 	rotldi	r9,r9,16
+#endif
 95:	sth	r9,0(r3)
 95:	sth	r9,0(r3)
+#ifdef __LITTLE_ENDIAN__
+	rotrdi	r9,r9,16
+#endif
 	addi	r3,r3,2
 	addi	r3,r3,2
 2:	bf	cr7*4+3,3f
 2:	bf	cr7*4+3,3f
+#ifdef __BIG_ENDIAN__
 	rotldi	r9,r9,8
 	rotldi	r9,r9,8
+#endif
 96:	stb	r9,0(r3)
 96:	stb	r9,0(r3)
+#ifdef __LITTLE_ENDIAN__
+	rotrdi	r9,r9,8
+#endif
 3:	li	r3,0
 3:	li	r3,0
 	blr
 	blr
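
The sLd/sHd macros encode the fact that, when an unaligned doubleword is rebuilt from two aligned loads, "shift towards the low-numbered address" is a left shift on big-endian but a right shift on little-endian. A standalone C illustration of that equivalence (this only models the shift arithmetic, not the assembly itself):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Rebuild the 8 bytes starting at byte_off from two aligned doublewords,
 * using only shifts.  The direction of the "towards low address" shift
 * flips with the byte order, exactly as sLd/sHd do. */
static uint64_t load_unaligned(const uint64_t *w, unsigned int byte_off)
{
	unsigned int s = byte_off * 8;

	if (s == 0)
		return w[0];
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	return (w[0] >> s) | (w[1] << (64 - s));	/* sLd=srd, sHd=sld */
#else
	return (w[0] << s) | (w[1] >> (64 - s));	/* sLd=sld, sHd=srd */
#endif
}

int main(void)
{
	union { unsigned char b[16]; uint64_t w[2]; } src;
	uint64_t want, got;
	int i;

	for (i = 0; i < 16; i++)
		src.b[i] = (unsigned char)i;

	memcpy(&want, src.b + 3, sizeof(want));	/* reference value */
	got = load_unaligned(src.w, 3);
	printf("%s\n", got == want ? "match" : "mismatch");
	return 0;
}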
 
 

+ 5 - 15
arch/powerpc/platforms/powernv/eeh-ioda.c

@@ -36,7 +36,6 @@
 #include "powernv.h"
 #include "powernv.h"
 #include "pci.h"
 #include "pci.h"
 
 
-static char *hub_diag = NULL;
 static int ioda_eeh_nb_init = 0;
 static int ioda_eeh_nb_init = 0;
 
 
 static int ioda_eeh_event(struct notifier_block *nb,
 static int ioda_eeh_event(struct notifier_block *nb,
@@ -140,15 +139,6 @@ static int ioda_eeh_post_init(struct pci_controller *hose)
 		ioda_eeh_nb_init = 1;
 		ioda_eeh_nb_init = 1;
 	}
 	}
 
 
-	/* We needn't HUB diag-data on PHB3 */
-	if (phb->type == PNV_PHB_IODA1 && !hub_diag) {
-		hub_diag = (char *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
-		if (!hub_diag) {
-			pr_err("%s: Out of memory !\n", __func__);
-			return -ENOMEM;
-		}
-	}
-
 #ifdef CONFIG_DEBUG_FS
 #ifdef CONFIG_DEBUG_FS
 	if (phb->dbgfs) {
 	if (phb->dbgfs) {
 		debugfs_create_file("err_injct_outbound", 0600,
 		debugfs_create_file("err_injct_outbound", 0600,
@@ -633,11 +623,10 @@ static void ioda_eeh_hub_diag_common(struct OpalIoP7IOCErrorData *data)
 static void ioda_eeh_hub_diag(struct pci_controller *hose)
 static void ioda_eeh_hub_diag(struct pci_controller *hose)
 {
 {
 	struct pnv_phb *phb = hose->private_data;
 	struct pnv_phb *phb = hose->private_data;
-	struct OpalIoP7IOCErrorData *data;
+	struct OpalIoP7IOCErrorData *data = &phb->diag.hub_diag;
 	long rc;
 	long rc;
 
 
-	data = (struct OpalIoP7IOCErrorData *)ioda_eeh_hub_diag;
-	rc = opal_pci_get_hub_diag_data(phb->hub_id, data, PAGE_SIZE);
+	rc = opal_pci_get_hub_diag_data(phb->hub_id, data, sizeof(*data));
 	if (rc != OPAL_SUCCESS) {
 	if (rc != OPAL_SUCCESS) {
 		pr_warning("%s: Failed to get HUB#%llx diag-data (%ld)\n",
 		pr_warning("%s: Failed to get HUB#%llx diag-data (%ld)\n",
 			   __func__, phb->hub_id, rc);
 			   __func__, phb->hub_id, rc);
@@ -820,14 +809,15 @@ static void ioda_eeh_phb_diag(struct pci_controller *hose)
 	struct OpalIoPhbErrorCommon *common;
 	struct OpalIoPhbErrorCommon *common;
 	long rc;
 	long rc;
 
 
-	common = (struct OpalIoPhbErrorCommon *)phb->diag.blob;
-	rc = opal_pci_get_phb_diag_data2(phb->opal_id, common, PAGE_SIZE);
+	rc = opal_pci_get_phb_diag_data2(phb->opal_id, phb->diag.blob,
+					 PNV_PCI_DIAG_BUF_SIZE);
 	if (rc != OPAL_SUCCESS) {
 	if (rc != OPAL_SUCCESS) {
 		pr_warning("%s: Failed to get diag-data for PHB#%x (%ld)\n",
 		pr_warning("%s: Failed to get diag-data for PHB#%x (%ld)\n",
 			    __func__, hose->global_number, rc);
 			    __func__, hose->global_number, rc);
 		return;
 		return;
 	}
 	}
 
 
+	common = (struct OpalIoPhbErrorCommon *)phb->diag.blob;
 	switch (common->ioType) {
 	switch (common->ioType) {
 	case OPAL_PHB_ERROR_DATA_TYPE_P7IOC:
 	case OPAL_PHB_ERROR_DATA_TYPE_P7IOC:
 		ioda_eeh_p7ioc_phb_diag(hose, common);
 		ioda_eeh_p7ioc_phb_diag(hose, common);

+ 6 - 6
arch/powerpc/platforms/powernv/opal-lpc.c

@@ -24,25 +24,25 @@ static int opal_lpc_chip_id = -1;
 static u8 opal_lpc_inb(unsigned long port)
 static u8 opal_lpc_inb(unsigned long port)
 {
 {
 	int64_t rc;
 	int64_t rc;
-	uint32_t data;
+	__be32 data;
 
 
 	if (opal_lpc_chip_id < 0 || port > 0xffff)
 	if (opal_lpc_chip_id < 0 || port > 0xffff)
 		return 0xff;
 		return 0xff;
 	rc = opal_lpc_read(opal_lpc_chip_id, OPAL_LPC_IO, port, &data, 1);
 	rc = opal_lpc_read(opal_lpc_chip_id, OPAL_LPC_IO, port, &data, 1);
-	return rc ? 0xff : data;
+	return rc ? 0xff : be32_to_cpu(data);
 }
 }
 
 
 static __le16 __opal_lpc_inw(unsigned long port)
 static __le16 __opal_lpc_inw(unsigned long port)
 {
 {
 	int64_t rc;
 	int64_t rc;
-	uint32_t data;
+	__be32 data;
 
 
 	if (opal_lpc_chip_id < 0 || port > 0xfffe)
 	if (opal_lpc_chip_id < 0 || port > 0xfffe)
 		return 0xffff;
 		return 0xffff;
 	if (port & 1)
 	if (port & 1)
 		return (__le16)opal_lpc_inb(port) << 8 | opal_lpc_inb(port + 1);
 		return (__le16)opal_lpc_inb(port) << 8 | opal_lpc_inb(port + 1);
 	rc = opal_lpc_read(opal_lpc_chip_id, OPAL_LPC_IO, port, &data, 2);
 	rc = opal_lpc_read(opal_lpc_chip_id, OPAL_LPC_IO, port, &data, 2);
-	return rc ? 0xffff : data;
+	return rc ? 0xffff : be32_to_cpu(data);
 }
 }
 static u16 opal_lpc_inw(unsigned long port)
 static u16 opal_lpc_inw(unsigned long port)
 {
 {
@@ -52,7 +52,7 @@ static u16 opal_lpc_inw(unsigned long port)
 static __le32 __opal_lpc_inl(unsigned long port)
 static __le32 __opal_lpc_inl(unsigned long port)
 {
 {
 	int64_t rc;
 	int64_t rc;
-	uint32_t data;
+	__be32 data;
 
 
 	if (opal_lpc_chip_id < 0 || port > 0xfffc)
 	if (opal_lpc_chip_id < 0 || port > 0xfffc)
 		return 0xffffffff;
 		return 0xffffffff;
@@ -62,7 +62,7 @@ static __le32 __opal_lpc_inl(unsigned long port)
 		       (__le32)opal_lpc_inb(port + 2) <<  8 |
 		       (__le32)opal_lpc_inb(port + 2) <<  8 |
 			       opal_lpc_inb(port + 3);
 			       opal_lpc_inb(port + 3);
 	rc = opal_lpc_read(opal_lpc_chip_id, OPAL_LPC_IO, port, &data, 4);
 	rc = opal_lpc_read(opal_lpc_chip_id, OPAL_LPC_IO, port, &data, 4);
-	return rc ? 0xffffffff : data;
+	return rc ? 0xffffffff : be32_to_cpu(data);
 }
 }
 
 
 static u32 opal_lpc_inl(unsigned long port)
 static u32 opal_lpc_inl(unsigned long port)

+ 3 - 1
arch/powerpc/platforms/powernv/opal-xscom.c

@@ -96,9 +96,11 @@ static int opal_scom_read(scom_map_t map, u64 reg, u64 *value)
 {
 {
 	struct opal_scom_map *m = map;
 	struct opal_scom_map *m = map;
 	int64_t rc;
 	int64_t rc;
+	__be64 v;
 
 
 	reg = opal_scom_unmangle(reg);
 	reg = opal_scom_unmangle(reg);
-	rc = opal_xscom_read(m->chip, m->addr + reg, (uint64_t *)__pa(value));
+	rc = opal_xscom_read(m->chip, m->addr + reg, (__be64 *)__pa(&v));
+	*value = be64_to_cpu(v);
 	return opal_xscom_err_xlate(rc);
 	return opal_xscom_err_xlate(rc);
 }
 }
 
 

+ 3 - 1
arch/powerpc/platforms/powernv/pci.h

@@ -172,11 +172,13 @@ struct pnv_phb {
 		} ioda;
 		} ioda;
 	};
 	};
 
 
-	/* PHB status structure */
+	/* PHB and hub status structure */
 	union {
 	union {
 		unsigned char			blob[PNV_PCI_DIAG_BUF_SIZE];
 		unsigned char			blob[PNV_PCI_DIAG_BUF_SIZE];
 		struct OpalIoP7IOCPhbErrorData	p7ioc;
 		struct OpalIoP7IOCPhbErrorData	p7ioc;
+		struct OpalIoP7IOCErrorData 	hub_diag;
 	} diag;
 	} diag;
+
 };
 };
 
 
 extern struct pci_ops pnv_pci_ops;
 extern struct pci_ops pnv_pci_ops;
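
Together with the eeh-ioda.c hunk earlier, this turns the per-PHB diag blob into shared storage for the hub diag data as well, so no separate page has to be allocated for it. A userspace sketch of the buffer-reuse pattern (all types, sizes and names below are made up for illustration; only the shape of the union mirrors the patch):

#include <stdio.h>
#include <string.h>

#define DIAG_BUF_SIZE 4096

struct hub_diag_data { unsigned int hub_id; char log[512]; };
struct phb_diag_data { unsigned int io_type; char log[1024]; };

/* One statically sized buffer, viewable either as a raw blob or as
 * whichever diag-data layout the firmware returned. */
struct phb {
	union {
		unsigned char		blob[DIAG_BUF_SIZE];
		struct phb_diag_data	phb_diag;
		struct hub_diag_data	hub_diag;
	} diag;
};

/* Stand-in for the firmware call that fills the caller's buffer. */
static int get_hub_diag_data(void *buf, size_t len)
{
	memset(buf, 0, len);
	return 0;
}

int main(void)
{
	struct phb phb;
	struct hub_diag_data *data = &phb.diag.hub_diag;

	if (get_hub_diag_data(data, sizeof(*data)) == 0)
		printf("hub diag fetched into shared %zu-byte buffer\n",
		       sizeof(phb.diag));
	return 0;
}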

+ 6 - 6
arch/powerpc/platforms/pseries/lparcfg.c

@@ -157,7 +157,7 @@ static void parse_ppp_data(struct seq_file *m)
 {
 {
 	struct hvcall_ppp_data ppp_data;
 	struct hvcall_ppp_data ppp_data;
 	struct device_node *root;
 	struct device_node *root;
-	const int *perf_level;
+	const __be32 *perf_level;
 	int rc;
 	int rc;
 
 
 	rc = h_get_ppp(&ppp_data);
 	rc = h_get_ppp(&ppp_data);
@@ -201,7 +201,7 @@ static void parse_ppp_data(struct seq_file *m)
 		perf_level = of_get_property(root,
 		perf_level = of_get_property(root,
 				"ibm,partition-performance-parameters-level",
 				"ibm,partition-performance-parameters-level",
 					     NULL);
 					     NULL);
-		if (perf_level && (*perf_level >= 1)) {
+		if (perf_level && (be32_to_cpup(perf_level) >= 1)) {
 			seq_printf(m,
 			seq_printf(m,
 			    "physical_procs_allocated_to_virtualization=%d\n",
 			    "physical_procs_allocated_to_virtualization=%d\n",
 				   ppp_data.phys_platform_procs);
 				   ppp_data.phys_platform_procs);
@@ -435,7 +435,7 @@ static int pseries_lparcfg_data(struct seq_file *m, void *v)
 	int partition_potential_processors;
 	int partition_potential_processors;
 	int partition_active_processors;
 	int partition_active_processors;
 	struct device_node *rtas_node;
 	struct device_node *rtas_node;
-	const int *lrdrp = NULL;
+	const __be32 *lrdrp = NULL;
 
 
 	rtas_node = of_find_node_by_path("/rtas");
 	rtas_node = of_find_node_by_path("/rtas");
 	if (rtas_node)
 	if (rtas_node)
@@ -444,7 +444,7 @@ static int pseries_lparcfg_data(struct seq_file *m, void *v)
 	if (lrdrp == NULL) {
 	if (lrdrp == NULL) {
 		partition_potential_processors = vdso_data->processorCount;
 		partition_potential_processors = vdso_data->processorCount;
 	} else {
 	} else {
-		partition_potential_processors = *(lrdrp + 4);
+		partition_potential_processors = be32_to_cpup(lrdrp + 4);
 	}
 	}
 	of_node_put(rtas_node);
 	of_node_put(rtas_node);
 
 
@@ -654,7 +654,7 @@ static int lparcfg_data(struct seq_file *m, void *v)
 	const char *model = "";
 	const char *model = "";
 	const char *system_id = "";
 	const char *system_id = "";
 	const char *tmp;
 	const char *tmp;
-	const unsigned int *lp_index_ptr;
+	const __be32 *lp_index_ptr;
 	unsigned int lp_index = 0;
 	unsigned int lp_index = 0;
 
 
 	seq_printf(m, "%s %s\n", MODULE_NAME, MODULE_VERS);
 	seq_printf(m, "%s %s\n", MODULE_NAME, MODULE_VERS);
@@ -670,7 +670,7 @@ static int lparcfg_data(struct seq_file *m, void *v)
 		lp_index_ptr = of_get_property(rootdn, "ibm,partition-no",
 		lp_index_ptr = of_get_property(rootdn, "ibm,partition-no",
 					NULL);
 					NULL);
 		if (lp_index_ptr)
 		if (lp_index_ptr)
-			lp_index = *lp_index_ptr;
+			lp_index = be32_to_cpup(lp_index_ptr);
 		of_node_put(rootdn);
 		of_node_put(rootdn);
 	}
 	}
 	seq_printf(m, "serial_number=%s\n", system_id);
 	seq_printf(m, "serial_number=%s\n", system_id);

+ 15 - 13
arch/powerpc/platforms/pseries/msi.c

@@ -130,7 +130,8 @@ static int check_req(struct pci_dev *pdev, int nvec, char *prop_name)
 {
 {
 	struct device_node *dn;
 	struct device_node *dn;
 	struct pci_dn *pdn;
 	struct pci_dn *pdn;
-	const u32 *req_msi;
+	const __be32 *p;
+	u32 req_msi;
 
 
 	pdn = pci_get_pdn(pdev);
 	pdn = pci_get_pdn(pdev);
 	if (!pdn)
 	if (!pdn)
@@ -138,19 +139,20 @@ static int check_req(struct pci_dev *pdev, int nvec, char *prop_name)
 
 
 	dn = pdn->node;
 	dn = pdn->node;
 
 
-	req_msi = of_get_property(dn, prop_name, NULL);
-	if (!req_msi) {
+	p = of_get_property(dn, prop_name, NULL);
+	if (!p) {
 		pr_debug("rtas_msi: No %s on %s\n", prop_name, dn->full_name);
 		pr_debug("rtas_msi: No %s on %s\n", prop_name, dn->full_name);
 		return -ENOENT;
 		return -ENOENT;
 	}
 	}
 
 
-	if (*req_msi < nvec) {
+	req_msi = be32_to_cpup(p);
+	if (req_msi < nvec) {
 		pr_debug("rtas_msi: %s requests < %d MSIs\n", prop_name, nvec);
 		pr_debug("rtas_msi: %s requests < %d MSIs\n", prop_name, nvec);
 
 
-		if (*req_msi == 0) /* Be paranoid */
+		if (req_msi == 0) /* Be paranoid */
 			return -ENOSPC;
 			return -ENOSPC;
 
 
-		return *req_msi;
+		return req_msi;
 	}
 	}
 
 
 	return 0;
 	return 0;
@@ -171,7 +173,7 @@ static int check_req_msix(struct pci_dev *pdev, int nvec)
 static struct device_node *find_pe_total_msi(struct pci_dev *dev, int *total)
 static struct device_node *find_pe_total_msi(struct pci_dev *dev, int *total)
 {
 {
 	struct device_node *dn;
 	struct device_node *dn;
-	const u32 *p;
+	const __be32 *p;
 
 
 	dn = of_node_get(pci_device_to_OF_node(dev));
 	dn = of_node_get(pci_device_to_OF_node(dev));
 	while (dn) {
 	while (dn) {
@@ -179,7 +181,7 @@ static struct device_node *find_pe_total_msi(struct pci_dev *dev, int *total)
 		if (p) {
 		if (p) {
 			pr_debug("rtas_msi: found prop on dn %s\n",
 			pr_debug("rtas_msi: found prop on dn %s\n",
 				dn->full_name);
 				dn->full_name);
-			*total = *p;
+			*total = be32_to_cpup(p);
 			return dn;
 			return dn;
 		}
 		}
 
 
@@ -232,13 +234,13 @@ struct msi_counts {
 static void *count_non_bridge_devices(struct device_node *dn, void *data)
 static void *count_non_bridge_devices(struct device_node *dn, void *data)
 {
 {
 	struct msi_counts *counts = data;
 	struct msi_counts *counts = data;
-	const u32 *p;
+	const __be32 *p;
 	u32 class;
 	u32 class;
 
 
 	pr_debug("rtas_msi: counting %s\n", dn->full_name);
 	pr_debug("rtas_msi: counting %s\n", dn->full_name);
 
 
 	p = of_get_property(dn, "class-code", NULL);
 	p = of_get_property(dn, "class-code", NULL);
-	class = p ? *p : 0;
+	class = p ? be32_to_cpup(p) : 0;
 
 
 	if ((class >> 8) != PCI_CLASS_BRIDGE_PCI)
 	if ((class >> 8) != PCI_CLASS_BRIDGE_PCI)
 		counts->num_devices++;
 		counts->num_devices++;
@@ -249,7 +251,7 @@ static void *count_non_bridge_devices(struct device_node *dn, void *data)
 static void *count_spare_msis(struct device_node *dn, void *data)
 static void *count_spare_msis(struct device_node *dn, void *data)
 {
 {
 	struct msi_counts *counts = data;
 	struct msi_counts *counts = data;
-	const u32 *p;
+	const __be32 *p;
 	int req;
 	int req;
 
 
 	if (dn == counts->requestor)
 	if (dn == counts->requestor)
@@ -260,11 +262,11 @@ static void *count_spare_msis(struct device_node *dn, void *data)
 		req = 0;
 		req = 0;
 		p = of_get_property(dn, "ibm,req#msi", NULL);
 		p = of_get_property(dn, "ibm,req#msi", NULL);
 		if (p)
 		if (p)
-			req = *p;
+			req = be32_to_cpup(p);
 
 
 		p = of_get_property(dn, "ibm,req#msi-x", NULL);
 		p = of_get_property(dn, "ibm,req#msi-x", NULL);
 		if (p)
 		if (p)
-			req = max(req, (int)*p);
+			req = max(req, (int)be32_to_cpup(p));
 	}
 	}
 
 
 	if (req < counts->quota)
 	if (req < counts->quota)

+ 23 - 23
arch/powerpc/platforms/pseries/nvram.c

@@ -43,8 +43,8 @@ static char nvram_buf[NVRW_CNT];	/* assume this is in the first 4GB */
 static DEFINE_SPINLOCK(nvram_lock);
 static DEFINE_SPINLOCK(nvram_lock);
 
 
 struct err_log_info {
 struct err_log_info {
-	int error_type;
-	unsigned int seq_num;
+	__be32 error_type;
+	__be32 seq_num;
 };
 };
 
 
 struct nvram_os_partition {
 struct nvram_os_partition {
@@ -79,9 +79,9 @@ static const char *pseries_nvram_os_partitions[] = {
 };
 };
 
 
 struct oops_log_info {
 struct oops_log_info {
-	u16 version;
-	u16 report_length;
-	u64 timestamp;
+	__be16 version;
+	__be16 report_length;
+	__be64 timestamp;
 } __attribute__((packed));
 } __attribute__((packed));
 
 
 static void oops_to_nvram(struct kmsg_dumper *dumper,
 static void oops_to_nvram(struct kmsg_dumper *dumper,
@@ -291,8 +291,8 @@ int nvram_write_os_partition(struct nvram_os_partition *part, char * buff,
 		length = part->size;
 		length = part->size;
 	}
 	}
 
 
-	info.error_type = err_type;
-	info.seq_num = error_log_cnt;
+	info.error_type = cpu_to_be32(err_type);
+	info.seq_num = cpu_to_be32(error_log_cnt);
 
 
 	tmp_index = part->index;
 	tmp_index = part->index;
 
 
@@ -364,8 +364,8 @@ int nvram_read_partition(struct nvram_os_partition *part, char *buff,
 	}
 	}
 
 
 	if (part->os_partition) {
 	if (part->os_partition) {
-		*error_log_cnt = info.seq_num;
-		*err_type = info.error_type;
+		*error_log_cnt = be32_to_cpu(info.seq_num);
+		*err_type = be32_to_cpu(info.error_type);
 	}
 	}
 
 
 	return 0;
 	return 0;
@@ -529,9 +529,9 @@ static int zip_oops(size_t text_len)
 		pr_err("nvram: logging uncompressed oops/panic report\n");
 		pr_err("nvram: logging uncompressed oops/panic report\n");
 		return -1;
 		return -1;
 	}
 	}
-	oops_hdr->version = OOPS_HDR_VERSION;
-	oops_hdr->report_length = (u16) zipped_len;
-	oops_hdr->timestamp = get_seconds();
+	oops_hdr->version = cpu_to_be16(OOPS_HDR_VERSION);
+	oops_hdr->report_length = cpu_to_be16(zipped_len);
+	oops_hdr->timestamp = cpu_to_be64(get_seconds());
 	return 0;
 	return 0;
 }
 }
 
 
@@ -574,9 +574,9 @@ static int nvram_pstore_write(enum pstore_type_id type,
 				clobbering_unread_rtas_event())
 				clobbering_unread_rtas_event())
 		return -1;
 		return -1;
 
 
-	oops_hdr->version = OOPS_HDR_VERSION;
-	oops_hdr->report_length = (u16) size;
-	oops_hdr->timestamp = get_seconds();
+	oops_hdr->version = cpu_to_be16(OOPS_HDR_VERSION);
+	oops_hdr->report_length = cpu_to_be16(size);
+	oops_hdr->timestamp = cpu_to_be64(get_seconds());
 
 
 	if (compressed)
 	if (compressed)
 		err_type = ERR_TYPE_KERNEL_PANIC_GZ;
 		err_type = ERR_TYPE_KERNEL_PANIC_GZ;
@@ -670,16 +670,16 @@ static ssize_t nvram_pstore_read(u64 *id, enum pstore_type_id *type,
 		size_t length, hdr_size;
 		size_t length, hdr_size;
 
 
 		oops_hdr = (struct oops_log_info *)buff;
 		oops_hdr = (struct oops_log_info *)buff;
-		if (oops_hdr->version < OOPS_HDR_VERSION) {
+		if (be16_to_cpu(oops_hdr->version) < OOPS_HDR_VERSION) {
 			/* Old format oops header had 2-byte record size */
 			/* Old format oops header had 2-byte record size */
 			hdr_size = sizeof(u16);
 			hdr_size = sizeof(u16);
-			length = oops_hdr->version;
+			length = be16_to_cpu(oops_hdr->version);
 			time->tv_sec = 0;
 			time->tv_sec = 0;
 			time->tv_nsec = 0;
 			time->tv_nsec = 0;
 		} else {
 		} else {
 			hdr_size = sizeof(*oops_hdr);
 			hdr_size = sizeof(*oops_hdr);
-			length = oops_hdr->report_length;
-			time->tv_sec = oops_hdr->timestamp;
+			length = be16_to_cpu(oops_hdr->report_length);
+			time->tv_sec = be64_to_cpu(oops_hdr->timestamp);
 			time->tv_nsec = 0;
 			time->tv_nsec = 0;
 		}
 		}
 		*buf = kmalloc(length, GFP_KERNEL);
 		*buf = kmalloc(length, GFP_KERNEL);
@@ -889,13 +889,13 @@ static void oops_to_nvram(struct kmsg_dumper *dumper,
 		kmsg_dump_get_buffer(dumper, false,
 		kmsg_dump_get_buffer(dumper, false,
 				     oops_data, oops_data_sz, &text_len);
 				     oops_data, oops_data_sz, &text_len);
 		err_type = ERR_TYPE_KERNEL_PANIC;
 		err_type = ERR_TYPE_KERNEL_PANIC;
-		oops_hdr->version = OOPS_HDR_VERSION;
-		oops_hdr->report_length = (u16) text_len;
-		oops_hdr->timestamp = get_seconds();
+		oops_hdr->version = cpu_to_be16(OOPS_HDR_VERSION);
+		oops_hdr->report_length = cpu_to_be16(text_len);
+		oops_hdr->timestamp = cpu_to_be64(get_seconds());
 	}
 	}
 
 
 	(void) nvram_write_os_partition(&oops_log_partition, oops_buf,
 	(void) nvram_write_os_partition(&oops_log_partition, oops_buf,
-		(int) (sizeof(*oops_hdr) + oops_hdr->report_length), err_type,
+		(int) (sizeof(*oops_hdr) + text_len), err_type,
 		++oops_count);
 		++oops_count);
 
 
 	spin_unlock_irqrestore(&lock, flags);
 	spin_unlock_irqrestore(&lock, flags);

+ 4 - 4
arch/powerpc/platforms/pseries/pci.c

@@ -113,7 +113,7 @@ int pseries_root_bridge_prepare(struct pci_host_bridge *bridge)
 {
 	struct device_node *dn, *pdn;
 	struct pci_bus *bus;
-	const uint32_t *pcie_link_speed_stats;
+	const __be32 *pcie_link_speed_stats;

 	bus = bridge->bus;

@@ -122,7 +122,7 @@ int pseries_root_bridge_prepare(struct pci_host_bridge *bridge)
 		return 0;

 	for (pdn = dn; pdn != NULL; pdn = of_get_next_parent(pdn)) {
-		pcie_link_speed_stats = (const uint32_t *) of_get_property(pdn,
+		pcie_link_speed_stats = of_get_property(pdn,
 			"ibm,pcie-link-speed-stats", NULL);
 		if (pcie_link_speed_stats)
 			break;
@@ -135,7 +135,7 @@ int pseries_root_bridge_prepare(struct pci_host_bridge *bridge)
 		return 0;
 	}

-	switch (pcie_link_speed_stats[0]) {
+	switch (be32_to_cpup(pcie_link_speed_stats)) {
 	case 0x01:
 		bus->max_bus_speed = PCIE_SPEED_2_5GT;
 		break;
@@ -147,7 +147,7 @@ int pseries_root_bridge_prepare(struct pci_host_bridge *bridge)
 		break;
 	}

-	switch (pcie_link_speed_stats[1]) {
+	switch (be32_to_cpup(&pcie_link_speed_stats[1])) {
 	case 0x01:
 		bus->cur_bus_speed = PCIE_SPEED_2_5GT;
 		break;
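
For reference, "ibm,pcie-link-speed-stats" is a device-tree property, and device-tree cells are always big-endian; hence the const __be32 * type and be32_to_cpup(). A hedged sketch of reading both cells (illustrative only, error handling trimmed):

	const __be32 *prop;
	u32 max_speed, cur_speed;

	prop = of_get_property(pdn, "ibm,pcie-link-speed-stats", NULL);
	if (!prop)
		return 0;

	max_speed = be32_to_cpup(&prop[0]);	/* first cell: supported link speed */
	cur_speed = be32_to_cpup(&prop[1]);	/* second cell: current link speed */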

+ 0 - 1
arch/s390/Kconfig

@@ -135,7 +135,6 @@ config S390
 	select HAVE_SYSCALL_TRACEPOINTS
 	select HAVE_UID16 if 32BIT
 	select HAVE_VIRT_CPU_ACCOUNTING
-	select INIT_ALL_POSSIBLE
 	select KTIME_SCALAR if 32BIT
 	select MODULES_USE_ELF_RELA
 	select OLD_SIGACTION

+ 2 - 0
arch/s390/include/asm/smp.h

@@ -31,6 +31,7 @@ extern void smp_yield(void);
 extern void smp_stop_cpu(void);
 extern void smp_cpu_set_polarization(int cpu, int val);
 extern int smp_cpu_get_polarization(int cpu);
+extern void smp_fill_possible_mask(void);

 #else /* CONFIG_SMP */

@@ -50,6 +51,7 @@ static inline int smp_vcpu_scheduled(int cpu) { return 1; }
 static inline void smp_yield_cpu(int cpu) { }
 static inline void smp_yield(void) { }
 static inline void smp_stop_cpu(void) { }
+static inline void smp_fill_possible_mask(void) { }

 #endif /* CONFIG_SMP */


+ 1 - 0
arch/s390/kernel/setup.c

@@ -1023,6 +1023,7 @@ void __init setup_arch(char **cmdline_p)
 	setup_vmcoreinfo();
 	setup_lowcore();

+	smp_fill_possible_mask();
        cpu_init();
 	s390_init_cpu_topology();


+ 16 - 9
arch/s390/kernel/smp.c

@@ -721,18 +721,14 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle)
 	return 0;
 }

-static int __init setup_possible_cpus(char *s)
-{
-	int max, cpu;
+static unsigned int setup_possible_cpus __initdata;

-	if (kstrtoint(s, 0, &max) < 0)
-		return 0;
-	init_cpu_possible(cpumask_of(0));
-	for (cpu = 1; cpu < max && cpu < nr_cpu_ids; cpu++)
-		set_cpu_possible(cpu, true);
+static int __init _setup_possible_cpus(char *s)
+{
+	get_option(&s, &setup_possible_cpus);
 	return 0;
 }
-early_param("possible_cpus", setup_possible_cpus);
+early_param("possible_cpus", _setup_possible_cpus);

 #ifdef CONFIG_HOTPLUG_CPU

@@ -775,6 +771,17 @@ void __noreturn cpu_die(void)

 #endif /* CONFIG_HOTPLUG_CPU */

+void __init smp_fill_possible_mask(void)
+{
+	unsigned int possible, cpu;
+
+	possible = setup_possible_cpus;
+	if (!possible)
+		possible = MACHINE_IS_VM ? 64 : nr_cpu_ids;
+	for (cpu = 0; cpu < possible && cpu < nr_cpu_ids; cpu++)
+		set_cpu_possible(cpu, true);
+}
+
 void __init smp_prepare_cpus(unsigned int max_cpus)
 {
 	/* request the 0x1201 emergency signal external interrupt */
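
For reference, the early-parameter handler above now only records the requested count via get_option(); the possible mask is filled later from setup_arch(), once nr_cpu_ids and the machine type are known. A simplified sketch of the two-phase pattern (identifier names are hypothetical):

	static unsigned int requested_cpus __initdata;

	static int __init parse_possible_cpus(char *s)
	{
		get_option(&s, &requested_cpus);	/* phase 1: just remember the value */
		return 0;
	}
	early_param("possible_cpus", parse_possible_cpus);

	void __init fill_possible_mask(void)		/* phase 2: called from setup_arch() */
	{
		unsigned int cpu, n = requested_cpus ? requested_cpus : nr_cpu_ids;

		for (cpu = 0; cpu < n && cpu < nr_cpu_ids; cpu++)
			set_cpu_possible(cpu, true);
	}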

+ 2 - 0
arch/s390/pci/pci_event.c

@@ -75,6 +75,7 @@ void zpci_event_availability(void *data)
 		if (!zdev || zdev->state == ZPCI_FN_STATE_CONFIGURED)
 			break;
 		zdev->state = ZPCI_FN_STATE_CONFIGURED;
+		zdev->fh = ccdf->fh;
 		ret = zpci_enable_device(zdev);
 		if (ret)
 			break;
@@ -101,6 +102,7 @@ void zpci_event_availability(void *data)
 		if (pdev)
 			pci_stop_and_remove_bus_device(pdev);

+		zdev->fh = ccdf->fh;
 		zpci_disable_device(zdev);
 		zdev->state = ZPCI_FN_STATE_STANDBY;
 		break;

+ 5 - 0
arch/sh/kernel/sh_ksyms_32.c

@@ -20,6 +20,11 @@ EXPORT_SYMBOL(csum_partial_copy_generic);
 EXPORT_SYMBOL(copy_page);
 EXPORT_SYMBOL(__clear_user);
 EXPORT_SYMBOL(empty_zero_page);
+#ifdef CONFIG_FLATMEM
+/* need in pfn_valid macro */
+EXPORT_SYMBOL(min_low_pfn);
+EXPORT_SYMBOL(max_low_pfn);
+#endif

 #define DECLARE_EXPORT(name)		\
 	extern void name(void);EXPORT_SYMBOL(name)

+ 1 - 1
arch/sh/lib/Makefile

@@ -6,7 +6,7 @@ lib-y  = delay.o memmove.o memchr.o \
 	 checksum.o strlen.o div64.o div64-generic.o

 # Extracted from libgcc
-lib-y += movmem.o ashldi3.o ashrdi3.o lshrdi3.o \
+obj-y += movmem.o ashldi3.o ashrdi3.o lshrdi3.o \
 	 ashlsi3.o ashrsi3.o ashiftrt.o lshrsi3.o \
 	 udiv_qrnnd.o


+ 2 - 2
arch/sparc/include/asm/pgtable_64.h

@@ -619,7 +619,7 @@ static inline unsigned long pte_present(pte_t pte)
 }

 #define pte_accessible pte_accessible
-static inline unsigned long pte_accessible(pte_t a)
+static inline unsigned long pte_accessible(struct mm_struct *mm, pte_t a)
 {
 	return pte_val(a) & _PAGE_VALID;
 }
@@ -847,7 +847,7 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
 	 * SUN4V NOTE: _PAGE_VALID is the same value in both the SUN4U
 	 *             and SUN4V pte layout, so this inline test is fine.
 	 */
-	if (likely(mm != &init_mm) && pte_accessible(orig))
+	if (likely(mm != &init_mm) && pte_accessible(mm, orig))
 		tlb_batch_add(mm, addr, ptep, orig, fullmm);
 }


+ 2 - 2
arch/sparc/include/asm/uaccess_64.h

@@ -262,8 +262,8 @@ extern unsigned long __must_check __clear_user(void __user *, unsigned long);
 extern __must_check long strlen_user(const char __user *str);
 extern __must_check long strnlen_user(const char __user *str, long n);

-#define __copy_to_user_inatomic ___copy_to_user
-#define __copy_from_user_inatomic ___copy_from_user
+#define __copy_to_user_inatomic __copy_to_user
+#define __copy_from_user_inatomic __copy_from_user

 struct pt_regs;
 extern unsigned long compute_effective_address(struct pt_regs *,

+ 1 - 1
arch/sparc/kernel/iommu.c

@@ -854,7 +854,7 @@ int dma_supported(struct device *dev, u64 device_mask)
 		return 1;

 #ifdef CONFIG_PCI
-	if (dev->bus == &pci_bus_type)
+	if (dev_is_pci(dev))
 		return pci64_dma_supported(to_pci_dev(dev), device_mask);
 #endif


+ 2 - 3
arch/sparc/kernel/ioport.c

@@ -666,10 +666,9 @@ EXPORT_SYMBOL(dma_ops);
  */
 int dma_supported(struct device *dev, u64 mask)
 {
-#ifdef CONFIG_PCI
-	if (dev->bus == &pci_bus_type)
+	if (dev_is_pci(dev))
 		return 1;
-#endif
+
 	return 0;
 }
 EXPORT_SYMBOL(dma_supported);

+ 1 - 0
arch/sparc/kernel/kgdb_64.c

@@ -6,6 +6,7 @@
 #include <linux/kgdb.h>
 #include <linux/kdebug.h>
 #include <linux/ftrace.h>
+#include <linux/context_tracking.h>

 #include <asm/cacheflush.h>
 #include <asm/kdebug.h>

+ 2 - 1
arch/sparc/kernel/smp_64.c

@@ -123,11 +123,12 @@ void smp_callin(void)
 		rmb();

 	set_cpu_online(cpuid, true);
-	local_irq_enable();

 	/* idle thread is expected to have preempt disabled */
 	preempt_disable();

+	local_irq_enable();
+
 	cpu_startup_entry(CPUHP_ONLINE);
 }


+ 1 - 0
arch/x86/Kconfig

@@ -26,6 +26,7 @@ config X86
 	select HAVE_AOUT if X86_32
 	select HAVE_UNSTABLE_SCHED_CLOCK
 	select ARCH_SUPPORTS_NUMA_BALANCING
+	select ARCH_SUPPORTS_INT128 if X86_64
 	select ARCH_WANTS_PROT_NUMA_PROT_NONE
 	select HAVE_IDE
 	select HAVE_OPROFILE

+ 7 - 6
arch/x86/include/asm/fpu-internal.h

@@ -293,12 +293,13 @@ static inline int restore_fpu_checking(struct task_struct *tsk)
 	/* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception
 	   is pending.  Clear the x87 state here by setting it to fixed
 	   values. "m" is a random variable that should be in L1 */
-	alternative_input(
-		ASM_NOP8 ASM_NOP2,
-		"emms\n\t"		/* clear stack tags */
-		"fildl %P[addr]",	/* set F?P to defined value */
-		X86_FEATURE_FXSAVE_LEAK,
-		[addr] "m" (tsk->thread.fpu.has_fpu));
+	if (unlikely(static_cpu_has(X86_FEATURE_FXSAVE_LEAK))) {
+		asm volatile(
+			"fnclex\n\t"
+			"emms\n\t"
+			"fildl %P[addr]"	/* set F?P to defined value */
+			: : [addr] "m" (tsk->thread.fpu.has_fpu));
+	}

 	return fpu_restore_checking(&tsk->thread.fpu);
 }

+ 9 - 2
arch/x86/include/asm/pgtable.h

@@ -452,9 +452,16 @@ static inline int pte_present(pte_t a)
 }

 #define pte_accessible pte_accessible
-static inline int pte_accessible(pte_t a)
+static inline bool pte_accessible(struct mm_struct *mm, pte_t a)
 {
-	return pte_flags(a) & _PAGE_PRESENT;
+	if (pte_flags(a) & _PAGE_PRESENT)
+		return true;
+
+	if ((pte_flags(a) & (_PAGE_PROTNONE | _PAGE_NUMA)) &&
+			mm_tlb_flush_pending(mm))
+		return true;
+
+	return false;
 }

 static inline int pte_hidden(pte_t pte)

+ 11 - 0
arch/x86/include/asm/preempt.h

@@ -7,6 +7,12 @@

 DECLARE_PER_CPU(int, __preempt_count);

+/*
+ * We use the PREEMPT_NEED_RESCHED bit as an inverted NEED_RESCHED such
+ * that a decrement hitting 0 means we can and should reschedule.
+ */
+#define PREEMPT_ENABLED	(0 + PREEMPT_NEED_RESCHED)
+
 /*
  * We mask the PREEMPT_NEED_RESCHED bit so as not to confuse all current users
  * that think a non-zero value indicates we cannot preempt.
@@ -74,6 +80,11 @@ static __always_inline void __preempt_count_sub(int val)
 	__this_cpu_add_4(__preempt_count, -val);
 }

+/*
+ * Because we keep PREEMPT_NEED_RESCHED set when we do _not_ need to reschedule
+ * a decrement which hits zero means we have no preempt_count and should
+ * reschedule.
+ */
 static __always_inline bool __preempt_count_dec_and_test(void)
 static __always_inline bool __preempt_count_dec_and_test(void)
 {
 	GEN_UNARY_RMWcc("decl", __preempt_count, __percpu_arg(0), "e");
+ 2 - 1
arch/x86/kernel/cpu/intel.c

@@ -387,7 +387,8 @@ static void init_intel(struct cpuinfo_x86 *c)
 			set_cpu_cap(c, X86_FEATURE_PEBS);
 			set_cpu_cap(c, X86_FEATURE_PEBS);
 	}

+	if (c->x86 == 6 && cpu_has_clflush &&
+	    (c->x86_model == 29 || c->x86_model == 46 || c->x86_model == 47))
 		set_cpu_cap(c, X86_FEATURE_CLFLUSH_MONITOR);
 		set_cpu_cap(c, X86_FEATURE_CLFLUSH_MONITOR);

 #ifdef CONFIG_X86_64
+ 12 - 3
arch/x86/kernel/cpu/perf_event.h

@@ -262,11 +262,20 @@ struct cpu_hw_events {
 	__EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK, \
 	__EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK, \
 			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST_HSW)

-	EVENT_CONSTRAINT(0, 0, 0)
+/*
+ * We define the end marker as having a weight of -1
+ * to enable blacklisting of events using a counter bitmask
+ * of zero and thus a weight of zero.
+ * The end marker has a weight that cannot possibly be
+ * obtained from counting the bits in the bitmask.
+ */
+#define EVENT_CONSTRAINT_END { .weight = -1 }
 
 
+/*
+ * Check for end marker with weight == -1
+ */
 #define for_each_event_constraint(e, c)	\
 #define for_each_event_constraint(e, c)	\
+	for ((e) = (c); (e)->weight != -1; (e)++)
 
 
 /*
 /*
  * Extra registers for specific events.
  * Extra registers for specific events.

+ 2 - 2
arch/x86/kernel/entry_32.S

@@ -1082,7 +1082,7 @@ ENTRY(ftrace_caller)
 	pushl $0	/* Pass NULL as regs pointer */
 	pushl $0	/* Pass NULL as regs pointer */
 	movl 4*4(%esp), %eax
 	movl 0x4(%ebp), %edx
+	movl function_trace_op, %ecx
 	subl $MCOUNT_INSN_SIZE, %eax
 	subl $MCOUNT_INSN_SIZE, %eax

 .globl ftrace_call
 	movl 12*4(%esp), %eax	/* Load ip (1st parameter) */
 	movl 12*4(%esp), %eax	/* Load ip (1st parameter) */
 	subl $MCOUNT_INSN_SIZE, %eax	/* Adjust ip */
 	movl 0x4(%ebp), %edx	/* Load parent ip (2nd parameter) */
+	movl function_trace_op, %ecx /* Save ftrace_pos in 3rd parameter */
 	pushl %esp		/* Save pt_regs as 4th parameter */
 	pushl %esp		/* Save pt_regs as 4th parameter */

 GLOBAL(ftrace_regs_call)
+ 1 - 1
arch/x86/kernel/entry_64.S

@@ -88,7 +88,7 @@ END(function_hook)
 	MCOUNT_SAVE_FRAME \skip
 	MCOUNT_SAVE_FRAME \skip

 	/* Load the ftrace_ops into the 3rd parameter */
+	movq function_trace_op(%rip), %rdx
 
 
 	/* Load ip into the first parameter */
 	/* Load ip into the first parameter */
 	movq RIP(%rsp), %rdi
 	movq RIP(%rsp), %rdi

+ 4 - 4
arch/x86/kvm/lapic.c

@@ -1350,6 +1350,10 @@ void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
 		return;
 		return;
 	}

+		value &= ~MSR_IA32_APICBASE_BSP;
+	vcpu->arch.apic_base = value;
+
 	/* update jump label if enable bit changes */
 	/* update jump label if enable bit changes */
 	if ((vcpu->arch.apic_base ^ value) & MSR_IA32_APICBASE_ENABLE) {
 		if (value & MSR_IA32_APICBASE_ENABLE)
 		recalculate_apic_map(vcpu->kvm);
 		recalculate_apic_map(vcpu->kvm);
 	}

-		value &= ~MSR_IA32_APICBASE_BSP;
-
-	vcpu->arch.apic_base = value;
 	if ((old_value ^ value) & X2APIC_ENABLE) {
 	if ((old_value ^ value) & X2APIC_ENABLE) {
 		if (value & X2APIC_ENABLE) {
 			u32 id = kvm_apic_id(apic);
+ 1 - 2
arch/x86/kvm/vmx.c

@@ -8283,8 +8283,7 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
 	vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK);
 	vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK);
 	kvm_set_cr4(vcpu, vmcs12->host_cr4);

-		nested_ept_uninit_mmu_context(vcpu);
+	nested_ept_uninit_mmu_context(vcpu);
 
 
 	kvm_set_cr3(vcpu, vmcs12->host_cr3);
 	kvm_set_cr3(vcpu, vmcs12->host_cr3);
 	kvm_mmu_reset_context(vcpu);
 	kvm_mmu_reset_context(vcpu);

+ 13 - 0
arch/x86/mm/gup.c

@@ -83,6 +83,12 @@ static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
 		pte_t pte = gup_get_pte(ptep);
 		pte_t pte = gup_get_pte(ptep);
 		struct page *page;

+		if (pte_numa(pte)) {
+			pte_unmap(ptep);
+			return 0;
+		}
+
 		if ((pte_flags(pte) & (mask | _PAGE_SPECIAL)) != mask) {
 			pte_unmap(ptep);
 			return 0;
@@ -167,6 +173,13 @@ static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
 		if (pmd_none(pmd) || pmd_trans_splitting(pmd))
 			return 0;
 		if (unlikely(pmd_large(pmd))) {
+			/*
+			 * NUMA hinting faults need to be handled in the GUP
+			 * slowpath for accounting purposes and so that they
+			 * can be serialised against THP migration.
+			 */
+			if (pmd_numa(pmd))
+				return 0;
 			if (!gup_huge_pmd(pmd, addr, next, write, pages, nr))
 			if (!gup_huge_pmd(pmd, addr, next, write, pages, nr))
 				return 0;
 		} else {
+ 13 - 0
block/blk-mq-sysfs.c

@@ -335,9 +335,22 @@ static struct kobj_type blk_mq_hw_ktype = {
 void blk_mq_unregister_disk(struct gendisk *disk)
 void blk_mq_unregister_disk(struct gendisk *disk)
 {
 	struct request_queue *q = disk->queue;
+	struct blk_mq_ctx *ctx;
+	int i, j;
+
+	queue_for_each_hw_ctx(q, hctx, i) {
+		hctx_for_each_ctx(hctx, ctx, j) {
+			kobject_del(&ctx->kobj);
+			kobject_put(&ctx->kobj);
+		}
+		kobject_del(&hctx->kobj);
+		kobject_put(&hctx->kobj);
+	}
 
 
 	kobject_uevent(&q->mq_kobj, KOBJ_REMOVE);
 	kobject_uevent(&q->mq_kobj, KOBJ_REMOVE);
 	kobject_del(&q->mq_kobj);
 	kobject_del(&q->mq_kobj);
+	kobject_put(&q->mq_kobj);
 
 
 	kobject_put(&disk_to_dev(disk)->kobj);
 	kobject_put(&disk_to_dev(disk)->kobj);
 }
 }

+ 0 - 1
drivers/acpi/Kconfig

@@ -348,7 +348,6 @@ source "drivers/acpi/apei/Kconfig"
 config ACPI_EXTLOG
 config ACPI_EXTLOG
 	tristate "Extended Error Log support"
 	depends on X86_MCE && X86_LOCAL_APIC
 	select UEFI_CPER
 	select UEFI_CPER
 	default n
 	help
+ 2 - 2
drivers/acpi/ac.c

@@ -207,7 +207,7 @@ static int acpi_ac_probe(struct platform_device *pdev)
 		goto end;
 		goto end;

 	result = acpi_install_notify_handler(ACPI_HANDLE(&pdev->dev),
+			ACPI_ALL_NOTIFY, acpi_ac_notify_handler, ac);
 	if (result) {
 	if (result) {
 		power_supply_unregister(&ac->charger);
 		goto end;
 		return -EINVAL;
 		return -EINVAL;

 	acpi_remove_notify_handler(ACPI_HANDLE(&pdev->dev),
+			ACPI_ALL_NOTIFY, acpi_ac_notify_handler);
 
 
 	ac = platform_get_drvdata(pdev);
 	ac = platform_get_drvdata(pdev);
 	if (ac->charger.dev)
 	if (ac->charger.dev)

+ 1 - 0
drivers/acpi/acpi_lpss.c

@@ -162,6 +162,7 @@ static const struct acpi_device_id acpi_lpss_device_ids[] = {
 	{ "80860F14", (unsigned long)&byt_sdio_dev_desc },
 	{ "80860F14", (unsigned long)&byt_sdio_dev_desc },
 	{ "80860F41", (unsigned long)&byt_i2c_dev_desc },
 	{ "INT33B2", },
 
 
 	{ "INT3430", (unsigned long)&lpt_dev_desc },
 	{ "INT3430", (unsigned long)&lpt_dev_desc },
 	{ "INT3431", (unsigned long)&lpt_dev_desc },
 	{ "INT3431", (unsigned long)&lpt_dev_desc },

+ 0 - 1
drivers/acpi/apei/Kconfig

@@ -2,7 +2,6 @@ config ACPI_APEI
 	bool "ACPI Platform Error Interface (APEI)"
 	bool "ACPI Platform Error Interface (APEI)"
 	select MISC_FILESYSTEMS
 	select PSTORE
 	select UEFI_CPER
 	select UEFI_CPER
 	depends on X86
 	help
+ 1 - 0
drivers/acpi/apei/erst.c

@@ -942,6 +942,7 @@ static int erst_clearer(enum pstore_type_id type, u64 id, int count,
 static struct pstore_info erst_info = {
 static struct pstore_info erst_info = {
 	.owner		= THIS_MODULE,
 	.name		= "erst",
 	.open		= erst_open_pstore,
 	.open		= erst_open_pstore,
 	.close		= erst_close_pstore,
 	.read		= erst_reader,
+ 21 - 1
drivers/acpi/battery.c

@@ -62,6 +62,7 @@ MODULE_AUTHOR("Alexey Starikovskiy <astarikovskiy@suse.de>");
 MODULE_DESCRIPTION("ACPI Battery Driver");
 MODULE_DESCRIPTION("ACPI Battery Driver");
 MODULE_LICENSE("GPL");

 static unsigned int cache_time = 1000;
 static unsigned int cache_time = 1000;
 module_param(cache_time, uint, 0644);
 MODULE_PARM_DESC(cache_time, "cache time in milliseconds");
 		ACPI_EXCEPTION((AE_INFO, status, "Evaluating %s", name));
 		ACPI_EXCEPTION((AE_INFO, status, "Evaluating %s", name));
 		return -ENODEV;
 	}
+
+	if (battery_bix_broken_package)
+		result = extract_package(battery, buffer.pointer,
+				extended_info_offsets + 1,
+				ARRAY_SIZE(extended_info_offsets) - 1);
+	else if (test_bit(ACPI_BATTERY_XINFO_PRESENT, &battery->flags))
 		result = extract_package(battery, buffer.pointer,
 		result = extract_package(battery, buffer.pointer,
 				extended_info_offsets,
 				ARRAY_SIZE(extended_info_offsets));
@@ -754,6 +760,17 @@ static int battery_notify(struct notifier_block *nb,
 	return 0;
 }

+static struct dmi_system_id bat_dmi_table[] = {
+	{
+		.ident = "NEC LZ750/LS",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "NEC"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "PC-LZ750LS"),
+		},
+	},
+	{},
+};
+
 static int acpi_battery_add(struct acpi_device *device)
 {
 	int result = 0;
@@ -846,6 +863,9 @@ static void __init acpi_battery_init_async(void *unused, async_cookie_t cookie)
 {
 	if (acpi_disabled)
 		return;
+
+	if (dmi_check_system(bat_dmi_table))
+		battery_bix_broken_package = 1;
 	acpi_bus_register_driver(&acpi_battery_driver);
 }


+ 10 - 0
drivers/acpi/bus.c

@@ -156,6 +156,16 @@ int acpi_bus_get_private_data(acpi_handle handle, void **data)
 }
 EXPORT_SYMBOL(acpi_bus_get_private_data);

+void acpi_bus_no_hotplug(acpi_handle handle)
+{
+	struct acpi_device *adev = NULL;
+
+	acpi_bus_get_device(handle, &adev);
+	if (adev)
+		adev->flags.no_hotplug = true;
+}
+EXPORT_SYMBOL_GPL(acpi_bus_no_hotplug);
+
 static void acpi_print_osc_error(acpi_handle handle,
 	struct acpi_osc_context *context, char *error)
 {

Some files were not shown because too many files changed in this diff