
Merge commit 'f4bcd8ccddb02833340652e9f46f5127828eb79d' into x86/build

Bring in upstream merge of x86/kaslr for future patches.

Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
H. Peter Anvin authored 11 years ago
commit 4064e0ea3c
100 changed files with 1259 additions and 2769 deletions
  1. Documentation/ABI/testing/sysfs-firmware-efi (+20, -0)
  2. Documentation/ABI/testing/sysfs-firmware-efi-runtime-map (+34, -0)
  3. Documentation/ABI/testing/sysfs-kernel-boot_params (+38, -0)
  4. Documentation/RCU/trace.txt (+13, -9)
  5. Documentation/acpi/apei/einj.txt (+18, -1)
  6. Documentation/circular-buffers.txt (+27, -18)
  7. Documentation/devicetree/bindings/clock/exynos5250-clock.txt (+2, -0)
  8. Documentation/devicetree/bindings/timer/allwinner,sun5i-a13-hstimer.txt (+22, -0)
  9. Documentation/kernel-parameters.txt (+31, -5)
  10. Documentation/kmsg/s390/zcrypt (+20, -0)
  11. Documentation/memory-barriers.txt (+716, -206)
  12. Documentation/robust-futex-ABI.txt (+2, -2)
  13. Documentation/sysctl/kernel.txt (+0, -5)
  14. Documentation/x86/boot.txt (+3, -0)
  15. Documentation/x86/x86_64/mm.txt (+7, -0)
  16. Documentation/zorro.txt (+3, -2)
  17. MAINTAINERS (+22, -3)
  18. Makefile (+18, -4)
  19. arch/Kconfig (+67, -0)
  20. arch/alpha/include/asm/barrier.h (+5, -20)
  21. arch/arc/include/asm/Kbuild (+1, -0)
  22. arch/arc/include/asm/atomic.h (+5, -0)
  23. arch/arc/include/asm/barrier.h (+0, -5)
  24. arch/arm/Kconfig (+1, -12)
  25. arch/arm/Makefile (+0, -4)
  26. arch/arm/boot/compressed/misc.c (+14, -0)
  27. arch/arm/boot/dts/exynos5250.dtsi (+1, -1)
  28. arch/arm/boot/dts/sun5i-a10s.dtsi (+7, -0)
  29. arch/arm/boot/dts/sun5i-a13.dtsi (+7, -0)
  30. arch/arm/boot/dts/sun7i-a20.dtsi (+10, -0)
  31. arch/arm/crypto/aesbs-core.S_shipped (+1, -1)
  32. arch/arm/crypto/bsaes-armv7.pl (+1, -1)
  33. arch/arm/include/asm/barrier.h (+15, -0)
  34. arch/arm/include/asm/io.h (+1, -1)
  35. arch/arm/include/asm/memory.h (+2, -1)
  36. arch/arm/include/asm/unistd.h (+1, -1)
  37. arch/arm/include/asm/xen/page.h (+1, -1)
  38. arch/arm/include/uapi/asm/unistd.h (+2, -0)
  39. arch/arm/kernel/calls.S (+2, -0)
  40. arch/arm/kernel/devtree.c (+1, -1)
  41. arch/arm/kernel/perf_event.c (+0, -4)
  42. arch/arm/kernel/perf_event_cpu.c (+1, -1)
  43. arch/arm/kernel/traps.c (+10, -3)
  44. arch/arm/mach-footbridge/dc21285-timer.c (+3, -2)
  45. arch/arm/mach-highbank/highbank.c (+1, -0)
  46. arch/arm/mach-omap2/omap4-common.c (+1, -0)
  47. arch/arm/mach-shmobile/board-armadillo800eva.c (+2, -2)
  48. arch/arm/mach-shmobile/board-kzm9g.c (+1, -1)
  49. arch/arm/mach-shmobile/board-mackerel.c (+2, -2)
  50. arch/arm/mach-sunxi/Kconfig (+1, -0)
  51. arch/arm/mm/flush.c (+3, -3)
  52. arch/arm/mm/init.c (+1, -1)
  53. arch/arm/net/bpf_jit_32.c (+3, -3)
  54. arch/arm64/include/asm/barrier.h (+50, -0)
  55. arch/arm64/include/asm/io.h (+1, -1)
  56. arch/avr32/include/asm/barrier.h (+5, -12)
  57. arch/blackfin/include/asm/barrier.h (+1, -17)
  58. arch/cris/include/asm/Kbuild (+1, -0)
  59. arch/cris/include/asm/barrier.h (+0, -25)
  60. arch/frv/include/asm/barrier.h (+1, -7)
  61. arch/hexagon/include/asm/Kbuild (+1, -0)
  62. arch/hexagon/include/asm/atomic.h (+5, -1)
  63. arch/hexagon/include/asm/barrier.h (+0, -4)
  64. arch/ia64/Kconfig (+0, -12)
  65. arch/ia64/Makefile (+0, -2)
  66. arch/ia64/configs/xen_domu_defconfig (+0, -199)
  67. arch/ia64/include/asm/acpi.h (+0, -2)
  68. arch/ia64/include/asm/barrier.h (+23, -0)
  69. arch/ia64/include/asm/machvec.h (+0, -2)
  70. arch/ia64/include/asm/machvec_xen.h (+0, -22)
  71. arch/ia64/include/asm/meminit.h (+0, -1)
  72. arch/ia64/include/asm/paravirt.h (+0, -1)
  73. arch/ia64/include/asm/pvclock-abi.h (+1, -1)
  74. arch/ia64/include/asm/sync_bitops.h (+0, -51)
  75. arch/ia64/include/asm/xen/events.h (+0, -41)
  76. arch/ia64/include/asm/xen/hypercall.h (+0, -265)
  77. arch/ia64/include/asm/xen/hypervisor.h (+0, -61)
  78. arch/ia64/include/asm/xen/inst.h (+0, -486)
  79. arch/ia64/include/asm/xen/interface.h (+0, -363)
  80. arch/ia64/include/asm/xen/irq.h (+0, -44)
  81. arch/ia64/include/asm/xen/minstate.h (+0, -143)
  82. arch/ia64/include/asm/xen/page-coherent.h (+0, -38)
  83. arch/ia64/include/asm/xen/page.h (+0, -65)
  84. arch/ia64/include/asm/xen/patchlist.h (+0, -38)
  85. arch/ia64/include/asm/xen/privop.h (+0, -135)
  86. arch/ia64/include/asm/xen/xcom_hcall.h (+0, -51)
  87. arch/ia64/include/asm/xen/xencomm.h (+0, -42)
  88. arch/ia64/include/uapi/asm/break.h (+0, -9)
  89. arch/ia64/kernel/acpi.c (+0, -3)
  90. arch/ia64/kernel/asm-offsets.c (+0, -32)
  91. arch/ia64/kernel/head.S (+0, -3)
  92. arch/ia64/kernel/nr-irqs.c (+0, -4)
  93. arch/ia64/kernel/paravirt_inst.h (+0, -3)
  94. arch/ia64/kernel/paravirt_patchlist.h (+0, -4)
  95. arch/ia64/kernel/vmlinux.lds.S (+0, -6)
  96. arch/ia64/xen/Kconfig (+0, -25)
  97. arch/ia64/xen/Makefile (+0, -37)
  98. arch/ia64/xen/gate-data.S (+0, -3)
  99. arch/ia64/xen/grant-table.c (+0, -94)
  100. arch/ia64/xen/hypercall.S (+0, -88)

+ 20 - 0
Documentation/ABI/testing/sysfs-firmware-efi

@@ -0,0 +1,20 @@
+What:		/sys/firmware/efi/fw_vendor
+Date:		December 2013
+Contact:	Dave Young <dyoung@redhat.com>
+Description:	It shows the physical address of firmware vendor field in the
+		EFI system table.
+Users:		Kexec
+
+What:		/sys/firmware/efi/runtime
+Date:		December 2013
+Contact:	Dave Young <dyoung@redhat.com>
+Description:	It shows the physical address of runtime service table entry in
+		the EFI system table.
+Users:		Kexec
+
+What:		/sys/firmware/efi/config_table
+Date:		December 2013
+Contact:	Dave Young <dyoung@redhat.com>
+Description:	It shows the physical address of config table entry in the EFI
+		system table.
+Users:		Kexec

+ 34 - 0
Documentation/ABI/testing/sysfs-firmware-efi-runtime-map

@@ -0,0 +1,34 @@
+What:		/sys/firmware/efi/runtime-map/
+Date:		December 2013
+Contact:	Dave Young <dyoung@redhat.com>
+Description:	Switching efi runtime services to virtual mode requires
+		that all efi memory ranges which have the runtime attribute
+		bit set to be mapped to virtual addresses.
+
+		The efi runtime services can only be switched to virtual
+		mode once without rebooting. The kexec kernel must maintain
+		the same physical to virtual address mappings as the first
+		kernel. The mappings are exported to sysfs so userspace tools
+		can reassemble them and pass them into the kexec kernel.
+
+		/sys/firmware/efi/runtime-map/ is the directory the kernel
+		exports that information in.
+
+		subdirectories are named with the number of the memory range:
+
+			/sys/firmware/efi/runtime-map/0
+			/sys/firmware/efi/runtime-map/1
+			/sys/firmware/efi/runtime-map/2
+			/sys/firmware/efi/runtime-map/3
+			...
+
+		Each subdirectory contains five files:
+
+		attribute : The attributes of the memory range.
+		num_pages : The size of the memory range in pages.
+		phys_addr : The physical address of the memory range.
+		type      : The type of the memory range.
+		virt_addr : The virtual address of the memory range.
+
+		Above values are all hexadecimal numbers with the '0x' prefix.
+Users:		Kexec
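
As a rough illustration of the ABI described above, userspace (kexec tooling, for
instance) can walk these directories directly.  The sketch below is not part of the
patch; only the directory layout and the five file names come from the description,
everything else is illustrative:

	#include <stdio.h>

	/* Minimal sketch: print every exported EFI runtime memory range. */
	int main(void)
	{
		static const char *files[] = {
			"attribute", "num_pages", "phys_addr", "type", "virt_addr"
		};
		char path[128], buf[64];
		int i, j;

		for (i = 0; ; i++) {		/* 0, 1, 2, ... until a range is missing */
			for (j = 0; j < 5; j++) {
				FILE *f;

				snprintf(path, sizeof(path),
					 "/sys/firmware/efi/runtime-map/%d/%s",
					 i, files[j]);
				f = fopen(path, "r");
				if (!f)
					return 0;	/* no more ranges */
				if (fgets(buf, sizeof(buf), f))
					printf("map %d %-9s %s", i, files[j], buf);
				fclose(f);
			}
		}
	}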

+ 38 - 0
Documentation/ABI/testing/sysfs-kernel-boot_params

@@ -0,0 +1,38 @@
+What:		/sys/kernel/boot_params
+Date:		December 2013
+Contact:	Dave Young <dyoung@redhat.com>
+Description:	The /sys/kernel/boot_params directory contains two
+		files: "data" and "version" and one subdirectory "setup_data".
+		It is used to export the kernel boot parameters of an x86
+		platform to userspace for kexec and debugging purpose.
+
+		If there's no setup_data in boot_params the subdirectory will
+		not be created.
+
+		"data" file is the binary representation of struct boot_params.
+
+		"version" file is the string representation of boot
+		protocol version.
+
+		"setup_data" subdirectory contains the setup_data data
+		structure in boot_params. setup_data is maintained in kernel
+		as a linked list. In the "setup_data" subdirectory there's one
+		subdirectory for each linked list node named with the number
+		of the list nodes. The list node subdirectory contains two
+		files "type" and "data". "type" file is the string
+		representation of setup_data type. "data" file is the binary
+		representation of setup_data payload.
+
+		The whole boot_params directory structure is like below:
+		/sys/kernel/boot_params
+		|__ data
+		|__ setup_data
+		|   |__ 0
+		|   |   |__ data
+		|   |   |__ type
+		|   |__ 1
+		|       |__ data
+		|       |__ type
+		|__ version
+
+Users:		Kexec
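
Again purely as an illustration (not part of the patch), the layout above can be
consumed from userspace as in the sketch below; the paths mirror the description,
and the traversal simply stops at the first missing setup_data node:

	#include <stdio.h>

	int main(void)
	{
		char path[96], buf[64];
		FILE *f;
		int i;

		f = fopen("/sys/kernel/boot_params/version", "r");
		if (f) {
			if (fgets(buf, sizeof(buf), f))
				printf("boot protocol version: %s", buf);
			fclose(f);
		}

		for (i = 0; ; i++) {	/* one subdirectory per setup_data list node */
			snprintf(path, sizeof(path),
				 "/sys/kernel/boot_params/setup_data/%d/type", i);
			f = fopen(path, "r");
			if (!f)
				return 0;	/* no setup_data, or no more nodes */
			if (fgets(buf, sizeof(buf), f))
				printf("setup_data node %d type: %s", i, buf);
			fclose(f);
		}
	}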

+ 13 - 9
Documentation/RCU/trace.txt

@@ -396,14 +396,14 @@ o	Each element of the form "3/3 ..>. 0:7 ^0" represents one rcu_node
 
 The output of "cat rcu/rcu_sched/rcu_pending" looks as follows:
 
-  0!np=26111 qsp=29 rpq=5386 cbr=1 cng=570 gpc=3674 gps=577 nn=15903
-  1!np=28913 qsp=35 rpq=6097 cbr=1 cng=448 gpc=3700 gps=554 nn=18113
-  2!np=32740 qsp=37 rpq=6202 cbr=0 cng=476 gpc=4627 gps=546 nn=20889
-  3 np=23679 qsp=22 rpq=5044 cbr=1 cng=415 gpc=3403 gps=347 nn=14469
-  4!np=30714 qsp=4 rpq=5574 cbr=0 cng=528 gpc=3931 gps=639 nn=20042
-  5 np=28910 qsp=2 rpq=5246 cbr=0 cng=428 gpc=4105 gps=709 nn=18422
-  6!np=38648 qsp=5 rpq=7076 cbr=0 cng=840 gpc=4072 gps=961 nn=25699
-  7 np=37275 qsp=2 rpq=6873 cbr=0 cng=868 gpc=3416 gps=971 nn=25147
+  0!np=26111 qsp=29 rpq=5386 cbr=1 cng=570 gpc=3674 gps=577 nn=15903 ndw=0
+  1!np=28913 qsp=35 rpq=6097 cbr=1 cng=448 gpc=3700 gps=554 nn=18113 ndw=0
+  2!np=32740 qsp=37 rpq=6202 cbr=0 cng=476 gpc=4627 gps=546 nn=20889 ndw=0
+  3 np=23679 qsp=22 rpq=5044 cbr=1 cng=415 gpc=3403 gps=347 nn=14469 ndw=0
+  4!np=30714 qsp=4 rpq=5574 cbr=0 cng=528 gpc=3931 gps=639 nn=20042 ndw=0
+  5 np=28910 qsp=2 rpq=5246 cbr=0 cng=428 gpc=4105 gps=709 nn=18422 ndw=0
+  6!np=38648 qsp=5 rpq=7076 cbr=0 cng=840 gpc=4072 gps=961 nn=25699 ndw=0
+  7 np=37275 qsp=2 rpq=6873 cbr=0 cng=868 gpc=3416 gps=971 nn=25147 ndw=0
 
 The fields are as follows:
 
@@ -432,6 +432,10 @@ o	"gpc" is the number of times that an old grace period had
 o	"gps" is the number of times that a new grace period had started,
 	but this CPU was not yet aware of it.
 
+o	"ndw" is the number of times that a wakeup of an rcuo
+	callback-offload kthread had to be deferred in order to avoid
+	deadlock.
+
 o	"nn" is the number of times that this CPU needed nothing.
 
 
@@ -443,7 +447,7 @@ The output of "cat rcu/rcuboost" looks as follows:
     balk: nt=0 egt=6541 bt=0 nb=0 ny=126 nos=0
 
 This information is output only for rcu_preempt.  Each two-line entry
-corresponds to a leaf rcu_node strcuture.  The fields are as follows:
+corresponds to a leaf rcu_node structure.  The fields are as follows:
 
 o	"n:m" is the CPU-number range for the corresponding two-line
 	entry.  In the sample output above, the first entry covers

+ 18 - 1
Documentation/acpi/apei/einj.txt

@@ -45,11 +45,22 @@ directory apei/einj. The following files are provided.
   injection. Before this, please specify all necessary error
   parameters.
 
+- flags
+  Present for kernel version 3.13 and above. Used to specify which
+  of param{1..4} are valid and should be used by BIOS during injection.
+  Value is a bitmask as specified in ACPI5.0 spec for the
+  SET_ERROR_TYPE_WITH_ADDRESS data structure:
+	Bit 0 - Processor APIC field valid (see param3 below)
+	Bit 1 - Memory address and mask valid (param1 and param2)
+	Bit 2 - PCIe (seg,bus,dev,fn) valid (param4 below)
+  If set to zero, legacy behaviour is used where the type of injection
+  specifies just one bit set, and param1 is multiplexed.
+
 - param1
   This file is used to set the first error parameter value. Effect of
   parameter depends on error_type specified. For example, if error
   type is memory related type, the param1 should be a valid physical
-  memory address.
+  memory address. [Unless "flags" is set - see above]
 
 - param2
   This file is used to set the second error parameter value. Effect of
@@ -58,6 +69,12 @@ directory apei/einj. The following files are provided.
   address mask. Linux requires page or narrower granularity, say,
   0xfffffffffffff000.
 
+- param3
+  Used when the 0x1 bit is set in "flags" to specify the APIC id
+
+- param4
+  Used when the 0x4 bit is set in "flags" to specify target PCIe device
+
 - notrigger
   The EINJ mechanism is a two step process. First inject the error, then
   perform some actions to trigger it. Setting "notrigger" to 1 skips the
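
To make the flags/param interplay concrete, a userspace setup for a memory-error
injection might look like the sketch below.  This is illustrative only: it assumes
debugfs is mounted at /sys/kernel/debug (so the files above live under
/sys/kernel/debug/apei/einj/), and the address written to param1 is a placeholder:

	#include <stdio.h>

	static int write_val(const char *file, const char *val)
	{
		char path[128];
		FILE *f;

		snprintf(path, sizeof(path),
			 "/sys/kernel/debug/apei/einj/%s", file);
		f = fopen(path, "w");
		if (!f)
			return -1;
		fputs(val, f);
		return fclose(f);
	}

	int main(void)
	{
		write_val("flags", "0x2");	/* bit 1: param1/param2 are valid */
		write_val("param1", "0x12345000");		/* placeholder physical address */
		write_val("param2", "0xfffffffffffff000");	/* page-granularity mask */
		write_val("notrigger", "0");	/* 0 = perform the trigger step as usual */
		/* error_type and the injection itself are driven through the
		 * other files described in this document. */
		return 0;
	}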

+ 27 - 18
Documentation/circular-buffers.txt

@@ -160,6 +160,7 @@ The producer will look something like this:
 	spin_lock(&producer_lock);
 
 	unsigned long head = buffer->head;
+	/* The spin_unlock() and next spin_lock() provide needed ordering. */
 	unsigned long tail = ACCESS_ONCE(buffer->tail);
 
 	if (CIRC_SPACE(head, tail, buffer->size) >= 1) {
@@ -168,9 +169,8 @@ The producer will look something like this:
 
 		produce_item(item);
 
-		smp_wmb(); /* commit the item before incrementing the head */
-
-		buffer->head = (head + 1) & (buffer->size - 1);
+		smp_store_release(buffer->head,
+				  (head + 1) & (buffer->size - 1));
 
 		/* wake_up() will make sure that the head is committed before
 		 * waking anyone up */
@@ -183,9 +183,14 @@ This will instruct the CPU that the contents of the new item must be written
 before the head index makes it available to the consumer and then instructs the
 CPU that the revised head index must be written before the consumer is woken.
 
-Note that wake_up() doesn't have to be the exact mechanism used, but whatever
-is used must guarantee a (write) memory barrier between the update of the head
-index and the change of state of the consumer, if a change of state occurs.
+Note that wake_up() does not guarantee any sort of barrier unless something
+is actually awakened.  We therefore cannot rely on it for ordering.  However,
+there is always one element of the array left empty.  Therefore, the
+producer must produce two elements before it could possibly corrupt the
+element currently being read by the consumer.  Therefore, the unlock-lock
+pair between consecutive invocations of the consumer provides the necessary
+ordering between the read of the index indicating that the consumer has
+vacated a given element and the write by the producer to that same element.
 
 
 THE CONSUMER
@@ -195,21 +200,20 @@ The consumer will look something like this:
 
 	spin_lock(&consumer_lock);
 
-	unsigned long head = ACCESS_ONCE(buffer->head);
+	/* Read index before reading contents at that index. */
+	unsigned long head = smp_load_acquire(buffer->head);
 	unsigned long tail = buffer->tail;
 
 	if (CIRC_CNT(head, tail, buffer->size) >= 1) {
-		/* read index before reading contents at that index */
-		smp_read_barrier_depends();
 
 		/* extract one item from the buffer */
 		struct item *item = buffer[tail];
 
 		consume_item(item);
 
-		smp_mb(); /* finish reading descriptor before incrementing tail */
-
-		buffer->tail = (tail + 1) & (buffer->size - 1);
+		/* Finish reading descriptor before incrementing tail. */
+		smp_store_release(buffer->tail,
+				  (tail + 1) & (buffer->size - 1));
 	}
 
 	spin_unlock(&consumer_lock);
@@ -218,12 +222,17 @@ This will instruct the CPU to make sure the index is up to date before reading
 the new item, and then it shall make sure the CPU has finished reading the item
 before it writes the new tail pointer, which will erase the item.
 
-
-Note the use of ACCESS_ONCE() in both algorithms to read the opposition index.
-This prevents the compiler from discarding and reloading its cached value -
-which some compilers will do across smp_read_barrier_depends().  This isn't
-strictly needed if you can be sure that the opposition index will _only_ be
-used the once.
+Note the use of ACCESS_ONCE() and smp_load_acquire() to read the
+opposition index.  This prevents the compiler from discarding and
+reloading its cached value - which some compilers will do across
+smp_read_barrier_depends().  This isn't strictly needed if you can
+be sure that the opposition index will _only_ be used the once.
+The smp_load_acquire() additionally forces the CPU to order against
+subsequent memory references.  Similarly, smp_store_release() is used
+in both algorithms to write the thread's index.  This documents the
+fact that we are writing to something that can be read concurrently,
+prevents the compiler from tearing the store, and enforces ordering
+against previous accesses.
 
 
 ===============
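
For readers who want to experiment outside the kernel, the revised producer/consumer
pattern maps directly onto C11 atomics: atomic_store_explicit(..., memory_order_release)
and atomic_load_explicit(..., memory_order_acquire) play the roles of
smp_store_release() and smp_load_acquire().  The single-producer/single-consumer
sketch below is only an analogue, not the kernel code:

	#include <stdatomic.h>
	#include <stdio.h>

	#define BUF_SIZE 16			/* must be a power of two */

	struct circ {
		_Atomic unsigned long head;
		_Atomic unsigned long tail;
		int item[BUF_SIZE];
	};

	/* Producer: write the item, then release-store the new head. */
	static int produce(struct circ *b, int v)
	{
		unsigned long head = atomic_load_explicit(&b->head, memory_order_relaxed);
		unsigned long tail = atomic_load_explicit(&b->tail, memory_order_acquire);

		if (((head + 1) & (BUF_SIZE - 1)) == tail)
			return -1;		/* full: one slot is always left empty */
		b->item[head] = v;
		atomic_store_explicit(&b->head, (head + 1) & (BUF_SIZE - 1),
				      memory_order_release);
		return 0;
	}

	/* Consumer: acquire-load head before reading, release-store the new tail. */
	static int consume(struct circ *b, int *v)
	{
		unsigned long head = atomic_load_explicit(&b->head, memory_order_acquire);
		unsigned long tail = atomic_load_explicit(&b->tail, memory_order_relaxed);

		if (head == tail)
			return -1;		/* empty */
		*v = b->item[tail];
		atomic_store_explicit(&b->tail, (tail + 1) & (BUF_SIZE - 1),
				      memory_order_release);
		return 0;
	}

	int main(void)
	{
		struct circ b = { 0 };
		int v;

		produce(&b, 42);
		if (!consume(&b, &v))
			printf("%d\n", v);
		return 0;
	}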

+ 2 - 0
Documentation/devicetree/bindings/clock/exynos5250-clock.txt

@@ -159,6 +159,8 @@ clock which they consume.
   mixer			343
   hdmi			344
   g2d			345
+  mdma0			346
+  smmu_mdma0		347
 
 
    [Clock Muxes]

+ 22 - 0
Documentation/devicetree/bindings/timer/allwinner,sun5i-a13-hstimer.txt

@@ -0,0 +1,22 @@
+Allwinner SoCs High Speed Timer Controller
+
+Required properties:
+
+- compatible :	should be "allwinner,sun5i-a13-hstimer" or
+		"allwinner,sun7i-a20-hstimer"
+- reg : Specifies base physical address and size of the registers.
+- interrupts :	The interrupts of these timers (2 for the sun5i IP, 4 for the sun7i
+		one)
+- clocks: phandle to the source clock (usually the AHB clock)
+
+Example:
+
+timer@01c60000 {
+	compatible = "allwinner,sun7i-a20-hstimer";
+	reg = <0x01c60000 0x1000>;
+	interrupts = <0 51 1>,
+		     <0 52 1>,
+		     <0 53 1>,
+		     <0 54 1>;
+	clocks = <&ahb1_gates 19>;
+};

+ 31 - 5
Documentation/kernel-parameters.txt

@@ -774,6 +774,15 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 	disable=	[IPV6]
 			See Documentation/networking/ipv6.txt.
 
+	disable_cpu_apicid= [X86,APIC,SMP]
+			Format: <int>
+			The number of initial APIC ID for the
+			corresponding CPU to be disabled at boot,
+			mostly used for the kdump 2nd kernel to
+			disable BSP to wake up multiple CPUs without
+			causing system reset or hang due to sending
+			INIT from AP to BSP.
+
 	disable_ddw     [PPC/PSERIES]
 			Disable Dynamic DMA Window support. Use this if
 			to workaround buggy firmware.
@@ -881,6 +890,14 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 
 			The xen output can only be used by Xen PV guests.
 
+	edac_report=	[HW,EDAC] Control how to report EDAC event
+			Format: {"on" | "off" | "force"}
+			on: enable EDAC to report H/W event. May be overridden
+			by other higher priority error reporting module.
+			off: disable H/W event reporting through EDAC.
+			force: enforce the use of EDAC to report H/W event.
+			default: on.
+
 	ekgdboc=	[X86,KGDB] Allow early kernel console debugging
 			ekgdboc=kbd
 
@@ -890,6 +907,12 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 	edd=		[EDD]
 			Format: {"off" | "on" | "skip[mbr]"}
 
+	efi=		[EFI]
+			Format: { "old_map" }
+			old_map [X86-64]: switch to the old ioremap-based EFI
+			runtime services mapping. 32-bit still uses this one by
+			default.
+
 	efi_no_storage_paranoia [EFI; X86]
 			Using this parameter you can use more than 50% of
 			your efi variable storage. Use this parameter only if
@@ -1994,6 +2017,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 	noapic		[SMP,APIC] Tells the kernel to not make use of any
 			IOAPICs that may be present in the system.
 
+	nokaslr		[X86]
+			Disable kernel base offset ASLR (Address Space
+			Layout Randomization) if built into the kernel.
+
 	noautogroup	Disable scheduler automatic task group creation.
 
 	nobats		[PPC] Do not use BATs for mapping kernel lowmem
@@ -2627,7 +2654,6 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 			for RCU-preempt, and "s" for RCU-sched, and "N"
 			is the CPU number.  This reduces OS jitter on the
 			offloaded CPUs, which can be useful for HPC and
-
 			real-time workloads.  It can also improve energy
 			efficiency for asymmetric multiprocessors.
 
@@ -2643,8 +2669,8 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 			periodically wake up to do the polling.
 
 	rcutree.blimit=	[KNL]
-			Set maximum number of finished RCU callbacks to process
-			in one batch.
+			Set maximum number of finished RCU callbacks to
+			process in one batch.
 
 	rcutree.rcu_fanout_leaf= [KNL]
 			Increase the number of CPUs assigned to each
@@ -2663,8 +2689,8 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 			value is one, and maximum value is HZ.
 
 	rcutree.qhimark= [KNL]
-			Set threshold of queued
-			RCU callbacks over which batch limiting is disabled.
+			Set threshold of queued RCU callbacks beyond which
+			batch limiting is disabled.
 
 	rcutree.qlowmark= [KNL]
 			Set threshold of queued RCU callbacks below which

+ 20 - 0
Documentation/kmsg/s390/zcrypt

@@ -0,0 +1,20 @@
+/*?
+ * Text: "Cryptographic device %x failed and was set offline\n"
+ * Severity: Error
+ * Parameter:
+ *   @1: device index
+ * Description:
+ * A cryptographic device failed to process a cryptographic request.
+ * The cryptographic device driver could not correct the error and
+ * set the device offline. The application that issued the
+ * request received an indication that the request has failed.
+ * User action:
+ * Use the lszcrypt command to confirm that the cryptographic
+ * hardware is still configured to your LPAR or z/VM guest virtual
+ * machine. If the device is available to your Linux instance the
+ * command output contains a line that begins with 'card<device index>',
+ * where <device index> is the two-digit decimal number in the message text.
+ * After ensuring that the device is available, use the chzcrypt command to
+ * set it online again.
+ * If the error persists, contact your support organization.
+ */

+ 716 - 206
Documentation/memory-barriers.txt

@@ -194,18 +194,22 @@ There are some minimal guarantees that may be expected of a CPU:
  (*) On any given CPU, dependent memory accesses will be issued in order, with
      respect to itself.  This means that for:
 
-	Q = P; D = *Q;
+	ACCESS_ONCE(Q) = P; smp_read_barrier_depends(); D = ACCESS_ONCE(*Q);
 
      the CPU will issue the following memory operations:
 
 	Q = LOAD P, D = LOAD *Q
 
-     and always in that order.
+     and always in that order.  On most systems, smp_read_barrier_depends()
+     does nothing, but it is required for DEC Alpha.  The ACCESS_ONCE()
+     is required to prevent compiler mischief.  Please note that you
+     should normally use something like rcu_dereference() instead of
+     open-coding smp_read_barrier_depends().
 
  (*) Overlapping loads and stores within a particular CPU will appear to be
      ordered within that CPU.  This means that for:
 
-	a = *X; *X = b;
+	a = ACCESS_ONCE(*X); ACCESS_ONCE(*X) = b;
 
      the CPU will only issue the following sequence of memory operations:
 
@@ -213,7 +217,7 @@ There are some minimal guarantees that may be expected of a CPU:
 
      And for:
 
-	*X = c; d = *X;
+	ACCESS_ONCE(*X) = c; d = ACCESS_ONCE(*X);
 
      the CPU will only issue:
 
@@ -224,6 +228,12 @@ There are some minimal guarantees that may be expected of a CPU:
 
 And there are a number of things that _must_ or _must_not_ be assumed:
 
+ (*) It _must_not_ be assumed that the compiler will do what you want with
+     memory references that are not protected by ACCESS_ONCE().  Without
+     ACCESS_ONCE(), the compiler is within its rights to do all sorts
+     of "creative" transformations, which are covered in the Compiler
+     Barrier section.
+
  (*) It _must_not_ be assumed that independent loads and stores will be issued
      in the order given.  This means that for:
 
@@ -371,33 +381,44 @@ Memory barriers come in four basic varieties:
 
 And a couple of implicit varieties:
 
- (5) LOCK operations.
+ (5) ACQUIRE operations.
 
      This acts as a one-way permeable barrier.  It guarantees that all memory
-     operations after the LOCK operation will appear to happen after the LOCK
-     operation with respect to the other components of the system.
+     operations after the ACQUIRE operation will appear to happen after the
+     ACQUIRE operation with respect to the other components of the system.
+     ACQUIRE operations include LOCK operations and smp_load_acquire()
+     operations.
 
-     Memory operations that occur before a LOCK operation may appear to happen
-     after it completes.
+     Memory operations that occur before an ACQUIRE operation may appear to
+     happen after it completes.
 
-     A LOCK operation should almost always be paired with an UNLOCK operation.
+     An ACQUIRE operation should almost always be paired with a RELEASE
+     operation.
 
 
- (6) UNLOCK operations.
+ (6) RELEASE operations.
 
      This also acts as a one-way permeable barrier.  It guarantees that all
-     memory operations before the UNLOCK operation will appear to happen before
-     the UNLOCK operation with respect to the other components of the system.
+     memory operations before the RELEASE operation will appear to happen
+     before the RELEASE operation with respect to the other components of the
+     system. RELEASE operations include UNLOCK operations and
+     smp_store_release() operations.
 
-     Memory operations that occur after an UNLOCK operation may appear to
+     Memory operations that occur after a RELEASE operation may appear to
      happen before it completes.
 
-     LOCK and UNLOCK operations are guaranteed to appear with respect to each
-     other strictly in the order specified.
+     The use of ACQUIRE and RELEASE operations generally precludes the need
+     for other sorts of memory barrier (but note the exceptions mentioned in
+     the subsection "MMIO write barrier").  In addition, a RELEASE+ACQUIRE
+     pair is -not- guaranteed to act as a full memory barrier.  However, after
+     an ACQUIRE on a given variable, all memory accesses preceding any prior
+     RELEASE on that same variable are guaranteed to be visible.  In other
+     words, within a given variable's critical section, all accesses of all
+     previous critical sections for that variable are guaranteed to have
+     completed.
 
-     The use of LOCK and UNLOCK operations generally precludes the need for
-     other sorts of memory barrier (but note the exceptions mentioned in the
-     subsection "MMIO write barrier").
+     This means that ACQUIRE acts as a minimal "acquire" operation and
+     RELEASE acts as a minimal "release" operation.
 
 
 Memory barriers are only required where there's a possibility of interaction
@@ -450,14 +471,14 @@ The usage requirements of data dependency barriers are a little subtle, and
 it's not always obvious that they're needed.  To illustrate, consider the
 following sequence of events:
 
-	CPU 1		CPU 2
-	===============	===============
+	CPU 1		      CPU 2
+	===============	      ===============
 	{ A == 1, B == 2, C = 3, P == &A, Q == &C }
 	B = 4;
 	<write barrier>
-	P = &B
-			Q = P;
-			D = *Q;
+	ACCESS_ONCE(P) = &B
+			      Q = ACCESS_ONCE(P);
+			      D = *Q;
 
 There's a clear data dependency here, and it would seem that by the end of the
 sequence, Q must be either &A or &B, and that:
@@ -477,15 +498,15 @@ Alpha).
 To deal with this, a data dependency barrier or better must be inserted
 between the address load and the data load:
 
-	CPU 1		CPU 2
-	===============	===============
+	CPU 1		      CPU 2
+	===============	      ===============
 	{ A == 1, B == 2, C = 3, P == &A, Q == &C }
 	B = 4;
 	<write barrier>
-	P = &B
-			Q = P;
-			<data dependency barrier>
-			D = *Q;
+	ACCESS_ONCE(P) = &B
+			      Q = ACCESS_ONCE(P);
+			      <data dependency barrier>
+			      D = *Q;
 
 This enforces the occurrence of one of the two implications, and prevents the
 third possibility from arising.
@@ -500,25 +521,26 @@ odd-numbered bank is idle, one can see the new value of the pointer P (&B),
 but the old value of the variable B (2).
 
 
-Another example of where data dependency barriers might by required is where a
+Another example of where data dependency barriers might be required is where a
 number is read from memory and then used to calculate the index for an array
 access:
 
-	CPU 1		CPU 2
-	===============	===============
+	CPU 1		      CPU 2
+	===============	      ===============
 	{ M[0] == 1, M[1] == 2, M[3] = 3, P == 0, Q == 3 }
 	M[1] = 4;
 	<write barrier>
-	P = 1
-			Q = P;
-			<data dependency barrier>
-			D = M[Q];
+	ACCESS_ONCE(P) = 1
+			      Q = ACCESS_ONCE(P);
+			      <data dependency barrier>
+			      D = M[Q];
 
 
-The data dependency barrier is very important to the RCU system, for example.
-See rcu_dereference() in include/linux/rcupdate.h.  This permits the current
-target of an RCU'd pointer to be replaced with a new modified target, without
-the replacement target appearing to be incompletely initialised.
+The data dependency barrier is very important to the RCU system,
+for example.  See rcu_assign_pointer() and rcu_dereference() in
+include/linux/rcupdate.h.  This permits the current target of an RCU'd
+pointer to be replaced with a new modified target, without the replacement
+target appearing to be incompletely initialised.
 
 See also the subsection on "Cache Coherency" for a more thorough example.
 
@@ -530,24 +552,190 @@ A control dependency requires a full read memory barrier, not simply a data
 dependency barrier to make it work correctly.  Consider the following bit of
 code:
 
-	q = &a;
-	if (p) {
-		<data dependency barrier>
-		q = &b;
+	q = ACCESS_ONCE(a);
+	if (q) {
+		<data dependency barrier>  /* BUG: No data dependency!!! */
+		p = ACCESS_ONCE(b);
 	}
-	x = *q;
 
 This will not have the desired effect because there is no actual data
-dependency, but rather a control dependency that the CPU may short-circuit by
-attempting to predict the outcome in advance.  In such a case what's actually
-required is:
+dependency, but rather a control dependency that the CPU may short-circuit
+by attempting to predict the outcome in advance, so that other CPUs see
+the load from b as having happened before the load from a.  In such a
+case what's actually required is:
 
-	q = &a;
-	if (p) {
+	q = ACCESS_ONCE(a);
+	if (q) {
 		<read barrier>
-		q = &b;
+		p = ACCESS_ONCE(b);
+	}
+
+However, stores are not speculated.  This means that ordering -is- provided
+in the following example:
+
+	q = ACCESS_ONCE(a);
+	if (ACCESS_ONCE(q)) {
+		ACCESS_ONCE(b) = p;
+	}
+
+Please note that ACCESS_ONCE() is not optional!  Without the ACCESS_ONCE(),
+the compiler is within its rights to transform this example:
+
+	q = a;
+	if (q) {
+		b = p;  /* BUG: Compiler can reorder!!! */
+		do_something();
+	} else {
+		b = p;  /* BUG: Compiler can reorder!!! */
+		do_something_else();
+	}
+
+into this, which of course defeats the ordering:
+
+	b = p;
+	q = a;
+	if (q)
+		do_something();
+	else
+		do_something_else();
+
+Worse yet, if the compiler is able to prove (say) that the value of
+variable 'a' is always non-zero, it would be well within its rights
+to optimize the original example by eliminating the "if" statement
+as follows:
+
+	q = a;
+	b = p;  /* BUG: Compiler can reorder!!! */
+	do_something();
+
+The solution is again ACCESS_ONCE(), which preserves the ordering between
+the load from variable 'a' and the store to variable 'b':
+
+	q = ACCESS_ONCE(a);
+	if (q) {
+		ACCESS_ONCE(b) = p;
+		do_something();
+	} else {
+		ACCESS_ONCE(b) = p;
+		do_something_else();
+	}
+
+You could also use barrier() to prevent the compiler from moving
+the stores to variable 'b', but barrier() would not prevent the
+compiler from proving to itself that a==1 always, so ACCESS_ONCE()
+is also needed.
+
+It is important to note that control dependencies absolutely require a
+a conditional.  For example, the following "optimized" version of
+the above example breaks ordering:
+
+	q = ACCESS_ONCE(a);
+	ACCESS_ONCE(b) = p;  /* BUG: No ordering vs. load from a!!! */
+	if (q) {
+		/* ACCESS_ONCE(b) = p; -- moved up, BUG!!! */
+		do_something();
+	} else {
+		/* ACCESS_ONCE(b) = p; -- moved up, BUG!!! */
+		do_something_else();
 	}
-	x = *q;
+
+It is of course legal for the prior load to be part of the conditional,
+for example, as follows:
+
+	if (ACCESS_ONCE(a) > 0) {
+		ACCESS_ONCE(b) = q / 2;
+		do_something();
+	} else {
+		ACCESS_ONCE(b) = q / 3;
+		do_something_else();
+	}
+
+This will again ensure that the load from variable 'a' is ordered before the
+stores to variable 'b'.
+
+In addition, you need to be careful what you do with the local variable 'q',
+otherwise the compiler might be able to guess the value and again remove
+the needed conditional.  For example:
+
+	q = ACCESS_ONCE(a);
+	if (q % MAX) {
+		ACCESS_ONCE(b) = p;
+		do_something();
+	} else {
+		ACCESS_ONCE(b) = p;
+		do_something_else();
+	}
+
+If MAX is defined to be 1, then the compiler knows that (q % MAX) is
+equal to zero, in which case the compiler is within its rights to
+transform the above code into the following:
+
+	q = ACCESS_ONCE(a);
+	ACCESS_ONCE(b) = p;
+	do_something_else();
+
+This transformation loses the ordering between the load from variable 'a'
+and the store to variable 'b'.  If you are relying on this ordering, you
+should do something like the following:
+
+	q = ACCESS_ONCE(a);
+	BUILD_BUG_ON(MAX <= 1); /* Order load from a with store to b. */
+	if (q % MAX) {
+		ACCESS_ONCE(b) = p;
+		do_something();
+	} else {
+		ACCESS_ONCE(b) = p;
+		do_something_else();
+	}
+
+Finally, control dependencies do -not- provide transitivity.  This is
+demonstrated by two related examples:
+
+	CPU 0                     CPU 1
+	=====================     =====================
+	r1 = ACCESS_ONCE(x);      r2 = ACCESS_ONCE(y);
+	if (r1 >= 0)              if (r2 >= 0)
+	  ACCESS_ONCE(y) = 1;       ACCESS_ONCE(x) = 1;
+
+	assert(!(r1 == 1 && r2 == 1));
+
+The above two-CPU example will never trigger the assert().  However,
+if control dependencies guaranteed transitivity (which they do not),
+then adding the following two CPUs would guarantee a related assertion:
+
+	CPU 2                     CPU 3
+	=====================     =====================
+	ACCESS_ONCE(x) = 2;       ACCESS_ONCE(y) = 2;
+
+	assert(!(r1 == 2 && r2 == 2 && x == 1 && y == 1)); /* FAILS!!! */
+
+But because control dependencies do -not- provide transitivity, the
+above assertion can fail after the combined four-CPU example completes.
+If you need the four-CPU example to provide ordering, you will need
+smp_mb() between the loads and stores in the CPU 0 and CPU 1 code fragments.
+
+In summary:
+
+  (*) Control dependencies can order prior loads against later stores.
+      However, they do -not- guarantee any other sort of ordering:
+      Not prior loads against later loads, nor prior stores against
+      later anything.  If you need these other forms of ordering,
+      use smp_rmb(), smp_wmb(), or, in the case of prior stores and
+      later loads, smp_mb().
+
+  (*) Control dependencies require at least one run-time conditional
+      between the prior load and the subsequent store.  If the compiler
+      is able to optimize the conditional away, it will have also
+      optimized away the ordering.  Careful use of ACCESS_ONCE() can
+      help to preserve the needed conditional.
+
+  (*) Control dependencies require that the compiler avoid reordering the
+      dependency into nonexistence.  Careful use of ACCESS_ONCE() or
+      barrier() can help to preserve your control dependency.  Please
+      see the Compiler Barrier section for more information.
+
+  (*) Control dependencies do -not- provide transitivity.  If you
+      need transitivity, use smp_mb().
 
 
 SMP BARRIER PAIRING
@@ -561,23 +749,23 @@ barrier, though a general barrier would also be viable.  Similarly a read
 barrier or a data dependency barrier should always be paired with at least an
 write barrier, though, again, a general barrier is viable:
 
-	CPU 1		CPU 2
-	===============	===============
-	a = 1;
+	CPU 1		      CPU 2
+	===============	      ===============
+	ACCESS_ONCE(a) = 1;
 	<write barrier>
-	b = 2;		x = b;
-			<read barrier>
-			y = a;
+	ACCESS_ONCE(b) = 2;   x = ACCESS_ONCE(b);
+			      <read barrier>
+			      y = ACCESS_ONCE(a);
 
 Or:
 
-	CPU 1		CPU 2
-	===============	===============================
+	CPU 1		      CPU 2
+	===============	      ===============================
 	a = 1;
 	<write barrier>
-	b = &a;		x = b;
-			<data dependency barrier>
-			y = *x;
+	ACCESS_ONCE(b) = &a;  x = ACCESS_ONCE(b);
+			      <data dependency barrier>
+			      y = *x;
 
 Basically, the read barrier always has to be there, even though it can be of
 the "weaker" type.
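
As a userspace analogue of this pairing (not taken from the document itself), C11
fences can stand in for the barriers: atomic_thread_fence(memory_order_release) for
the write barrier and atomic_thread_fence(memory_order_acquire) for the read barrier.
Build the sketch with -pthread; the assertion holds because the fences pair:

	#include <stdatomic.h>
	#include <pthread.h>
	#include <assert.h>

	static _Atomic int a, b;

	/* Writer: the release fence plays the role of the <write barrier>. */
	static void *writer(void *unused)
	{
		atomic_store_explicit(&a, 1, memory_order_relaxed);
		atomic_thread_fence(memory_order_release);	/* <write barrier> */
		atomic_store_explicit(&b, 2, memory_order_relaxed);
		return NULL;
	}

	/* Reader: the acquire fence plays the role of the paired <read barrier>. */
	static void *reader(void *unused)
	{
		int x = atomic_load_explicit(&b, memory_order_relaxed);

		atomic_thread_fence(memory_order_acquire);	/* <read barrier> */
		if (x == 2)
			assert(atomic_load_explicit(&a, memory_order_relaxed) == 1);
		return NULL;
	}

	int main(void)
	{
		pthread_t t1, t2;

		pthread_create(&t1, NULL, writer, NULL);
		pthread_create(&t2, NULL, reader, NULL);
		pthread_join(t1, NULL);
		pthread_join(t2, NULL);
		return 0;
	}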
@@ -586,13 +774,13 @@ the "weaker" type.
 match the loads after the read barrier or the data dependency barrier, and vice
 versa:
 
-	CPU 1                           CPU 2
-	===============                 ===============
-	a = 1;           }----   --->{  v = c
-	b = 2;           }    \ /    {  w = d
-	<write barrier>        \        <read barrier>
-	c = 3;           }    / \    {  x = a;
-	d = 4;           }----   --->{  y = b;
+	CPU 1                               CPU 2
+	===================                 ===================
+	ACCESS_ONCE(a) = 1;  }----   --->{  v = ACCESS_ONCE(c);
+	ACCESS_ONCE(b) = 2;  }    \ /    {  w = ACCESS_ONCE(d);
+	<write barrier>            \        <read barrier>
+	ACCESS_ONCE(c) = 3;  }    / \    {  x = ACCESS_ONCE(a);
+	ACCESS_ONCE(d) = 4;  }----   --->{  y = ACCESS_ONCE(b);
 
 
 EXAMPLES OF MEMORY BARRIER SEQUENCES
@@ -882,12 +1070,12 @@ cache it for later use.
 
 Consider:
 
-	CPU 1	   		CPU 2
+	CPU 1			CPU 2
 	=======================	=======================
-	 	   		LOAD B
-	 	   		DIVIDE		} Divide instructions generally
-	 	   		DIVIDE		} take a long time to perform
-	 	   		LOAD A
+				LOAD B
+				DIVIDE		} Divide instructions generally
+				DIVIDE		} take a long time to perform
+				LOAD A
 
 Which might appear as this:
 
@@ -910,13 +1098,13 @@ Which might appear as this:
 Placing a read barrier or a data dependency barrier just before the second
 load:
 
-	CPU 1	   		CPU 2
+	CPU 1			CPU 2
 	=======================	=======================
-	 	   		LOAD B
-	 	   		DIVIDE
-	 	   		DIVIDE
+				LOAD B
+				DIVIDE
+				DIVIDE
 				<read barrier>
-	 	   		LOAD A
+				LOAD A
 
 will force any value speculatively obtained to be reconsidered to an extent
 dependent on the type of barrier used.  If there was no change made to the
@@ -1042,10 +1230,277 @@ compiler from moving the memory accesses either side of it to the other side:
 
 	barrier();
 
-This is a general barrier - lesser varieties of compiler barrier do not exist.
+This is a general barrier -- there are no read-read or write-write variants
+of barrier().  However, ACCESS_ONCE() can be thought of as a weak form
+for barrier() that affects only the specific accesses flagged by the
+ACCESS_ONCE().
+
+The barrier() function has the following effects:
+
+ (*) Prevents the compiler from reordering accesses following the
+     barrier() to precede any accesses preceding the barrier().
+     One example use for this property is to ease communication between
+     interrupt-handler code and the code that was interrupted.
+
+ (*) Within a loop, forces the compiler to load the variables used
+     in that loop's conditional on each pass through that loop.
+
+The ACCESS_ONCE() function can prevent any number of optimizations that,
+while perfectly safe in single-threaded code, can be fatal in concurrent
+code.  Here are some examples of these sorts of optimizations:
+
+ (*) The compiler is within its rights to merge successive loads from
+     the same variable.  Such merging can cause the compiler to "optimize"
+     the following code:
+
+	while (tmp = a)
+		do_something_with(tmp);
+
+     into the following code, which, although in some sense legitimate
+     for single-threaded code, is almost certainly not what the developer
+     intended:
+
+	if (tmp = a)
+		for (;;)
+			do_something_with(tmp);
+
+     Use ACCESS_ONCE() to prevent the compiler from doing this to you:
+
+	while (tmp = ACCESS_ONCE(a))
+		do_something_with(tmp);
+
+ (*) The compiler is within its rights to reload a variable, for example,
+     in cases where high register pressure prevents the compiler from
+     keeping all data of interest in registers.  The compiler might
+     therefore optimize the variable 'tmp' out of our previous example:
+
+	while (tmp = a)
+		do_something_with(tmp);
+
+     This could result in the following code, which is perfectly safe in
+     single-threaded code, but can be fatal in concurrent code:
+
+	while (a)
+		do_something_with(a);
+
+     For example, the optimized version of this code could result in
+     passing a zero to do_something_with() in the case where the variable
+     a was modified by some other CPU between the "while" statement and
+     the call to do_something_with().
+
+     Again, use ACCESS_ONCE() to prevent the compiler from doing this:
+
+	while (tmp = ACCESS_ONCE(a))
+		do_something_with(tmp);
+
+     Note that if the compiler runs short of registers, it might save
+     tmp onto the stack.  The overhead of this saving and later restoring
+     is why compilers reload variables.  Doing so is perfectly safe for
+     single-threaded code, so you need to tell the compiler about cases
+     where it is not safe.
+
+ (*) The compiler is within its rights to omit a load entirely if it knows
+     what the value will be.  For example, if the compiler can prove that
+     the value of variable 'a' is always zero, it can optimize this code:
+
+	while (tmp = a)
+		do_something_with(tmp);
 
-The compiler barrier has no direct effect on the CPU, which may then reorder
-things however it wishes.
+     Into this:
+
+	do { } while (0);
+
+     This transformation is a win for single-threaded code because it gets
+     rid of a load and a branch.  The problem is that the compiler will
+     carry out its proof assuming that the current CPU is the only one
+     updating variable 'a'.  If variable 'a' is shared, then the compiler's
+     proof will be erroneous.  Use ACCESS_ONCE() to tell the compiler
+     that it doesn't know as much as it thinks it does:
+
+	while (tmp = ACCESS_ONCE(a))
+		do_something_with(tmp);
+
+     But please note that the compiler is also closely watching what you
+     do with the value after the ACCESS_ONCE().  For example, suppose you
+     do the following and MAX is a preprocessor macro with the value 1:
+
+	while ((tmp = ACCESS_ONCE(a)) % MAX)
+		do_something_with(tmp);
+
+     Then the compiler knows that the result of the "%" operator applied
+     to MAX will always be zero, again allowing the compiler to optimize
+     the code into near-nonexistence.  (It will still load from the
+     variable 'a'.)
+
+ (*) Similarly, the compiler is within its rights to omit a store entirely
+     if it knows that the variable already has the value being stored.
+     Again, the compiler assumes that the current CPU is the only one
+     storing into the variable, which can cause the compiler to do the
+     wrong thing for shared variables.  For example, suppose you have
+     the following:
+
+	a = 0;
+	/* Code that does not store to variable a. */
+	a = 0;
+
+     The compiler sees that the value of variable 'a' is already zero, so
+     it might well omit the second store.  This would come as a fatal
+     surprise if some other CPU might have stored to variable 'a' in the
+     meantime.
+
+     Use ACCESS_ONCE() to prevent the compiler from making this sort of
+     wrong guess:
+
+	ACCESS_ONCE(a) = 0;
+	/* Code that does not store to variable a. */
+	ACCESS_ONCE(a) = 0;
+
+ (*) The compiler is within its rights to reorder memory accesses unless
+     you tell it not to.  For example, consider the following interaction
+     between process-level code and an interrupt handler:
+
+	void process_level(void)
+	{
+		msg = get_message();
+		flag = true;
+	}
+
+	void interrupt_handler(void)
+	{
+		if (flag)
+			process_message(msg);
+	}
+
+     There is nothing to prevent the compiler from transforming
+     process_level() to the following, in fact, this might well be a
+     win for single-threaded code:
+
+	void process_level(void)
+	{
+		flag = true;
+		msg = get_message();
+	}
+
+     If the interrupt occurs between these two statements, then
+     interrupt_handler() might be passed a garbled msg.  Use ACCESS_ONCE()
+     to prevent this as follows:
+
+	void process_level(void)
+	{
+		ACCESS_ONCE(msg) = get_message();
+		ACCESS_ONCE(flag) = true;
+	}
+
+	void interrupt_handler(void)
+	{
+		if (ACCESS_ONCE(flag))
+			process_message(ACCESS_ONCE(msg));
+	}
+
+     Note that the ACCESS_ONCE() wrappers in interrupt_handler()
+     are needed if this interrupt handler can itself be interrupted
+     by something that also accesses 'flag' and 'msg', for example,
+     a nested interrupt or an NMI.  Otherwise, ACCESS_ONCE() is not
+     needed in interrupt_handler() other than for documentation purposes.
+     (Note also that nested interrupts do not typically occur in modern
+     Linux kernels, in fact, if an interrupt handler returns with
+     interrupts enabled, you will get a WARN_ONCE() splat.)
+
+     You should assume that the compiler can move ACCESS_ONCE() past
+     code not containing ACCESS_ONCE(), barrier(), or similar primitives.
+
+     This effect could also be achieved using barrier(), but ACCESS_ONCE()
+     is more selective:  With ACCESS_ONCE(), the compiler need only forget
+     the contents of the indicated memory locations, while with barrier()
+     the compiler must discard the value of all memory locations that
+     it has currently cached in any machine registers.  Of course,
+     the compiler must also respect the order in which the ACCESS_ONCE()s
+     occur, though the CPU of course need not do so.
+
+ (*) The compiler is within its rights to invent stores to a variable,
+     as in the following example:
+
+	if (a)
+		b = a;
+	else
+		b = 42;
+
+     The compiler might save a branch by optimizing this as follows:
+
+	b = 42;
+	if (a)
+		b = a;
+
+     In single-threaded code, this is not only safe, but also saves
+     a branch.  Unfortunately, in concurrent code, this optimization
+     could cause some other CPU to see a spurious value of 42 -- even
+     if variable 'a' was never zero -- when loading variable 'b'.
+     Use ACCESS_ONCE() to prevent this as follows:
+
+	if (a)
+		ACCESS_ONCE(b) = a;
+	else
+		ACCESS_ONCE(b) = 42;
+
+     The compiler can also invent loads.  These are usually less
+     damaging, but they can result in cache-line bouncing and thus in
+     poor performance and scalability.  Use ACCESS_ONCE() to prevent
+     invented loads.
+
+ (*) For aligned memory locations whose size allows them to be accessed
+     with a single memory-reference instruction, prevents "load tearing"
+     and "store tearing," in which a single large access is replaced by
+     multiple smaller accesses.  For example, given an architecture having
+     16-bit store instructions with 7-bit immediate fields, the compiler
+     might be tempted to use two 16-bit store-immediate instructions to
+     implement the following 32-bit store:
+
+	p = 0x00010002;
+
+     Please note that GCC really does use this sort of optimization,
+     which is not surprising given that it would likely take more
+     than two instructions to build the constant and then store it.
+     This optimization can therefore be a win in single-threaded code.
+     In fact, a recent bug (since fixed) caused GCC to incorrectly use
+     this optimization in a volatile store.  In the absence of such bugs,
+     use of ACCESS_ONCE() prevents store tearing in the following example:
+
+	ACCESS_ONCE(p) = 0x00010002;
+
+     Use of packed structures can also result in load and store tearing,
+     as in this example:
+
+	struct __attribute__((__packed__)) foo {
+		short a;
+		int b;
+		short c;
+	};
+	struct foo foo1, foo2;
+	...
+
+	foo2.a = foo1.a;
+	foo2.b = foo1.b;
+	foo2.c = foo1.c;
+
+     Because there are no ACCESS_ONCE() wrappers and no volatile markings,
+     the compiler would be well within its rights to implement these three
+     assignment statements as a pair of 32-bit loads followed by a pair
+     of 32-bit stores.  This would result in load tearing on 'foo1.b'
+     and store tearing on 'foo2.b'.  ACCESS_ONCE() again prevents tearing
+     in this example:
+
+	foo2.a = foo1.a;
+	ACCESS_ONCE(foo2.b) = ACCESS_ONCE(foo1.b);
+	foo2.c = foo1.c;
+
+All that aside, it is never necessary to use ACCESS_ONCE() on a variable
+that has been marked volatile.  For example, because 'jiffies' is marked
+volatile, it is never necessary to say ACCESS_ONCE(jiffies).  The reason
+for this is that ACCESS_ONCE() is implemented as a volatile cast, which
+has no effect when its argument is already marked volatile.
+
+Please note that these compiler barriers have no direct effect on the CPU,
+which may then reorder things however it wishes.
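
For reference, the two primitives discussed in this section were, at the time of this
merge, defined in the GCC-oriented compiler headers essentially as shown in the sketch
below; the demo function and the constant 42 are purely illustrative:

	#include <stdio.h>

	#define barrier()	__asm__ __volatile__("" : : : "memory")
	#define ACCESS_ONCE(x)	(*(volatile typeof(x) *)&(x))

	static int flag, msg;

	/* Compiler-only ordering: the two volatile accesses cannot be torn,
	 * fused, or reordered against each other by the compiler, and
	 * barrier() keeps any memory access from being moved across it.
	 * The CPU itself is unaffected. */
	void process_level(void)
	{
		ACCESS_ONCE(msg) = 42;
		ACCESS_ONCE(flag) = 1;
		barrier();
	}

	int main(void)
	{
		process_level();
		printf("flag=%d msg=%d\n", ACCESS_ONCE(flag), ACCESS_ONCE(msg));
		return 0;
	}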
 
 
 CPU MEMORY BARRIERS
@@ -1135,7 +1590,7 @@ There are some more advanced barrier functions:
 	clear_bit( ... );
 
      This prevents memory operations before the clear leaking to after it.  See
-     the subsection on "Locking Functions" with reference to UNLOCK operation
+     the subsection on "Locking Functions" with reference to RELEASE operation
      implications.
 
      See Documentation/atomic_ops.txt for more information.  See the "Atomic
@@ -1169,8 +1624,8 @@ provide more substantial guarantees, but these may not be relied upon outside
 of arch specific code.
 
 
-LOCKING FUNCTIONS
------------------
+ACQUIRING FUNCTIONS
+-------------------
 
 The Linux kernel has a number of locking constructs:
 
@@ -1181,65 +1636,107 @@ The Linux kernel has a number of locking constructs:
  (*) R/W semaphores
  (*) RCU
 
-In all cases there are variants on "LOCK" operations and "UNLOCK" operations
+In all cases there are variants on "ACQUIRE" operations and "RELEASE" operations
 for each construct.  These operations all imply certain barriers:
 
- (1) LOCK operation implication:
+ (1) ACQUIRE operation implication:
 
-     Memory operations issued after the LOCK will be completed after the LOCK
-     operation has completed.
+     Memory operations issued after the ACQUIRE will be completed after the
+     ACQUIRE operation has completed.
 
-     Memory operations issued before the LOCK may be completed after the LOCK
-     operation has completed.
+     Memory operations issued before the ACQUIRE may be completed after the
+     ACQUIRE operation has completed.  An smp_mb__before_spinlock(), combined
+     with a following ACQUIRE, orders prior loads against subsequent stores and
+     stores and prior stores against subsequent stores.  Note that this is
+     weaker than smp_mb()!  The smp_mb__before_spinlock() primitive is free on
+     many architectures.
 
- (2) UNLOCK operation implication:
+ (2) RELEASE operation implication:
 
-     Memory operations issued before the UNLOCK will be completed before the
-     UNLOCK operation has completed.
+     Memory operations issued before the RELEASE will be completed before the
+     RELEASE operation has completed.
 
-     Memory operations issued after the UNLOCK may be completed before the
-     UNLOCK operation has completed.
+     Memory operations issued after the RELEASE may be completed before the
+     RELEASE operation has completed.
 
- (3) LOCK vs LOCK implication:
+ (3) ACQUIRE vs ACQUIRE implication:
 
-     All LOCK operations issued before another LOCK operation will be completed
-     before that LOCK operation.
+     All ACQUIRE operations issued before another ACQUIRE operation will be
+     completed before that ACQUIRE operation.
 
- (4) LOCK vs UNLOCK implication:
+ (4) ACQUIRE vs RELEASE implication:
 
-     All LOCK operations issued before an UNLOCK operation will be completed
-     before the UNLOCK operation.
+     All ACQUIRE operations issued before a RELEASE operation will be
+     completed before the RELEASE operation.
 
-     All UNLOCK operations issued before a LOCK operation will be completed
-     before the LOCK operation.
+ (5) Failed conditional ACQUIRE implication:
 
- (5) Failed conditional LOCK implication:
-
-     Certain variants of the LOCK operation may fail, either due to being
-     unable to get the lock immediately, or due to receiving an unblocked
+     Certain locking variants of the ACQUIRE operation may fail, either due to
+     being unable to get the lock immediately, or due to receiving an unblocked
      signal whilst asleep waiting for the lock to become available.  Failed
      locks do not imply any sort of barrier.
 
-Therefore, from (1), (2) and (4) an UNLOCK followed by an unconditional LOCK is
-equivalent to a full barrier, but a LOCK followed by an UNLOCK is not.
-
-[!] Note: one of the consequences of LOCKs and UNLOCKs being only one-way
-    barriers is that the effects of instructions outside of a critical section
-    may seep into the inside of the critical section.
+[!] Note: one of the consequences of lock ACQUIREs and RELEASEs being only
+one-way barriers is that the effects of instructions outside of a critical
+section may seep into the inside of the critical section.
 
-A LOCK followed by an UNLOCK may not be assumed to be full memory barrier
-because it is possible for an access preceding the LOCK to happen after the
-LOCK, and an access following the UNLOCK to happen before the UNLOCK, and the
-two accesses can themselves then cross:
+An ACQUIRE followed by a RELEASE may not be assumed to be full memory barrier
+because it is possible for an access preceding the ACQUIRE to happen after the
+ACQUIRE, and an access following the RELEASE to happen before the RELEASE, and
+the two accesses can themselves then cross:
 
 	*A = a;
-	LOCK
-	UNLOCK
+	ACQUIRE M
+	RELEASE M
 	*B = b;
 
 may occur as:
 
-	LOCK, STORE *B, STORE *A, UNLOCK
+	ACQUIRE M, STORE *B, STORE *A, RELEASE M
+
+This same reordering can of course occur if the lock's ACQUIRE and RELEASE are
+to the same lock variable, but only from the perspective of another CPU not
+holding that lock.
+
+In short, a RELEASE followed by an ACQUIRE may -not- be assumed to be a full
+memory barrier because it is possible for a preceding RELEASE to pass a
+later ACQUIRE from the viewpoint of the CPU, but not from the viewpoint
+of the compiler.  Note that deadlocks cannot be introduced by this
+interchange because if such a deadlock threatened, the RELEASE would
+simply complete.
+
+If it is necessary for a RELEASE-ACQUIRE pair to produce a full barrier, the
+ACQUIRE can be followed by an smp_mb__after_unlock_lock() invocation.  This
+will produce a full barrier if either (a) the RELEASE and the ACQUIRE are
+executed by the same CPU or task, or (b) the RELEASE and ACQUIRE act on the
+same variable.  The smp_mb__after_unlock_lock() primitive is free on many
+architectures.  Without smp_mb__after_unlock_lock(), the critical sections
+corresponding to the RELEASE and the ACQUIRE can cross:
+
+	*A = a;
+	RELEASE M
+	ACQUIRE N
+	*B = b;
+
+could occur as:
+
+	ACQUIRE N, STORE *B, STORE *A, RELEASE M
+
+With smp_mb__after_unlock_lock(), they cannot, so that:
+
+	*A = a;
+	RELEASE M
+	ACQUIRE N
+	smp_mb__after_unlock_lock();
+	*B = b;
+
+will always occur as either of the following:
+
+	STORE *A, RELEASE, ACQUIRE, STORE *B
+	STORE *A, ACQUIRE, RELEASE, STORE *B
+
+If the RELEASE and ACQUIRE were instead both operating on the same lock
+variable, only the first of these two alternatives can occur.
 
 Locks and semaphores may not provide any guarantee of ordering on UP compiled
 systems, and so cannot be counted on in such a situation to actually achieve
@@ -1253,33 +1750,33 @@ As an example, consider the following:
 
 	*A = a;
 	*B = b;
-	LOCK
+	ACQUIRE
 	*C = c;
 	*D = d;
-	UNLOCK
+	RELEASE
 	*E = e;
 	*F = f;
 
 The following sequence of events is acceptable:
 
-	LOCK, {*F,*A}, *E, {*C,*D}, *B, UNLOCK
+	ACQUIRE, {*F,*A}, *E, {*C,*D}, *B, RELEASE
 
 	[+] Note that {*F,*A} indicates a combined access.
 
 But none of the following are:
 
-	{*F,*A}, *B,	LOCK, *C, *D,	UNLOCK, *E
-	*A, *B, *C,	LOCK, *D,	UNLOCK, *E, *F
-	*A, *B,		LOCK, *C,	UNLOCK, *D, *E, *F
-	*B,		LOCK, *C, *D,	UNLOCK, {*F,*A}, *E
+	{*F,*A}, *B,	ACQUIRE, *C, *D,	RELEASE, *E
+	*A, *B, *C,	ACQUIRE, *D,		RELEASE, *E, *F
+	*A, *B,		ACQUIRE, *C,		RELEASE, *D, *E, *F
+	*B,		ACQUIRE, *C, *D,	RELEASE, {*F,*A}, *E
 
 
 
 INTERRUPT DISABLING FUNCTIONS
 -----------------------------
 
-Functions that disable interrupts (LOCK equivalent) and enable interrupts
-(UNLOCK equivalent) will act as compiler barriers only.  So if memory or I/O
+Functions that disable interrupts (ACQUIRE equivalent) and enable interrupts
+(RELEASE equivalent) will act as compiler barriers only.  So if memory or I/O
 barriers are required in such a situation, they must be provided from some
 other means.
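
A minimal sketch of supplying that ordering explicitly (A and B are
hypothetical shared variables):

	local_irq_save(flags);		/* "ACQUIRE" equivalent: compiler barrier only */
	ACCESS_ONCE(*A) = a;
	smp_mb();			/* ordering against other CPUs must be explicit */
	ACCESS_ONCE(*B) = b;
	local_irq_restore(flags);	/* "RELEASE" equivalent: compiler barrier only */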
 
@@ -1418,75 +1915,81 @@ Other functions that imply barriers:
  (*) schedule() and similar imply full memory barriers.
 
 
-=================================
-INTER-CPU LOCKING BARRIER EFFECTS
-=================================
+===================================
+INTER-CPU ACQUIRING BARRIER EFFECTS
+===================================
 
 On SMP systems locking primitives give a more substantial form of barrier: one
 that does affect memory access ordering on other CPUs, within the context of
 conflict on any particular lock.
 
 
-LOCKS VS MEMORY ACCESSES
-------------------------
+ACQUIRES VS MEMORY ACCESSES
+---------------------------
 
 Consider the following: the system has a pair of spinlocks (M) and (Q), and
 three CPUs; then should the following sequence of events occur:
 
 	CPU 1				CPU 2
 	===============================	===============================
-	*A = a;				*E = e;
-	LOCK M				LOCK Q
-	*B = b;				*F = f;
-	*C = c;				*G = g;
-	UNLOCK M			UNLOCK Q
-	*D = d;				*H = h;
+	ACCESS_ONCE(*A) = a;		ACCESS_ONCE(*E) = e;
+	ACQUIRE M			ACQUIRE Q
+	ACCESS_ONCE(*B) = b;		ACCESS_ONCE(*F) = f;
+	ACCESS_ONCE(*C) = c;		ACCESS_ONCE(*G) = g;
+	RELEASE M			RELEASE Q
+	ACCESS_ONCE(*D) = d;		ACCESS_ONCE(*H) = h;
 
 Then there is no guarantee as to what order CPU 3 will see the accesses to *A
 through *H occur in, other than the constraints imposed by the separate locks
 on the separate CPUs. It might, for example, see:
 
-	*E, LOCK M, LOCK Q, *G, *C, *F, *A, *B, UNLOCK Q, *D, *H, UNLOCK M
+	*E, ACQUIRE M, ACQUIRE Q, *G, *C, *F, *A, *B, RELEASE Q, *D, *H, RELEASE M
 
 But it won't see any of:
 
-	*B, *C or *D preceding LOCK M
-	*A, *B or *C following UNLOCK M
-	*F, *G or *H preceding LOCK Q
-	*E, *F or *G following UNLOCK Q
+	*B, *C or *D preceding ACQUIRE M
+	*A, *B or *C following RELEASE M
+	*F, *G or *H preceding ACQUIRE Q
+	*E, *F or *G following RELEASE Q
 
 
 However, if the following occurs:
 
 	CPU 1				CPU 2
 	===============================	===============================
-	*A = a;
-	LOCK M		[1]
-	*B = b;
-	*C = c;
-	UNLOCK M	[1]
-	*D = d;				*E = e;
-					LOCK M		[2]
-					*F = f;
-					*G = g;
-					UNLOCK M	[2]
-					*H = h;
+	ACCESS_ONCE(*A) = a;
+	ACQUIRE M		     [1]
+	ACCESS_ONCE(*B) = b;
+	ACCESS_ONCE(*C) = c;
+	RELEASE M	     [1]
+	ACCESS_ONCE(*D) = d;		ACCESS_ONCE(*E) = e;
+					ACQUIRE M		     [2]
+					smp_mb__after_unlock_lock();
+					ACCESS_ONCE(*F) = f;
+					ACCESS_ONCE(*G) = g;
+					RELEASE M	     [2]
+					ACCESS_ONCE(*H) = h;
 
 CPU 3 might see:
 
-	*E, LOCK M [1], *C, *B, *A, UNLOCK M [1],
-		LOCK M [2], *H, *F, *G, UNLOCK M [2], *D
+	*E, ACQUIRE M [1], *C, *B, *A, RELEASE M [1],
+		ACQUIRE M [2], *H, *F, *G, RELEASE M [2], *D
 
 But assuming CPU 1 gets the lock first, CPU 3 won't see any of:
 
-	*B, *C, *D, *F, *G or *H preceding LOCK M [1]
-	*A, *B or *C following UNLOCK M [1]
-	*F, *G or *H preceding LOCK M [2]
-	*A, *B, *C, *E, *F or *G following UNLOCK M [2]
+	*B, *C, *D, *F, *G or *H preceding ACQUIRE M [1]
+	*A, *B or *C following RELEASE M [1]
+	*F, *G or *H preceding ACQUIRE M [2]
+	*A, *B, *C, *E, *F or *G following RELEASE M [2]
 
+Note that the smp_mb__after_unlock_lock() is critically important
+here: Without it CPU 3 might see some of the above orderings.
+Without smp_mb__after_unlock_lock(), the accesses are not guaranteed
+to be seen in order unless CPU 3 holds lock M.
 
-LOCKS VS I/O ACCESSES
----------------------
+
+ACQUIRES VS I/O ACCESSES
+------------------------
 
 Under certain circumstances (especially involving NUMA), I/O accesses within
 two spinlocked sections on two different CPUs may be seen as interleaved by the
@@ -1687,28 +2190,30 @@ explicit lock operations, described later).  These include:
 
 	xchg();
 	cmpxchg();
-	atomic_xchg();
-	atomic_cmpxchg();
-	atomic_inc_return();
-	atomic_dec_return();
-	atomic_add_return();
-	atomic_sub_return();
-	atomic_inc_and_test();
-	atomic_dec_and_test();
-	atomic_sub_and_test();
-	atomic_add_negative();
-	atomic_add_unless();	/* when succeeds (returns 1) */
+	atomic_xchg();			atomic_long_xchg();
+	atomic_cmpxchg();		atomic_long_cmpxchg();
+	atomic_inc_return();		atomic_long_inc_return();
+	atomic_dec_return();		atomic_long_dec_return();
+	atomic_add_return();		atomic_long_add_return();
+	atomic_sub_return();		atomic_long_sub_return();
+	atomic_inc_and_test();		atomic_long_inc_and_test();
+	atomic_dec_and_test();		atomic_long_dec_and_test();
+	atomic_sub_and_test();		atomic_long_sub_and_test();
+	atomic_add_negative();		atomic_long_add_negative();
 	test_and_set_bit();
 	test_and_clear_bit();
 	test_and_change_bit();
 
-These are used for such things as implementing LOCK-class and UNLOCK-class
+	/* when succeeds (returns 1) */
+	atomic_add_unless();		atomic_long_add_unless();
+
+These are used for such things as implementing ACQUIRE-class and RELEASE-class
 operations and adjusting reference counters towards object destruction, and as
 such the implicit memory barrier effects are necessary.
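
As a minimal sketch of the reference-counting case (the object and its
refcount field are hypothetical), the implied full barrier is what makes the
final put safe:

	if (atomic_dec_and_test(&obj->refcount))	/* implies a full barrier */
		kfree(obj);		/* prior accesses to *obj are ordered before this */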
 
 
 The following operations are potential problems as they do _not_ imply memory
-barriers, but might be used for implementing such things as UNLOCK-class
+barriers, but might be used for implementing such things as RELEASE-class
 operations:
 
 	atomic_set();
@@ -1750,7 +2255,7 @@ The following operations are special locking primitives:
 	clear_bit_unlock();
 	__clear_bit_unlock();
 
-These implement LOCK-class and UNLOCK-class operations. These should be used in
+These implement ACQUIRE-class and RELEASE-class operations. These should be used in
 preference to other operations when implementing locking primitives, because
 their implementations can be optimised on many architectures.
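
For illustration, a trivial bit-spinlock built on these primitives might look
like the following sketch (assuming the ACQUIRE-class counterpart
test_and_set_bit_lock() and a hypothetical lock word):

	unsigned long lock_word;

	while (test_and_set_bit_lock(0, &lock_word))	/* ACQUIRE-class */
		cpu_relax();
	/* ... critical section ... */
	clear_bit_unlock(0, &lock_word);		/* RELEASE-class */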
 
@@ -1887,8 +2392,8 @@ functions:
      space should suffice for PCI.
 
      [*] NOTE! attempting to load from the same location as was written to may
-     	 cause a malfunction - consider the 16550 Rx/Tx serial registers for
-     	 example.
+	 cause a malfunction - consider the 16550 Rx/Tx serial registers for
+	 example.
 
      Used with prefetchable I/O memory, an mmiowb() barrier may be required to
      force stores to be ordered.
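
     A sketch of the usual pattern (the device, register offsets and lock are
     hypothetical):

	spin_lock_irqsave(&dev_lock, flags);
	writel(cmd, dev->regs + CMD_REG);
	writel(1, dev->regs + GO_REG);
	mmiowb();	/* make the device see the writes before the lock is dropped */
	spin_unlock_irqrestore(&dev_lock, flags);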
@@ -1955,19 +2460,19 @@ barriers for the most part act at the interface between the CPU and its cache
 	                          :
 	+--------+    +--------+  :   +--------+    +-----------+
 	|        |    |        |  :   |        |    |           |    +--------+
-	|  CPU   |    | Memory |  :   | CPU    |    |           |    |	      |
-	|  Core  |--->| Access |----->| Cache  |<-->|           |    |	      |
+	|  CPU   |    | Memory |  :   | CPU    |    |           |    |        |
+	|  Core  |--->| Access |----->| Cache  |<-->|           |    |        |
 	|        |    | Queue  |  :   |        |    |           |--->| Memory |
-	|        |    |        |  :   |        |    |           |    |	      |
-	+--------+    +--------+  :   +--------+    |           |    | 	      |
+	|        |    |        |  :   |        |    |           |    |        |
+	+--------+    +--------+  :   +--------+    |           |    |        |
 	                          :                 | Cache     |    +--------+
 	                          :                 | Coherency |
 	                          :                 | Mechanism |    +--------+
 	+--------+    +--------+  :   +--------+    |           |    |	      |
 	|        |    |        |  :   |        |    |           |    |        |
 	|  CPU   |    | Memory |  :   | CPU    |    |           |--->| Device |
-	|  Core  |--->| Access |----->| Cache  |<-->|           |    | 	      |
-	|        |    | Queue  |  :   |        |    |           |    | 	      |
+	|  Core  |--->| Access |----->| Cache  |<-->|           |    |        |
+	|        |    | Queue  |  :   |        |    |           |    |        |
 	|        |    |        |  :   |        |    |           |    +--------+
 	+--------+    +--------+  :   +--------+    +-----------+
 	                          :
@@ -2090,7 +2595,7 @@ CPU's caches by some other cache event:
 	p = &v;		q = p;
 			<D:request p>
 	<B:modify p=&v>	<D:commit p=&v>
-		  	<D:read p>
+			<D:read p>
 			x = *q;
 			<C:read *q>	Reads from v before v updated in cache
 			<C:unbusy>
@@ -2115,7 +2620,7 @@ queue before processing any further requests:
 	p = &v;		q = p;
 			<D:request p>
 	<B:modify p=&v>	<D:commit p=&v>
-		  	<D:read p>
+			<D:read p>
 			smp_read_barrier_depends()
 			<C:unbusy>
 			<C:commit v=2>
@@ -2177,11 +2682,11 @@ A programmer might take it for granted that the CPU will perform memory
 operations in exactly the order specified, so that if the CPU is, for example,
 given the following piece of code to execute:
 
-	a = *A;
-	*B = b;
-	c = *C;
-	d = *D;
-	*E = e;
+	a = ACCESS_ONCE(*A);
+	ACCESS_ONCE(*B) = b;
+	c = ACCESS_ONCE(*C);
+	d = ACCESS_ONCE(*D);
+	ACCESS_ONCE(*E) = e;
 
 they would then expect that the CPU will complete the memory operation for each
 instruction before moving on to the next one, leading to a definite sequence of
@@ -2228,12 +2733,12 @@ However, it is guaranteed that a CPU will be self-consistent: it will see its
 _own_ accesses appear to be correctly ordered, without the need for a memory
 barrier.  For instance with the following code:
 
-	U = *A;
-	*A = V;
-	*A = W;
-	X = *A;
-	*A = Y;
-	Z = *A;
+	U = ACCESS_ONCE(*A);
+	ACCESS_ONCE(*A) = V;
+	ACCESS_ONCE(*A) = W;
+	X = ACCESS_ONCE(*A);
+	ACCESS_ONCE(*A) = Y;
+	Z = ACCESS_ONCE(*A);
 
 and assuming no intervention by an external influence, it can be assumed that
 the final result will appear to be:
@@ -2250,7 +2755,12 @@ accesses:
 
 in that order, but, without intervention, the sequence may have almost any
 combination of elements combined or discarded, provided the program's view of
-the world remains consistent.
+the world remains consistent.  Note that ACCESS_ONCE() is -not- optional
+in the above example, as there are architectures where a given CPU might
+interchange successive loads to the same location.  On such architectures,
+ACCESS_ONCE() does whatever is necessary to prevent this, for example, on
+Itanium the volatile casts used by ACCESS_ONCE() cause GCC to emit the
+special ld.acq and st.rel instructions that prevent such reordering.
 
 The compiler may also combine, discard or defer elements of the sequence before
 the CPU even sees them.
@@ -2264,13 +2774,13 @@ may be reduced to:
 
 	*A = W;
 
-since, without a write barrier, it can be assumed that the effect of the
-storage of V to *A is lost.  Similarly:
+since, without either a write barrier or an ACCESS_ONCE(), it can be
+assumed that the effect of the storage of V to *A is lost.  Similarly:
 
 	*A = Y;
 	Z = *A;
 
-may, without a memory barrier, be reduced to:
+may, without a memory barrier or an ACCESS_ONCE(), be reduced to:
 
 	*A = Y;
 	Z = Y;

+ 2 - 2
Documentation/robust-futex-ABI.txt

@@ -146,8 +146,8 @@ On removal:
  1) set the 'list_op_pending' word to the address of the 'lock entry'
     to be removed,
  2) remove the lock entry for this lock from the 'head' list,
- 2) release the futex lock, and
- 2) clear the 'lock_op_pending' word.
+ 3) release the futex lock, and
+ 4) clear the 'lock_op_pending' word.
 
 On exit, the kernel will consider the address stored in
 'list_op_pending' and the address of each 'lock word' found by walking

+ 0 - 5
Documentation/sysctl/kernel.txt

@@ -428,11 +428,6 @@ rate for each task.
 numa_balancing_scan_size_mb is how many megabytes worth of pages are
 scanned for a given scan.
 
-numa_balancing_settle_count is how many scan periods must complete before
-the schedule balancer stops pushing the task towards a preferred node. This
-gives the scheduler a chance to place the task on an alternative node if the
-preferred node is overloaded.
-
 numa_balancing_migrate_deferred is how many page migrations get skipped
 unconditionally, after a page migration is skipped because a page is shared
 with other tasks. This reduces page migration overhead, and determines

+ 3 - 0
Documentation/x86/boot.txt

@@ -608,6 +608,9 @@ Protocol:       2.12+
 	- If 1, the kernel supports the 64-bit EFI handoff entry point
           given at handover_offset + 0x200.
 
+  Bit 4 (read): XLF_EFI_KEXEC
+	- If 1, the kernel supports kexec EFI boot with EFI runtime support.
+
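  As a sketch (assuming hdr points at the parsed setup_header; the helper name
  is hypothetical), a kexec-aware loader would test the flag like this:

	#define XLF_EFI_KEXEC	(1 << 4)

	if (hdr->xloadflags & XLF_EFI_KEXEC)
		prepare_efi_kexec_handover();	/* hypothetical */
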
 Field name:	cmdline_size
 Type:		read
 Offset/size:	0x238/4

+ 7 - 0
Documentation/x86/x86_64/mm.txt

@@ -28,4 +28,11 @@ reference.
 Current X86-64 implementations only support 40 bits of address space,
 but we support up to 46 bits. This expands into MBZ space in the page tables.
 
+->trampoline_pgd:
+
+We map EFI runtime services in the aforementioned PGD in the virtual
+range of 64Gb (arbitrarily set, can be raised if needed)
+
+0xffffffef00000000 - 0xffffffff00000000
+
 -Andi Kleen, Jul 2004

+ 3 - 2
Documentation/zorro.txt

@@ -95,8 +95,9 @@ The treatment of these regions depends on the type of Zorro space:
 -------------
 
 linux/include/linux/zorro.h
-linux/include/asm-{m68k,ppc}/zorro.h
-linux/include/linux/zorro_ids.h
+linux/include/uapi/linux/zorro.h
+linux/include/uapi/linux/zorro_ids.h
+linux/arch/m68k/include/asm/zorro.h
 linux/drivers/zorro
 /proc/bus/zorro
 

+ 22 - 3
MAINTAINERS

@@ -783,7 +783,7 @@ F:	arch/arm/boot/dts/sama*.dts
 F:	arch/arm/boot/dts/sama*.dtsi
 
 ARM/CALXEDA HIGHBANK ARCHITECTURE
-M:	Rob Herring <rob.herring@calxeda.com>
+M:	Rob Herring <robh@kernel.org>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Maintained
 F:	arch/arm/mach-highbank/
@@ -1368,6 +1368,9 @@ T:	git git://git.xilinx.com/linux-xlnx.git
 S:	Supported
 F:	arch/arm/mach-zynq/
 F:	drivers/cpuidle/cpuidle-zynq.c
+N:	zynq
+N:	xilinx
+F:	drivers/clocksource/cadence_ttc_timer.c
 
 ARM SMMU DRIVER
 M:	Will Deacon <will.deacon@arm.com>
@@ -2825,8 +2828,10 @@ F:	include/uapi/drm/
 
 INTEL DRM DRIVERS (excluding Poulsbo, Moorestown and derivative chipsets)
 M:	Daniel Vetter <daniel.vetter@ffwll.ch>
+M:	Jani Nikula <jani.nikula@linux.intel.com>
 L:	intel-gfx@lists.freedesktop.org
 L:	dri-devel@lists.freedesktop.org
+Q:	http://patchwork.freedesktop.org/project/intel-gfx/
 T:	git git://people.freedesktop.org/~danvet/drm-intel
 S:	Supported
 F:	drivers/gpu/drm/i915/
@@ -5136,6 +5141,11 @@ F:	drivers/lguest/
 F:	include/linux/lguest*.h
 F:	tools/lguest/
 
+LIBLOCKDEP
+M:	Sasha Levin <sasha.levin@oracle.com>
+S:	Maintained
+F:	tools/lib/lockdep/
+
 LINUX FOR IBM pSERIES (RS/6000)
 M:	Paul Mackerras <paulus@au.ibm.com>
 W:	http://www.ibm.com/linux/ltc/projects/ppc
@@ -6256,7 +6266,7 @@ F:	drivers/i2c/busses/i2c-ocores.c
 
 OPEN FIRMWARE AND FLATTENED DEVICE TREE
 M:	Grant Likely <grant.likely@linaro.org>
-M:	Rob Herring <rob.herring@calxeda.com>
+M:	Rob Herring <robh+dt@kernel.org>
 L:	devicetree@vger.kernel.org
 W:	http://fdt.secretlab.ca
 T:	git git://git.secretlab.ca/git/linux-2.6.git
@@ -6268,7 +6278,7 @@ K:	of_get_property
 K:	of_match_table
 
 OPEN FIRMWARE AND FLATTENED DEVICE TREE BINDINGS
-M:	Rob Herring <rob.herring@calxeda.com>
+M:	Rob Herring <robh+dt@kernel.org>
 M:	Pawel Moll <pawel.moll@arm.com>
 M:	Mark Rutland <mark.rutland@arm.com>
 M:	Ian Campbell <ijc+devicetree@hellion.org.uk>
@@ -7094,6 +7104,12 @@ T:	git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git
 F:	Documentation/RCU/torture.txt
 F:	kernel/rcu/torture.c
 
+RCUTORTURE TEST FRAMEWORK
+M:	"Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
+S:	Supported
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git
+F:	tools/testing/selftests/rcutorture
+
 RDC R-321X SoC
 M:	Florian Fainelli <florian@openwrt.org>
 S:	Maintained
@@ -9226,6 +9242,7 @@ F:	include/media/videobuf2-*
 
 VIRTIO CONSOLE DRIVER
 M:	Amit Shah <amit.shah@redhat.com>
+L:	virtio-dev@lists.oasis-open.org
 L:	virtualization@lists.linux-foundation.org
 S:	Maintained
 F:	drivers/char/virtio_console.c
@@ -9235,6 +9252,7 @@ F:	include/uapi/linux/virtio_console.h
 VIRTIO CORE, NET AND BLOCK DRIVERS
 M:	Rusty Russell <rusty@rustcorp.com.au>
 M:	"Michael S. Tsirkin" <mst@redhat.com>
+L:	virtio-dev@lists.oasis-open.org
 L:	virtualization@lists.linux-foundation.org
 S:	Maintained
 F:	drivers/virtio/
@@ -9247,6 +9265,7 @@ F:	include/uapi/linux/virtio_*.h
 VIRTIO HOST (VHOST)
 M:	"Michael S. Tsirkin" <mst@redhat.com>
 L:	kvm@vger.kernel.org
+L:	virtio-dev@lists.oasis-open.org
 L:	virtualization@lists.linux-foundation.org
 L:	netdev@vger.kernel.org
 S:	Maintained

+ 18 - 4
Makefile

@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 13
 SUBLEVEL = 0
-EXTRAVERSION = -rc6
+EXTRAVERSION =
 NAME = One Giant Leap for Frogkind
 
 # *DOCUMENTATION*
@@ -595,10 +595,24 @@ ifneq ($(CONFIG_FRAME_WARN),0)
 KBUILD_CFLAGS += $(call cc-option,-Wframe-larger-than=${CONFIG_FRAME_WARN})
 endif
 
-# Force gcc to behave correct even for buggy distributions
-ifndef CONFIG_CC_STACKPROTECTOR
-KBUILD_CFLAGS += $(call cc-option, -fno-stack-protector)
+# Handle stack protector mode.
+ifdef CONFIG_CC_STACKPROTECTOR_REGULAR
+  stackp-flag := -fstack-protector
+  ifeq ($(call cc-option, $(stackp-flag)),)
+    $(warning Cannot use CONFIG_CC_STACKPROTECTOR: \
+	      -fstack-protector not supported by compiler)
+  endif
+else ifdef CONFIG_CC_STACKPROTECTOR_STRONG
+  stackp-flag := -fstack-protector-strong
+  ifeq ($(call cc-option, $(stackp-flag)),)
+    $(warning Cannot use CONFIG_CC_STACKPROTECTOR_STRONG: \
+	      -fstack-protector-strong not supported by compiler)
+  endif
+else
+  # Force off for distro compilers that enable stack protector by default.
+  stackp-flag := $(call cc-option, -fno-stack-protector)
 endif
+KBUILD_CFLAGS += $(stackp-flag)
 
 # This warning generated too much noise in a regular build.
 # Use make W=1 to enable this warning (see scripts/Makefile.build)

+ 67 - 0
arch/Kconfig

@@ -336,6 +336,73 @@ config SECCOMP_FILTER
 
 	  See Documentation/prctl/seccomp_filter.txt for details.
 
+config HAVE_CC_STACKPROTECTOR
+	bool
+	help
+	  An arch should select this symbol if:
+	  - its compiler supports the -fstack-protector option
+	  - it has implemented a stack canary (e.g. __stack_chk_guard)
+
+config CC_STACKPROTECTOR
+	def_bool n
+	help
+	  Set when a stack-protector mode is enabled, so that the build
+	  can enable kernel-side support for the GCC feature.
+
+choice
+	prompt "Stack Protector buffer overflow detection"
+	depends on HAVE_CC_STACKPROTECTOR
+	default CC_STACKPROTECTOR_NONE
+	help
+	  This option turns on the "stack-protector" GCC feature. This
+	  feature puts, at the beginning of functions, a canary value on
+	  the stack just before the return address, and validates
+	  the value just before actually returning.  Stack based buffer
+	  overflows (that need to overwrite this return address) now also
+	  overwrite the canary, which gets detected and the attack is then
+	  neutralized via a kernel panic.
+
+config CC_STACKPROTECTOR_NONE
+	bool "None"
+	help
+	  Disable "stack-protector" GCC feature.
+
+config CC_STACKPROTECTOR_REGULAR
+	bool "Regular"
+	select CC_STACKPROTECTOR
+	help
+	  Functions will have the stack-protector canary logic added if they
+	  have an 8-byte or larger character array on the stack.
+
+	  This feature requires gcc version 4.2 or above, or a distribution
+	  gcc with the feature backported ("-fstack-protector").
+
+	  On an x86 "defconfig" build, this feature adds canary checks to
+	  about 3% of all kernel functions, which increases kernel code size
+	  by about 0.3%.
+
+config CC_STACKPROTECTOR_STRONG
+	bool "Strong"
+	select CC_STACKPROTECTOR
+	help
+	  Functions will have the stack-protector canary logic added in any
+	  of the following conditions:
+
+	  - local variable's address used as part of the right hand side of an
+	    assignment or function argument
+	  - local variable is an array (or union containing an array),
+	    regardless of array type or length
+	  - uses register local variables
+
+	  This feature requires gcc version 4.9 or above, or a distribution
+	  gcc with the feature backported ("-fstack-protector-strong").
+
+	  On an x86 "defconfig" build, this feature adds canary checks to
+	  about 20% of all kernel functions, which increases the kernel code
+	  size by about 2%.
+
+endchoice
+
 config HAVE_CONTEXT_TRACKING
 	bool
 	help
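
To make the canary mechanism described in the help text above concrete, a
minimal (hypothetical, user-space) function that -fstack-protector would
instrument looks like this:

	#include <string.h>

	void copy_name(const char *src)
	{
		char buf[16];		/* 8+ byte char array: gets a canary in "Regular" mode */

		strcpy(buf, src);	/* an over-long src overwrites the canary ... */
	}				/* ... and __stack_chk_fail() fires on function return */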

+ 5 - 20
arch/alpha/include/asm/barrier.h

@@ -3,33 +3,18 @@
 
 #include <asm/compiler.h>
 
-#define mb() \
-__asm__ __volatile__("mb": : :"memory")
+#define mb()	__asm__ __volatile__("mb": : :"memory")
+#define rmb()	__asm__ __volatile__("mb": : :"memory")
+#define wmb()	__asm__ __volatile__("wmb": : :"memory")
 
-#define rmb() \
-__asm__ __volatile__("mb": : :"memory")
-
-#define wmb() \
-__asm__ __volatile__("wmb": : :"memory")
-
-#define read_barrier_depends() \
-__asm__ __volatile__("mb": : :"memory")
+#define read_barrier_depends() __asm__ __volatile__("mb": : :"memory")
 
 #ifdef CONFIG_SMP
 #define __ASM_SMP_MB	"\tmb\n"
-#define smp_mb()	mb()
-#define smp_rmb()	rmb()
-#define smp_wmb()	wmb()
-#define smp_read_barrier_depends()	read_barrier_depends()
 #else
 #define __ASM_SMP_MB
-#define smp_mb()	barrier()
-#define smp_rmb()	barrier()
-#define smp_wmb()	barrier()
-#define smp_read_barrier_depends()	do { } while (0)
 #endif
 
-#define set_mb(var, value) \
-do { var = value; mb(); } while (0)
+#include <asm-generic/barrier.h>
 
 #endif		/* __BARRIER_H */

+ 1 - 0
arch/arc/include/asm/Kbuild

@@ -1,4 +1,5 @@
 generic-y += auxvec.h
+generic-y += barrier.h
 generic-y += bugs.h
 generic-y += bitsperlong.h
 generic-y += clkdev.h

+ 5 - 0
arch/arc/include/asm/atomic.h

@@ -190,6 +190,11 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
 
 #endif /* !CONFIG_ARC_HAS_LLSC */
 
+#define smp_mb__before_atomic_dec()	barrier()
+#define smp_mb__after_atomic_dec()	barrier()
+#define smp_mb__before_atomic_inc()	barrier()
+#define smp_mb__after_atomic_inc()	barrier()
+
 /**
  * __atomic_add_unless - add unless the number is a given value
  * @v: pointer of type atomic_t

+ 0 - 5
arch/arc/include/asm/barrier.h

@@ -30,11 +30,6 @@
 #define smp_wmb()       barrier()
 #endif
 
-#define smp_mb__before_atomic_dec()	barrier()
-#define smp_mb__after_atomic_dec()	barrier()
-#define smp_mb__before_atomic_inc()	barrier()
-#define smp_mb__after_atomic_inc()	barrier()
-
 #define smp_read_barrier_depends()      do { } while (0)
 
 #endif

+ 1 - 12
arch/arm/Kconfig

@@ -30,6 +30,7 @@ config ARM
 	select HAVE_BPF_JIT
 	select HAVE_CONTEXT_TRACKING
 	select HAVE_C_RECORDMCOUNT
+	select HAVE_CC_STACKPROTECTOR
 	select HAVE_DEBUG_KMEMLEAK
 	select HAVE_DMA_API_DEBUG
 	select HAVE_DMA_ATTRS
@@ -1856,18 +1857,6 @@ config SECCOMP
 	  and the task is only allowed to execute a few safe syscalls
 	  defined by each seccomp mode.
 
-config CC_STACKPROTECTOR
-	bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
-	help
-	  This option turns on the -fstack-protector GCC feature. This
-	  feature puts, at the beginning of functions, a canary value on
-	  the stack just before the return address, and validates
-	  the value just before actually returning.  Stack based buffer
-	  overflows (that need to overwrite this return address) now also
-	  overwrite the canary, which gets detected and the attack is then
-	  neutralized via a kernel panic.
-	  This feature requires gcc version 4.2 or above.
-
 config SWIOTLB
 	def_bool y
 

+ 0 - 4
arch/arm/Makefile

@@ -40,10 +40,6 @@ ifeq ($(CONFIG_FRAME_POINTER),y)
 KBUILD_CFLAGS	+=-fno-omit-frame-pointer -mapcs -mno-sched-prolog
 endif
 
-ifeq ($(CONFIG_CC_STACKPROTECTOR),y)
-KBUILD_CFLAGS	+=-fstack-protector
-endif
-
 ifeq ($(CONFIG_CPU_BIG_ENDIAN),y)
 KBUILD_CPPFLAGS	+= -mbig-endian
 AS		+= -EB

+ 14 - 0
arch/arm/boot/compressed/misc.c

@@ -127,6 +127,18 @@ asmlinkage void __div0(void)
 	error("Attempting division by 0!");
 }
 
+unsigned long __stack_chk_guard;
+
+void __stack_chk_guard_setup(void)
+{
+	__stack_chk_guard = 0x000a0dff;
+}
+
+void __stack_chk_fail(void)
+{
+	error("stack-protector: Kernel stack is corrupted\n");
+}
+
 extern int do_decompress(u8 *input, int len, u8 *output, void (*error)(char *x));
 
 
@@ -137,6 +149,8 @@ decompress_kernel(unsigned long output_start, unsigned long free_mem_ptr_p,
 {
 	int ret;
 
+	__stack_chk_guard_setup();
+
 	output_data		= (unsigned char *)output_start;
 	free_mem_ptr		= free_mem_ptr_p;
 	free_mem_end_ptr	= free_mem_ptr_end_p;

+ 1 - 1
arch/arm/boot/dts/exynos5250.dtsi

@@ -559,7 +559,7 @@
 			compatible = "arm,pl330", "arm,primecell";
 			reg = <0x10800000 0x1000>;
 			interrupts = <0 33 0>;
-			clocks = <&clock 271>;
+			clocks = <&clock 346>;
 			clock-names = "apb_pclk";
 			#dma-cells = <1>;
 			#dma-channels = <8>;

+ 7 - 0
arch/arm/boot/dts/sun5i-a10s.dtsi

@@ -332,5 +332,12 @@
 			clock-frequency = <100000>;
 			status = "disabled";
 		};
+
+		timer@01c60000 {
+			compatible = "allwinner,sun5i-a13-hstimer";
+			reg = <0x01c60000 0x1000>;
+			interrupts = <82>, <83>;
+			clocks = <&ahb_gates 28>;
+		};
 	};
 };

+ 7 - 0
arch/arm/boot/dts/sun5i-a13.dtsi

@@ -273,5 +273,12 @@
 			clock-frequency = <100000>;
 			status = "disabled";
 		};
+
+		timer@01c60000 {
+			compatible = "allwinner,sun5i-a13-hstimer";
+			reg = <0x01c60000 0x1000>;
+			interrupts = <82>, <83>;
+			clocks = <&ahb_gates 28>;
+		};
 	};
 };

+ 10 - 0
arch/arm/boot/dts/sun7i-a20.dtsi

@@ -395,6 +395,16 @@
 			status = "disabled";
 		};
 
+		hstimer@01c60000 {
+			compatible = "allwinner,sun7i-a20-hstimer";
+			reg = <0x01c60000 0x1000>;
+			interrupts = <0 81 1>,
+				     <0 82 1>,
+				     <0 83 1>,
+				     <0 84 1>;
+			clocks = <&ahb_gates 28>;
+		};
+
 		gic: interrupt-controller@01c81000 {
 			compatible = "arm,cortex-a7-gic", "arm,cortex-a15-gic";
 			reg = <0x01c81000 0x1000>,

+ 1 - 1
arch/arm/crypto/aesbs-core.S_shipped

@@ -58,7 +58,7 @@
 # define VFP_ABI_FRAME	0
 # define BSAES_ASM_EXTENDED_KEY
 # define XTS_CHAIN_TWEAK
-# define __ARM_ARCH__ __LINUX_ARM_ARCH__
+# define __ARM_ARCH__	7
 #endif
 
 #ifdef __thumb__

+ 1 - 1
arch/arm/crypto/bsaes-armv7.pl

@@ -701,7 +701,7 @@ $code.=<<___;
 # define VFP_ABI_FRAME	0
 # define BSAES_ASM_EXTENDED_KEY
 # define XTS_CHAIN_TWEAK
-# define __ARM_ARCH__ __LINUX_ARM_ARCH__
+# define __ARM_ARCH__	7
 #endif
 
 #ifdef __thumb__

+ 15 - 0
arch/arm/include/asm/barrier.h

@@ -59,6 +59,21 @@
 #define smp_wmb()	dmb(ishst)
 #endif
 
+#define smp_store_release(p, v)						\
+do {									\
+	compiletime_assert_atomic_type(*p);				\
+	smp_mb();							\
+	ACCESS_ONCE(*p) = (v);						\
+} while (0)
+
+#define smp_load_acquire(p)						\
+({									\
+	typeof(*p) ___p1 = ACCESS_ONCE(*p);				\
+	compiletime_assert_atomic_type(*p);				\
+	smp_mb();							\
+	___p1;								\
+})
+
 #define read_barrier_depends()		do { } while(0)
 #define smp_read_barrier_depends()	do { } while(0)
 

+ 1 - 1
arch/arm/include/asm/io.h

@@ -329,7 +329,7 @@ extern void _memset_io(volatile void __iomem *, int, size_t);
  */
 #define ioremap(cookie,size)		__arm_ioremap((cookie), (size), MT_DEVICE)
 #define ioremap_nocache(cookie,size)	__arm_ioremap((cookie), (size), MT_DEVICE)
-#define ioremap_cached(cookie,size)	__arm_ioremap((cookie), (size), MT_DEVICE_CACHED)
+#define ioremap_cache(cookie,size)	__arm_ioremap((cookie), (size), MT_DEVICE_CACHED)
 #define ioremap_wc(cookie,size)		__arm_ioremap((cookie), (size), MT_DEVICE_WC)
 #define iounmap				__arm_iounmap
 

+ 2 - 1
arch/arm/include/asm/memory.h

@@ -347,7 +347,8 @@ static inline __deprecated void *bus_to_virt(unsigned long x)
 #define ARCH_PFN_OFFSET		PHYS_PFN_OFFSET
 
 #define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
-#define virt_addr_valid(kaddr)	((unsigned long)(kaddr) >= PAGE_OFFSET && (unsigned long)(kaddr) < (unsigned long)high_memory)
+#define virt_addr_valid(kaddr)	(((unsigned long)(kaddr) >= PAGE_OFFSET && (unsigned long)(kaddr) < (unsigned long)high_memory) \
+					&& pfn_valid(__pa(kaddr) >> PAGE_SHIFT) )
 
 #endif
 

+ 1 - 1
arch/arm/include/asm/unistd.h

@@ -15,7 +15,7 @@
 
 #include <uapi/asm/unistd.h>
 
-#define __NR_syscalls  (380)
+#define __NR_syscalls  (384)
 #define __ARM_NR_cmpxchg		(__ARM_NR_BASE+0x00fff0)
 
 #define __ARCH_WANT_STAT64

+ 1 - 1
arch/arm/include/asm/xen/page.h

@@ -117,6 +117,6 @@ static inline bool set_phys_to_machine(unsigned long pfn, unsigned long mfn)
 	return __set_phys_to_machine(pfn, mfn);
 }
 
-#define xen_remap(cookie, size) ioremap_cached((cookie), (size));
+#define xen_remap(cookie, size) ioremap_cache((cookie), (size));
 
 #endif /* _ASM_ARM_XEN_PAGE_H */

+ 2 - 0
arch/arm/include/uapi/asm/unistd.h

@@ -406,6 +406,8 @@
 #define __NR_process_vm_writev		(__NR_SYSCALL_BASE+377)
 #define __NR_kcmp			(__NR_SYSCALL_BASE+378)
 #define __NR_finit_module		(__NR_SYSCALL_BASE+379)
+#define __NR_sched_setattr		(__NR_SYSCALL_BASE+380)
+#define __NR_sched_getattr		(__NR_SYSCALL_BASE+381)
 
 /*
  * This may need to be greater than __NR_last_syscall+1 in order to

+ 2 - 0
arch/arm/kernel/calls.S

@@ -389,6 +389,8 @@
 		CALL(sys_process_vm_writev)
 		CALL(sys_kcmp)
 		CALL(sys_finit_module)
+/* 380 */	CALL(sys_sched_setattr)
+		CALL(sys_sched_getattr)
 #ifndef syscalls_counted
 .equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls
 #define syscalls_counted

+ 1 - 1
arch/arm/kernel/devtree.c

@@ -171,7 +171,7 @@ void __init arm_dt_init_cpu_maps(void)
 
 bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
 {
-	return (phys_id & MPIDR_HWID_BITMASK) == cpu_logical_map(cpu);
+	return phys_id == cpu_logical_map(cpu);
 }
 
 static const void * __init arch_get_next_mach(const char *const **match)

+ 0 - 4
arch/arm/kernel/perf_event.c

@@ -99,10 +99,6 @@ int armpmu_event_set_period(struct perf_event *event)
 	s64 period = hwc->sample_period;
 	int ret = 0;
 
-	/* The period may have been changed by PERF_EVENT_IOC_PERIOD */
-	if (unlikely(period != hwc->last_period))
-		left = period - (hwc->last_period - left);
-
 	if (unlikely(left <= -period)) {
 		left = period;
 		local64_set(&hwc->period_left, left);

+ 1 - 1
arch/arm/kernel/perf_event_cpu.c

@@ -254,7 +254,7 @@ static int probe_current_pmu(struct arm_pmu *pmu)
 static int cpu_pmu_device_probe(struct platform_device *pdev)
 {
 	const struct of_device_id *of_id;
-	int (*init_fn)(struct arm_pmu *);
+	const int (*init_fn)(struct arm_pmu *);
 	struct device_node *node = pdev->dev.of_node;
 	struct arm_pmu *pmu;
 	int ret = -ENODEV;

+ 10 - 3
arch/arm/kernel/traps.c

@@ -36,7 +36,13 @@
 #include <asm/system_misc.h>
 #include <asm/opcodes.h>
 
-static const char *handler[]= { "prefetch abort", "data abort", "address exception", "interrupt" };
+static const char *handler[]= {
+	"prefetch abort",
+	"data abort",
+	"address exception",
+	"interrupt",
+	"undefined instruction",
+};
 
 void *vectors_page;
 
@@ -425,9 +431,10 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
 			instr2 = __mem_to_opcode_thumb16(instr2);
 			instr = __opcode_thumb32_compose(instr, instr2);
 		}
-	} else if (get_user(instr, (u32 __user *)pc)) {
+	} else {
+		if (get_user(instr, (u32 __user *)pc))
+			goto die_sig;
 		instr = __mem_to_opcode_arm(instr);
-		goto die_sig;
 	}
 
 	if (call_undef_hook(regs, instr) == 0)

+ 3 - 2
arch/arm/mach-footbridge/dc21285-timer.c

@@ -96,11 +96,12 @@ static struct irqaction footbridge_timer_irq = {
 void __init footbridge_timer_init(void)
 {
 	struct clock_event_device *ce = &ckevt_dc21285;
+	unsigned rate = DIV_ROUND_CLOSEST(mem_fclk_21285, 16);
 
-	clocksource_register_hz(&cksrc_dc21285, (mem_fclk_21285 + 8) / 16);
+	clocksource_register_hz(&cksrc_dc21285, rate);
 
 	setup_irq(ce->irq, &footbridge_timer_irq);
 
 	ce->cpumask = cpumask_of(smp_processor_id());
-	clockevents_config_and_register(ce, mem_fclk_21285, 0x4, 0xffffff);
+	clockevents_config_and_register(ce, rate, 0x4, 0xffffff);
 }

+ 1 - 0
arch/arm/mach-highbank/highbank.c

@@ -53,6 +53,7 @@ static void __init highbank_scu_map_io(void)
 
 static void highbank_l2x0_disable(void)
 {
+	outer_flush_all();
 	/* Disable PL310 L2 Cache controller */
 	highbank_smc1(0x102, 0x0);
 }

+ 1 - 0
arch/arm/mach-omap2/omap4-common.c

@@ -162,6 +162,7 @@ void __iomem *omap4_get_l2cache_base(void)
 
 static void omap4_l2x0_disable(void)
 {
+	outer_flush_all();
 	/* Disable PL310 L2 Cache controller */
 	omap_smc1(0x102, 0x0);
 }

+ 2 - 2
arch/arm/mach-shmobile/board-armadillo800eva.c

@@ -483,7 +483,7 @@ static struct platform_device lcdc0_device = {
 	.id		= 0,
 	.dev	= {
 		.platform_data	= &lcdc0_info,
-		.coherent_dma_mask = ~0,
+		.coherent_dma_mask = DMA_BIT_MASK(32),
 	},
 };
 
@@ -580,7 +580,7 @@ static struct platform_device hdmi_lcdc_device = {
 	.id		= 1,
 	.dev	= {
 		.platform_data	= &hdmi_lcdc_info,
-		.coherent_dma_mask = ~0,
+		.coherent_dma_mask = DMA_BIT_MASK(32),
 	},
 };
 

+ 1 - 1
arch/arm/mach-shmobile/board-kzm9g.c

@@ -334,7 +334,7 @@ static struct platform_device lcdc_device = {
 	.resource	= lcdc_resources,
 	.dev	= {
 		.platform_data	= &lcdc_info,
-		.coherent_dma_mask = ~0,
+		.coherent_dma_mask = DMA_BIT_MASK(32),
 	},
 };
 

+ 2 - 2
arch/arm/mach-shmobile/board-mackerel.c

@@ -409,7 +409,7 @@ static struct platform_device lcdc_device = {
 	.resource	= lcdc_resources,
 	.dev	= {
 		.platform_data	= &lcdc_info,
-		.coherent_dma_mask = ~0,
+		.coherent_dma_mask = DMA_BIT_MASK(32),
 	},
 };
 
@@ -499,7 +499,7 @@ static struct platform_device hdmi_lcdc_device = {
 	.id		= 1,
 	.dev	= {
 		.platform_data	= &hdmi_lcdc_info,
-		.coherent_dma_mask = ~0,
+		.coherent_dma_mask = DMA_BIT_MASK(32),
 	},
 };
 

+ 1 - 0
arch/arm/mach-sunxi/Kconfig

@@ -12,3 +12,4 @@ config ARCH_SUNXI
 	select PINCTRL_SUNXI
 	select SPARSE_IRQ
 	select SUN4I_TIMER
+	select SUN5I_HSTIMER

+ 3 - 3
arch/arm/mm/flush.c

@@ -175,16 +175,16 @@ void __flush_dcache_page(struct address_space *mapping, struct page *page)
 		unsigned long i;
 		if (cache_is_vipt_nonaliasing()) {
 			for (i = 0; i < (1 << compound_order(page)); i++) {
-				void *addr = kmap_atomic(page);
+				void *addr = kmap_atomic(page + i);
 				__cpuc_flush_dcache_area(addr, PAGE_SIZE);
 				kunmap_atomic(addr);
 			}
 		} else {
 			for (i = 0; i < (1 << compound_order(page)); i++) {
-				void *addr = kmap_high_get(page);
+				void *addr = kmap_high_get(page + i);
 				if (addr) {
 					__cpuc_flush_dcache_area(addr, PAGE_SIZE);
-					kunmap_high(page);
+					kunmap_high(page + i);
 				}
 			}
 		}

+ 1 - 1
arch/arm/mm/init.c

@@ -229,7 +229,7 @@ void __init setup_dma_zone(const struct machine_desc *mdesc)
 #ifdef CONFIG_ZONE_DMA
 	if (mdesc->dma_zone_size) {
 		arm_dma_zone_size = mdesc->dma_zone_size;
-		arm_dma_limit = __pv_phys_offset + arm_dma_zone_size - 1;
+		arm_dma_limit = PHYS_OFFSET + arm_dma_zone_size - 1;
 	} else
 		arm_dma_limit = 0xffffffff;
 	arm_dma_pfn_limit = arm_dma_limit >> PAGE_SHIFT;

+ 3 - 3
arch/arm/net/bpf_jit_32.c

@@ -641,10 +641,10 @@ load_ind:
 			emit(ARM_MUL(r_A, r_A, r_X), ctx);
 			break;
 		case BPF_S_ALU_DIV_K:
-			/* current k == reciprocal_value(userspace k) */
+			if (k == 1)
+				break;
 			emit_mov_i(r_scratch, k, ctx);
-			/* A = top 32 bits of the product */
-			emit(ARM_UMULL(r_scratch, r_A, r_A, r_scratch), ctx);
+			emit_udiv(r_A, r_A, r_scratch, ctx);
 			break;
 		case BPF_S_ALU_DIV_X:
 			update_on_xread(ctx);

+ 50 - 0
arch/arm64/include/asm/barrier.h

@@ -35,10 +35,60 @@
 #define smp_mb()	barrier()
 #define smp_rmb()	barrier()
 #define smp_wmb()	barrier()
+
+#define smp_store_release(p, v)						\
+do {									\
+	compiletime_assert_atomic_type(*p);				\
+	smp_mb();							\
+	ACCESS_ONCE(*p) = (v);						\
+} while (0)
+
+#define smp_load_acquire(p)						\
+({									\
+	typeof(*p) ___p1 = ACCESS_ONCE(*p);				\
+	compiletime_assert_atomic_type(*p);				\
+	smp_mb();							\
+	___p1;								\
+})
+
 #else
+
 #define smp_mb()	asm volatile("dmb ish" : : : "memory")
 #define smp_rmb()	asm volatile("dmb ishld" : : : "memory")
 #define smp_wmb()	asm volatile("dmb ishst" : : : "memory")
+
+#define smp_store_release(p, v)						\
+do {									\
+	compiletime_assert_atomic_type(*p);				\
+	switch (sizeof(*p)) {						\
+	case 4:								\
+		asm volatile ("stlr %w1, %0"				\
+				: "=Q" (*p) : "r" (v) : "memory");	\
+		break;							\
+	case 8:								\
+		asm volatile ("stlr %1, %0"				\
+				: "=Q" (*p) : "r" (v) : "memory");	\
+		break;							\
+	}								\
+} while (0)
+
+#define smp_load_acquire(p)						\
+({									\
+	typeof(*p) ___p1;						\
+	compiletime_assert_atomic_type(*p);				\
+	switch (sizeof(*p)) {						\
+	case 4:								\
+		asm volatile ("ldar %w0, %1"				\
+			: "=r" (___p1) : "Q" (*p) : "memory");		\
+		break;							\
+	case 8:								\
+		asm volatile ("ldar %0, %1"				\
+			: "=r" (___p1) : "Q" (*p) : "memory");		\
+		break;							\
+	}								\
+	___p1;								\
+})
+
 #endif
 
 #define read_barrier_depends()		do { } while(0)

+ 1 - 1
arch/arm64/include/asm/io.h

@@ -229,7 +229,7 @@ extern void __iomem *__ioremap(phys_addr_t phys_addr, size_t size, pgprot_t prot
 extern void __iounmap(volatile void __iomem *addr);
 extern void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size);
 
-#define PROT_DEFAULT		(pgprot_default | PTE_DIRTY)
+#define PROT_DEFAULT		(PTE_TYPE_PAGE | PTE_AF | PTE_DIRTY)
 #define PROT_DEVICE_nGnRE	(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_DEVICE_nGnRE))
 #define PROT_NORMAL_NC		(PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL_NC))
 #define PROT_NORMAL		(PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL))

+ 5 - 12
arch/avr32/include/asm/barrier.h

@@ -8,22 +8,15 @@
 #ifndef __ASM_AVR32_BARRIER_H
 #define __ASM_AVR32_BARRIER_H
 
-#define nop()			asm volatile("nop")
-
-#define mb()			asm volatile("" : : : "memory")
-#define rmb()			mb()
-#define wmb()			asm volatile("sync 0" : : : "memory")
-#define read_barrier_depends()  do { } while(0)
-#define set_mb(var, value)      do { var = value; mb(); } while(0)
+/*
+ * Weirdest thing ever.. no full barrier, but it has a write barrier!
+ */
+#define wmb()	asm volatile("sync 0" : : : "memory")
 
 #ifdef CONFIG_SMP
 # error "The AVR32 port does not support SMP"
-#else
-# define smp_mb()		barrier()
-# define smp_rmb()		barrier()
-# define smp_wmb()		barrier()
-# define smp_read_barrier_depends() do { } while(0)
 #endif
 
+#include <asm-generic/barrier.h>
 
 #endif /* __ASM_AVR32_BARRIER_H */

+ 1 - 17
arch/blackfin/include/asm/barrier.h

@@ -23,26 +23,10 @@
 # define rmb()	do { barrier(); smp_check_barrier(); } while (0)
 # define wmb()	do { barrier(); smp_mark_barrier(); } while (0)
 # define read_barrier_depends()	do { barrier(); smp_check_barrier(); } while (0)
-#else
-# define mb()	barrier()
-# define rmb()	barrier()
-# define wmb()	barrier()
-# define read_barrier_depends()	do { } while (0)
 #endif
 
-#else /* !CONFIG_SMP */
-
-#define mb()	barrier()
-#define rmb()	barrier()
-#define wmb()	barrier()
-#define read_barrier_depends()	do { } while (0)
-
 #endif /* !CONFIG_SMP */
 
-#define smp_mb()  mb()
-#define smp_rmb() rmb()
-#define smp_wmb() wmb()
-#define set_mb(var, value) do { var = value; mb(); } while (0)
-#define smp_read_barrier_depends()	read_barrier_depends()
+#include <asm-generic/barrier.h>
 
 #endif /* _BLACKFIN_BARRIER_H */

+ 1 - 0
arch/cris/include/asm/Kbuild

@@ -3,6 +3,7 @@ header-y += arch-v10/
 header-y += arch-v32/
 
 
+generic-y += barrier.h
 generic-y += clkdev.h
 generic-y += exec.h
 generic-y += kvm_para.h

+ 0 - 25
arch/cris/include/asm/barrier.h

@@ -1,25 +0,0 @@
-#ifndef __ASM_CRIS_BARRIER_H
-#define __ASM_CRIS_BARRIER_H
-
-#define nop() __asm__ __volatile__ ("nop");
-
-#define barrier() __asm__ __volatile__("": : :"memory")
-#define mb() barrier()
-#define rmb() mb()
-#define wmb() mb()
-#define read_barrier_depends() do { } while(0)
-#define set_mb(var, value)  do { var = value; mb(); } while (0)
-
-#ifdef CONFIG_SMP
-#define smp_mb()        mb()
-#define smp_rmb()       rmb()
-#define smp_wmb()       wmb()
-#define smp_read_barrier_depends()     read_barrier_depends()
-#else
-#define smp_mb()        barrier()
-#define smp_rmb()       barrier()
-#define smp_wmb()       barrier()
-#define smp_read_barrier_depends()     do { } while(0)
-#endif
-
-#endif /* __ASM_CRIS_BARRIER_H */

+ 1 - 7
arch/frv/include/asm/barrier.h

@@ -17,13 +17,7 @@
 #define mb()			asm volatile ("membar" : : :"memory")
 #define rmb()			asm volatile ("membar" : : :"memory")
 #define wmb()			asm volatile ("membar" : : :"memory")
-#define read_barrier_depends()	do { } while (0)
 
-#define smp_mb()			barrier()
-#define smp_rmb()			barrier()
-#define smp_wmb()			barrier()
-#define smp_read_barrier_depends()	do {} while(0)
-#define set_mb(var, value) \
-	do { var = (value); barrier(); } while (0)
+#include <asm-generic/barrier.h>
 
 #endif /* _ASM_BARRIER_H */

+ 1 - 0
arch/hexagon/include/asm/Kbuild

@@ -2,6 +2,7 @@
 header-y += ucontext.h
 
 generic-y += auxvec.h
+generic-y += barrier.h
 generic-y += bug.h
 generic-y += bugs.h
 generic-y += clkdev.h

+ 5 - 1
arch/hexagon/include/asm/atomic.h

@@ -160,8 +160,12 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 #define atomic_sub_and_test(i, v) (atomic_sub_return(i, (v)) == 0)
 #define atomic_add_negative(i, v) (atomic_add_return(i, (v)) < 0)
 
-
 #define atomic_inc_return(v) (atomic_add_return(1, v))
 #define atomic_dec_return(v) (atomic_sub_return(1, v))
 
+#define smp_mb__before_atomic_dec()	barrier()
+#define smp_mb__after_atomic_dec()	barrier()
+#define smp_mb__before_atomic_inc()	barrier()
+#define smp_mb__after_atomic_inc()	barrier()
+
 #endif

+ 0 - 4
arch/hexagon/include/asm/barrier.h

@@ -29,10 +29,6 @@
 #define smp_read_barrier_depends()	barrier()
 #define smp_wmb()			barrier()
 #define smp_mb()			barrier()
-#define smp_mb__before_atomic_dec()	barrier()
-#define smp_mb__after_atomic_dec()	barrier()
-#define smp_mb__before_atomic_inc()	barrier()
-#define smp_mb__after_atomic_inc()	barrier()
 
 /*  Set a value and use a memory barrier.  Used by the scheduler somewhere.  */
 #define set_mb(var, value) \

+ 0 - 12
arch/ia64/Kconfig

@@ -147,9 +147,6 @@ config PARAVIRT
 	  over full virtualization.  However, when run without a hypervisor
 	  the kernel is theoretically slower and slightly larger.
 
-
-source "arch/ia64/xen/Kconfig"
-
 endif
 
 choice
@@ -175,7 +172,6 @@ config IA64_GENERIC
 	  SGI-SN2		For SGI Altix systems
 	  SGI-UV		For SGI UV systems
 	  Ski-simulator		For the HP simulator <http://www.hpl.hp.com/research/linux/ski/>
-	  Xen-domU		For xen domU system
 
 	  If you don't know what to do, choose "generic".
 
@@ -231,14 +227,6 @@ config IA64_HP_SIM
 	bool "Ski-simulator"
 	select SWIOTLB
 
-config IA64_XEN_GUEST
-	bool "Xen guest"
-	select SWIOTLB
-	depends on XEN
-	help
-	  Build a kernel that runs on Xen guest domain. At this moment only
-	  16KB page size in supported.
-
 endchoice
 
 choice

+ 0 - 2
arch/ia64/Makefile

@@ -51,11 +51,9 @@ core-$(CONFIG_IA64_DIG_VTD) 	+= arch/ia64/dig/
 core-$(CONFIG_IA64_GENERIC) 	+= arch/ia64/dig/
 core-$(CONFIG_IA64_HP_ZX1)	+= arch/ia64/dig/
 core-$(CONFIG_IA64_HP_ZX1_SWIOTLB) += arch/ia64/dig/
-core-$(CONFIG_IA64_XEN_GUEST)	+= arch/ia64/dig/
 core-$(CONFIG_IA64_SGI_SN2)	+= arch/ia64/sn/
 core-$(CONFIG_IA64_SGI_UV)	+= arch/ia64/uv/
 core-$(CONFIG_KVM) 		+= arch/ia64/kvm/
-core-$(CONFIG_XEN)		+= arch/ia64/xen/
 
 drivers-$(CONFIG_PCI)		+= arch/ia64/pci/
 drivers-$(CONFIG_IA64_HP_SIM)	+= arch/ia64/hp/sim/

+ 0 - 199
arch/ia64/configs/xen_domu_defconfig

@@ -1,199 +0,0 @@
-CONFIG_EXPERIMENTAL=y
-CONFIG_SYSVIPC=y
-CONFIG_POSIX_MQUEUE=y
-CONFIG_IKCONFIG=y
-CONFIG_IKCONFIG_PROC=y
-CONFIG_LOG_BUF_SHIFT=20
-CONFIG_SYSFS_DEPRECATED_V2=y
-CONFIG_BLK_DEV_INITRD=y
-CONFIG_KALLSYMS_ALL=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-CONFIG_MODVERSIONS=y
-CONFIG_MODULE_SRCVERSION_ALL=y
-# CONFIG_BLK_DEV_BSG is not set
-CONFIG_PARAVIRT_GUEST=y
-CONFIG_IA64_XEN_GUEST=y
-CONFIG_MCKINLEY=y
-CONFIG_IA64_CYCLONE=y
-CONFIG_SMP=y
-CONFIG_NR_CPUS=16
-CONFIG_HOTPLUG_CPU=y
-CONFIG_PERMIT_BSP_REMOVE=y
-CONFIG_FORCE_CPEI_RETARGET=y
-CONFIG_IA64_MCA_RECOVERY=y
-CONFIG_PERFMON=y
-CONFIG_IA64_PALINFO=y
-CONFIG_KEXEC=y
-CONFIG_EFI_VARS=y
-CONFIG_BINFMT_MISC=m
-CONFIG_ACPI_PROCFS=y
-CONFIG_ACPI_BUTTON=m
-CONFIG_ACPI_FAN=m
-CONFIG_ACPI_PROCESSOR=m
-CONFIG_ACPI_CONTAINER=m
-CONFIG_HOTPLUG_PCI=y
-CONFIG_HOTPLUG_PCI_ACPI=m
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_INET=y
-CONFIG_IP_MULTICAST=y
-CONFIG_ARPD=y
-CONFIG_SYN_COOKIES=y
-# CONFIG_INET_LRO is not set
-# CONFIG_IPV6 is not set
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
-CONFIG_BLK_DEV_LOOP=m
-CONFIG_BLK_DEV_CRYPTOLOOP=m
-CONFIG_BLK_DEV_NBD=m
-CONFIG_BLK_DEV_RAM=y
-CONFIG_IDE=y
-CONFIG_BLK_DEV_IDECD=y
-CONFIG_BLK_DEV_GENERIC=y
-CONFIG_BLK_DEV_CMD64X=y
-CONFIG_BLK_DEV_PIIX=y
-CONFIG_SCSI=y
-CONFIG_BLK_DEV_SD=y
-CONFIG_CHR_DEV_ST=m
-CONFIG_BLK_DEV_SR=m
-CONFIG_CHR_DEV_SG=m
-CONFIG_SCSI_SYM53C8XX_2=y
-CONFIG_SCSI_QLOGIC_1280=y
-CONFIG_MD=y
-CONFIG_BLK_DEV_MD=m
-CONFIG_MD_LINEAR=m
-CONFIG_MD_RAID0=m
-CONFIG_MD_RAID1=m
-CONFIG_MD_MULTIPATH=m
-CONFIG_BLK_DEV_DM=m
-CONFIG_DM_CRYPT=m
-CONFIG_DM_SNAPSHOT=m
-CONFIG_DM_MIRROR=m
-CONFIG_DM_ZERO=m
-CONFIG_FUSION=y
-CONFIG_FUSION_SPI=y
-CONFIG_FUSION_FC=y
-CONFIG_FUSION_CTL=y
-CONFIG_NETDEVICES=y
-CONFIG_DUMMY=m
-CONFIG_NET_ETHERNET=y
-CONFIG_NET_TULIP=y
-CONFIG_TULIP=m
-CONFIG_NET_PCI=y
-CONFIG_NET_VENDOR_INTEL=y
-CONFIG_E100=m
-CONFIG_E1000=y
-CONFIG_TIGON3=y
-CONFIG_NETCONSOLE=y
-# CONFIG_SERIO_SERPORT is not set
-CONFIG_GAMEPORT=m
-CONFIG_SERIAL_NONSTANDARD=y
-CONFIG_SERIAL_8250=y
-CONFIG_SERIAL_8250_CONSOLE=y
-CONFIG_SERIAL_8250_NR_UARTS=6
-CONFIG_SERIAL_8250_EXTENDED=y
-CONFIG_SERIAL_8250_SHARE_IRQ=y
-# CONFIG_HW_RANDOM is not set
-CONFIG_EFI_RTC=y
-CONFIG_RAW_DRIVER=m
-CONFIG_HPET=y
-CONFIG_AGP=m
-CONFIG_DRM=m
-CONFIG_DRM_TDFX=m
-CONFIG_DRM_R128=m
-CONFIG_DRM_RADEON=m
-CONFIG_DRM_MGA=m
-CONFIG_DRM_SIS=m
-CONFIG_HID_GYRATION=y
-CONFIG_HID_NTRIG=y
-CONFIG_HID_PANTHERLORD=y
-CONFIG_HID_PETALYNX=y
-CONFIG_HID_SAMSUNG=y
-CONFIG_HID_SONY=y
-CONFIG_HID_SUNPLUS=y
-CONFIG_HID_TOPSEED=y
-CONFIG_USB=y
-CONFIG_USB_DEVICEFS=y
-CONFIG_USB_EHCI_HCD=m
-CONFIG_USB_OHCI_HCD=m
-CONFIG_USB_UHCI_HCD=y
-CONFIG_USB_STORAGE=m
-CONFIG_EXT2_FS=y
-CONFIG_EXT2_FS_XATTR=y
-CONFIG_EXT2_FS_POSIX_ACL=y
-CONFIG_EXT2_FS_SECURITY=y
-CONFIG_EXT3_FS=y
-CONFIG_EXT3_FS_POSIX_ACL=y
-CONFIG_EXT3_FS_SECURITY=y
-CONFIG_REISERFS_FS=y
-CONFIG_REISERFS_FS_XATTR=y
-CONFIG_REISERFS_FS_POSIX_ACL=y
-CONFIG_REISERFS_FS_SECURITY=y
-CONFIG_XFS_FS=y
-CONFIG_AUTOFS_FS=y
-CONFIG_AUTOFS4_FS=y
-CONFIG_ISO9660_FS=m
-CONFIG_JOLIET=y
-CONFIG_UDF_FS=m
-CONFIG_VFAT_FS=y
-CONFIG_NTFS_FS=m
-CONFIG_PROC_KCORE=y
-CONFIG_TMPFS=y
-CONFIG_HUGETLBFS=y
-CONFIG_NFS_FS=m
-CONFIG_NFS_V3=y
-CONFIG_NFS_V4=y
-CONFIG_NFSD=m
-CONFIG_NFSD_V4=y
-CONFIG_SMB_FS=m
-CONFIG_SMB_NLS_DEFAULT=y
-CONFIG_CIFS=m
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_SGI_PARTITION=y
-CONFIG_EFI_PARTITION=y
-CONFIG_NLS_CODEPAGE_437=y
-CONFIG_NLS_CODEPAGE_737=m
-CONFIG_NLS_CODEPAGE_775=m
-CONFIG_NLS_CODEPAGE_850=m
-CONFIG_NLS_CODEPAGE_852=m
-CONFIG_NLS_CODEPAGE_855=m
-CONFIG_NLS_CODEPAGE_857=m
-CONFIG_NLS_CODEPAGE_860=m
-CONFIG_NLS_CODEPAGE_861=m
-CONFIG_NLS_CODEPAGE_862=m
-CONFIG_NLS_CODEPAGE_863=m
-CONFIG_NLS_CODEPAGE_864=m
-CONFIG_NLS_CODEPAGE_865=m
-CONFIG_NLS_CODEPAGE_866=m
-CONFIG_NLS_CODEPAGE_869=m
-CONFIG_NLS_CODEPAGE_936=m
-CONFIG_NLS_CODEPAGE_950=m
-CONFIG_NLS_CODEPAGE_932=m
-CONFIG_NLS_CODEPAGE_949=m
-CONFIG_NLS_CODEPAGE_874=m
-CONFIG_NLS_ISO8859_8=m
-CONFIG_NLS_CODEPAGE_1250=m
-CONFIG_NLS_CODEPAGE_1251=m
-CONFIG_NLS_ISO8859_1=y
-CONFIG_NLS_ISO8859_2=m
-CONFIG_NLS_ISO8859_3=m
-CONFIG_NLS_ISO8859_4=m
-CONFIG_NLS_ISO8859_5=m
-CONFIG_NLS_ISO8859_6=m
-CONFIG_NLS_ISO8859_7=m
-CONFIG_NLS_ISO8859_9=m
-CONFIG_NLS_ISO8859_13=m
-CONFIG_NLS_ISO8859_14=m
-CONFIG_NLS_ISO8859_15=m
-CONFIG_NLS_KOI8_R=m
-CONFIG_NLS_KOI8_U=m
-CONFIG_NLS_UTF8=m
-CONFIG_MAGIC_SYSRQ=y
-CONFIG_DEBUG_KERNEL=y
-CONFIG_DEBUG_MUTEXES=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_IA64_GRANULE_16MB=y
-CONFIG_CRYPTO_ECB=m
-CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_MD5=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set

+ 0 - 2
arch/ia64/include/asm/acpi.h

@@ -111,8 +111,6 @@ static inline const char *acpi_get_sysname (void)
 	return "uv";
 # elif defined (CONFIG_IA64_DIG)
 	return "dig";
-# elif defined (CONFIG_IA64_XEN_GUEST)
-	return "xen";
 # elif defined(CONFIG_IA64_DIG_VTD)
 	return "dig_vtd";
 # else

+ 23 - 0
arch/ia64/include/asm/barrier.h

@@ -45,13 +45,36 @@
 # define smp_rmb()	rmb()
 # define smp_wmb()	wmb()
 # define smp_read_barrier_depends()	read_barrier_depends()
+
 #else
+
 # define smp_mb()	barrier()
 # define smp_rmb()	barrier()
 # define smp_wmb()	barrier()
 # define smp_read_barrier_depends()	do { } while(0)
+
 #endif
 
+/*
+ * IA64 GCC turns volatile stores into st.rel and volatile loads into ld.acq;
+ * no need for asm trickery!
+ */
+
+#define smp_store_release(p, v)						\
+do {									\
+	compiletime_assert_atomic_type(*p);				\
+	barrier();							\
+	ACCESS_ONCE(*p) = (v);						\
+} while (0)
+
+#define smp_load_acquire(p)						\
+({									\
+	typeof(*p) ___p1 = ACCESS_ONCE(*p);				\
+	compiletime_assert_atomic_type(*p);				\
+	barrier();							\
+	___p1;								\
+})
+
 /*
  * XXX check on this ---I suspect what Linus really wants here is
  * acquire vs release semantics but we can't discuss this stuff with

+ 0 - 2
arch/ia64/include/asm/machvec.h

@@ -113,8 +113,6 @@ extern void machvec_tlb_migrate_finish (struct mm_struct *);
 #  include <asm/machvec_sn2.h>
 # elif defined (CONFIG_IA64_SGI_UV)
 #  include <asm/machvec_uv.h>
-# elif defined (CONFIG_IA64_XEN_GUEST)
-#  include <asm/machvec_xen.h>
 # elif defined (CONFIG_IA64_GENERIC)
 
 # ifdef MACHVEC_PLATFORM_HEADER

+ 0 - 22
arch/ia64/include/asm/machvec_xen.h

@@ -1,22 +0,0 @@
-#ifndef _ASM_IA64_MACHVEC_XEN_h
-#define _ASM_IA64_MACHVEC_XEN_h
-
-extern ia64_mv_setup_t			dig_setup;
-extern ia64_mv_cpu_init_t		xen_cpu_init;
-extern ia64_mv_irq_init_t		xen_irq_init;
-extern ia64_mv_send_ipi_t		xen_platform_send_ipi;
-
-/*
- * This stuff has dual use!
- *
- * For a generic kernel, the macros are used to initialize the
- * platform's machvec structure.  When compiling a non-generic kernel,
- * the macros are used directly.
- */
-#define ia64_platform_name			"xen"
-#define platform_setup				dig_setup
-#define platform_cpu_init			xen_cpu_init
-#define platform_irq_init			xen_irq_init
-#define platform_send_ipi			xen_platform_send_ipi
-
-#endif /* _ASM_IA64_MACHVEC_XEN_h */

+ 0 - 1
arch/ia64/include/asm/meminit.h

@@ -18,7 +18,6 @@
  * 	- crash dumping code reserved region
  * 	- Kernel memory map built from EFI memory map
  * 	- ELF core header
- *	- xen start info if CONFIG_XEN
  *
  * More could be added if necessary
  */

+ 0 - 1
arch/ia64/include/asm/paravirt.h

@@ -75,7 +75,6 @@ void *paravirt_get_gate_section(void);
 #ifdef CONFIG_PARAVIRT_GUEST
 
 #define PARAVIRT_HYPERVISOR_TYPE_DEFAULT	0
-#define PARAVIRT_HYPERVISOR_TYPE_XEN		1
 
 #ifndef __ASSEMBLY__
 

+ 1 - 1
arch/ia64/include/asm/pvclock-abi.h

@@ -11,7 +11,7 @@
 /*
  * These structs MUST NOT be changed.
  * They are the ABI between hypervisor and guest OS.
- * Both Xen and KVM are using this.
+ * KVM is using this.
  *
  * pvclock_vcpu_time_info holds the system time and the tsc timestamp
  * of the last update. So the guest can use the tsc delta to get a

+ 0 - 51
arch/ia64/include/asm/sync_bitops.h

@@ -1,51 +0,0 @@
-#ifndef _ASM_IA64_SYNC_BITOPS_H
-#define _ASM_IA64_SYNC_BITOPS_H
-
-/*
- * Copyright (C) 2008 Isaku Yamahata <yamahata at valinux co jp>
- *
- * Based on synch_bitops.h which Dan Magenhaimer wrote.
- *
- * bit operations which provide guaranteed strong synchronisation
- * when communicating with Xen or other guest OSes running on other CPUs.
- */
-
-static inline void sync_set_bit(int nr, volatile void *addr)
-{
-	set_bit(nr, addr);
-}
-
-static inline void sync_clear_bit(int nr, volatile void *addr)
-{
-	clear_bit(nr, addr);
-}
-
-static inline void sync_change_bit(int nr, volatile void *addr)
-{
-	change_bit(nr, addr);
-}
-
-static inline int sync_test_and_set_bit(int nr, volatile void *addr)
-{
-	return test_and_set_bit(nr, addr);
-}
-
-static inline int sync_test_and_clear_bit(int nr, volatile void *addr)
-{
-	return test_and_clear_bit(nr, addr);
-}
-
-static inline int sync_test_and_change_bit(int nr, volatile void *addr)
-{
-	return test_and_change_bit(nr, addr);
-}
-
-static inline int sync_test_bit(int nr, const volatile void *addr)
-{
-	return test_bit(nr, addr);
-}
-
-#define sync_cmpxchg(ptr, old, new)				\
-	((__typeof__(*(ptr)))cmpxchg_acq((ptr), (old), (new)))
-
-#endif /* _ASM_IA64_SYNC_BITOPS_H */

+ 0 - 41
arch/ia64/include/asm/xen/events.h

@@ -1,41 +0,0 @@
-/******************************************************************************
- * arch/ia64/include/asm/xen/events.h
- *
- * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
- *                    VA Linux Systems Japan K.K.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- *
- */
-#ifndef _ASM_IA64_XEN_EVENTS_H
-#define _ASM_IA64_XEN_EVENTS_H
-
-enum ipi_vector {
-	XEN_RESCHEDULE_VECTOR,
-	XEN_IPI_VECTOR,
-	XEN_CMCP_VECTOR,
-	XEN_CPEP_VECTOR,
-
-	XEN_NR_IPIS,
-};
-
-static inline int xen_irqs_disabled(struct pt_regs *regs)
-{
-	return !(ia64_psr(regs)->i);
-}
-
-#define irq_ctx_init(cpu)	do { } while (0)
-
-#endif /* _ASM_IA64_XEN_EVENTS_H */

+ 0 - 265
arch/ia64/include/asm/xen/hypercall.h

@@ -1,265 +0,0 @@
-/******************************************************************************
- * hypercall.h
- *
- * Linux-specific hypervisor handling.
- *
- * Copyright (c) 2002-2004, K A Fraser
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation; or, when distributed
- * separately from the Linux kernel or incorporated into other
- * software packages, subject to the following license:
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this source file (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use, copy, modify,
- * merge, publish, distribute, sublicense, and/or sell copies of the Software,
- * and to permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- */
-
-#ifndef _ASM_IA64_XEN_HYPERCALL_H
-#define _ASM_IA64_XEN_HYPERCALL_H
-
-#include <xen/interface/xen.h>
-#include <xen/interface/physdev.h>
-#include <xen/interface/sched.h>
-#include <asm/xen/xcom_hcall.h>
-struct xencomm_handle;
-extern unsigned long __hypercall(unsigned long a1, unsigned long a2,
-				 unsigned long a3, unsigned long a4,
-				 unsigned long a5, unsigned long cmd);
-
-/*
- * Assembler stubs for hyper-calls.
- */
-
-#define _hypercall0(type, name)					\
-({								\
-	long __res;						\
-	__res = __hypercall(0, 0, 0, 0, 0, __HYPERVISOR_##name);\
-	(type)__res;						\
-})
-
-#define _hypercall1(type, name, a1)				\
-({								\
-	long __res;						\
-	__res = __hypercall((unsigned long)a1,			\
-			     0, 0, 0, 0, __HYPERVISOR_##name);	\
-	(type)__res;						\
-})
-
-#define _hypercall2(type, name, a1, a2)				\
-({								\
-	long __res;						\
-	__res = __hypercall((unsigned long)a1,			\
-			    (unsigned long)a2,			\
-			    0, 0, 0, __HYPERVISOR_##name);	\
-	(type)__res;						\
-})
-
-#define _hypercall3(type, name, a1, a2, a3)			\
-({								\
-	long __res;						\
-	__res = __hypercall((unsigned long)a1,			\
-			    (unsigned long)a2,			\
-			    (unsigned long)a3,			\
-			    0, 0, __HYPERVISOR_##name);		\
-	(type)__res;						\
-})
-
-#define _hypercall4(type, name, a1, a2, a3, a4)			\
-({								\
-	long __res;						\
-	__res = __hypercall((unsigned long)a1,			\
-			    (unsigned long)a2,			\
-			    (unsigned long)a3,			\
-			    (unsigned long)a4,			\
-			    0, __HYPERVISOR_##name);		\
-	(type)__res;						\
-})
-
-#define _hypercall5(type, name, a1, a2, a3, a4, a5)		\
-({								\
-	long __res;						\
-	__res = __hypercall((unsigned long)a1,			\
-			    (unsigned long)a2,			\
-			    (unsigned long)a3,			\
-			    (unsigned long)a4,			\
-			    (unsigned long)a5,			\
-			    __HYPERVISOR_##name);		\
-	(type)__res;						\
-})
-
-
-static inline int
-xencomm_arch_hypercall_sched_op(int cmd, struct xencomm_handle *arg)
-{
-	return _hypercall2(int, sched_op, cmd, arg);
-}
-
-static inline long
-HYPERVISOR_set_timer_op(u64 timeout)
-{
-	unsigned long timeout_hi = (unsigned long)(timeout >> 32);
-	unsigned long timeout_lo = (unsigned long)timeout;
-	return _hypercall2(long, set_timer_op, timeout_lo, timeout_hi);
-}
-
-static inline int
-xencomm_arch_hypercall_multicall(struct xencomm_handle *call_list,
-				 int nr_calls)
-{
-	return _hypercall2(int, multicall, call_list, nr_calls);
-}
-
-static inline int
-xencomm_arch_hypercall_memory_op(unsigned int cmd, struct xencomm_handle *arg)
-{
-	return _hypercall2(int, memory_op, cmd, arg);
-}
-
-static inline int
-xencomm_arch_hypercall_event_channel_op(int cmd, struct xencomm_handle *arg)
-{
-	return _hypercall2(int, event_channel_op, cmd, arg);
-}
-
-static inline int
-xencomm_arch_hypercall_xen_version(int cmd, struct xencomm_handle *arg)
-{
-	return _hypercall2(int, xen_version, cmd, arg);
-}
-
-static inline int
-xencomm_arch_hypercall_console_io(int cmd, int count,
-				  struct xencomm_handle *str)
-{
-	return _hypercall3(int, console_io, cmd, count, str);
-}
-
-static inline int
-xencomm_arch_hypercall_physdev_op(int cmd, struct xencomm_handle *arg)
-{
-	return _hypercall2(int, physdev_op, cmd, arg);
-}
-
-static inline int
-xencomm_arch_hypercall_grant_table_op(unsigned int cmd,
-				      struct xencomm_handle *uop,
-				      unsigned int count)
-{
-	return _hypercall3(int, grant_table_op, cmd, uop, count);
-}
-
-int HYPERVISOR_grant_table_op(unsigned int cmd, void *uop, unsigned int count);
-
-extern int xencomm_arch_hypercall_suspend(struct xencomm_handle *arg);
-
-static inline int
-xencomm_arch_hypercall_callback_op(int cmd, struct xencomm_handle *arg)
-{
-	return _hypercall2(int, callback_op, cmd, arg);
-}
-
-static inline long
-xencomm_arch_hypercall_vcpu_op(int cmd, int cpu, void *arg)
-{
-	return _hypercall3(long, vcpu_op, cmd, cpu, arg);
-}
-
-static inline int
-HYPERVISOR_physdev_op(int cmd, void *arg)
-{
-	switch (cmd) {
-	case PHYSDEVOP_eoi:
-		return _hypercall1(int, ia64_fast_eoi,
-				   ((struct physdev_eoi *)arg)->irq);
-	default:
-		return xencomm_hypercall_physdev_op(cmd, arg);
-	}
-}
-
-static inline long
-xencomm_arch_hypercall_opt_feature(struct xencomm_handle *arg)
-{
-	return _hypercall1(long, opt_feature, arg);
-}
-
-/* for balloon driver */
-#define HYPERVISOR_update_va_mapping(va, new_val, flags) (0)
-
-/* Use xencomm to do hypercalls.  */
-#define HYPERVISOR_sched_op xencomm_hypercall_sched_op
-#define HYPERVISOR_event_channel_op xencomm_hypercall_event_channel_op
-#define HYPERVISOR_callback_op xencomm_hypercall_callback_op
-#define HYPERVISOR_multicall xencomm_hypercall_multicall
-#define HYPERVISOR_xen_version xencomm_hypercall_xen_version
-#define HYPERVISOR_console_io xencomm_hypercall_console_io
-#define HYPERVISOR_memory_op xencomm_hypercall_memory_op
-#define HYPERVISOR_suspend xencomm_hypercall_suspend
-#define HYPERVISOR_vcpu_op xencomm_hypercall_vcpu_op
-#define HYPERVISOR_opt_feature xencomm_hypercall_opt_feature
-
-/* to compile gnttab_copy_grant_page() in drivers/xen/core/gnttab.c */
-#define HYPERVISOR_mmu_update(req, count, success_count, domid) ({ BUG(); 0; })
-
-static inline int
-HYPERVISOR_shutdown(
-	unsigned int reason)
-{
-	struct sched_shutdown sched_shutdown = {
-		.reason = reason
-	};
-
-	int rc = HYPERVISOR_sched_op(SCHEDOP_shutdown, &sched_shutdown);
-
-	return rc;
-}
-
-/* for netfront.c, netback.c */
-#define MULTI_UVMFLAGS_INDEX 0 /* XXX any value */
-
-static inline void
-MULTI_update_va_mapping(
-	struct multicall_entry *mcl, unsigned long va,
-	pte_t new_val, unsigned long flags)
-{
-	mcl->op = __HYPERVISOR_update_va_mapping;
-	mcl->result = 0;
-}
-
-static inline void
-MULTI_grant_table_op(struct multicall_entry *mcl, unsigned int cmd,
-	void *uop, unsigned int count)
-{
-	mcl->op = __HYPERVISOR_grant_table_op;
-	mcl->args[0] = cmd;
-	mcl->args[1] = (unsigned long)uop;
-	mcl->args[2] = count;
-}
-
-static inline void
-MULTI_mmu_update(struct multicall_entry *mcl, struct mmu_update *req,
-		 int count, int *success_count, domid_t domid)
-{
-	mcl->op = __HYPERVISOR_mmu_update;
-	mcl->args[0] = (unsigned long)req;
-	mcl->args[1] = count;
-	mcl->args[2] = (unsigned long)success_count;
-	mcl->args[3] = domid;
-}
-
-#endif /* _ASM_IA64_XEN_HYPERCALL_H */

+ 0 - 61
arch/ia64/include/asm/xen/hypervisor.h

@@ -1,61 +0,0 @@
-/******************************************************************************
- * hypervisor.h
- *
- * Linux-specific hypervisor handling.
- *
- * Copyright (c) 2002-2004, K A Fraser
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation; or, when distributed
- * separately from the Linux kernel or incorporated into other
- * software packages, subject to the following license:
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this source file (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use, copy, modify,
- * merge, publish, distribute, sublicense, and/or sell copies of the Software,
- * and to permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- */
-
-#ifndef _ASM_IA64_XEN_HYPERVISOR_H
-#define _ASM_IA64_XEN_HYPERVISOR_H
-
-#include <linux/err.h>
-#include <xen/interface/xen.h>
-#include <xen/interface/version.h>	/* to compile feature.c */
-#include <xen/features.h>		/* to compile xen-netfront.c */
-#include <xen/xen.h>
-#include <asm/xen/hypercall.h>
-
-#ifdef CONFIG_XEN
-extern struct shared_info *HYPERVISOR_shared_info;
-extern struct start_info *xen_start_info;
-
-void __init xen_setup_vcpu_info_placement(void);
-void force_evtchn_callback(void);
-
-/* for drivers/xen/balloon/balloon.c */
-#ifdef CONFIG_XEN_SCRUB_PAGES
-#define scrub_pages(_p, _n) memset((void *)(_p), 0, (_n) << PAGE_SHIFT)
-#else
-#define scrub_pages(_p, _n) ((void)0)
-#endif
-
-/* For setup_arch() in arch/ia64/kernel/setup.c */
-void xen_ia64_enable_opt_feature(void);
-#endif
-
-#endif /* _ASM_IA64_XEN_HYPERVISOR_H */

+ 0 - 486
arch/ia64/include/asm/xen/inst.h

@@ -1,486 +0,0 @@
-/******************************************************************************
- * arch/ia64/include/asm/xen/inst.h
- *
- * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
- *                    VA Linux Systems Japan K.K.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- *
- */
-
-#include <asm/xen/privop.h>
-
-#define ia64_ivt				xen_ivt
-#define DO_SAVE_MIN				XEN_DO_SAVE_MIN
-
-#define __paravirt_switch_to			xen_switch_to
-#define __paravirt_leave_syscall		xen_leave_syscall
-#define __paravirt_work_processed_syscall	xen_work_processed_syscall
-#define __paravirt_leave_kernel			xen_leave_kernel
-#define __paravirt_pending_syscall_end		xen_work_pending_syscall_end
-#define __paravirt_work_processed_syscall_target \
-						xen_work_processed_syscall
-
-#define paravirt_fsyscall_table			xen_fsyscall_table
-#define paravirt_fsys_bubble_down		xen_fsys_bubble_down
-
-#define MOV_FROM_IFA(reg)	\
-	movl reg = XSI_IFA;	\
-	;;			\
-	ld8 reg = [reg]
-
-#define MOV_FROM_ITIR(reg)	\
-	movl reg = XSI_ITIR;	\
-	;;			\
-	ld8 reg = [reg]
-
-#define MOV_FROM_ISR(reg)	\
-	movl reg = XSI_ISR;	\
-	;;			\
-	ld8 reg = [reg]
-
-#define MOV_FROM_IHA(reg)	\
-	movl reg = XSI_IHA;	\
-	;;			\
-	ld8 reg = [reg]
-
-#define MOV_FROM_IPSR(pred, reg)	\
-(pred)	movl reg = XSI_IPSR;		\
-	;;				\
-(pred)	ld8 reg = [reg]
-
-#define MOV_FROM_IIM(reg)	\
-	movl reg = XSI_IIM;	\
-	;;			\
-	ld8 reg = [reg]
-
-#define MOV_FROM_IIP(reg)	\
-	movl reg = XSI_IIP;	\
-	;;			\
-	ld8 reg = [reg]
-
-.macro __MOV_FROM_IVR reg, clob
-	.ifc "\reg", "r8"
-		XEN_HYPER_GET_IVR
-		.exitm
-	.endif
-	.ifc "\clob", "r8"
-		XEN_HYPER_GET_IVR
-		;;
-		mov \reg = r8
-		.exitm
-	.endif
-
-	mov \clob = r8
-	;;
-	XEN_HYPER_GET_IVR
-	;;
-	mov \reg = r8
-	;;
-	mov r8 = \clob
-.endm
-#define MOV_FROM_IVR(reg, clob)	__MOV_FROM_IVR reg, clob
-
-.macro __MOV_FROM_PSR pred, reg, clob
-	.ifc "\reg", "r8"
-		(\pred)	XEN_HYPER_GET_PSR;
-		.exitm
-	.endif
-	.ifc "\clob", "r8"
-		(\pred)	XEN_HYPER_GET_PSR
-		;;
-		(\pred)	mov \reg = r8
-		.exitm
-	.endif
-
-	(\pred)	mov \clob = r8
-	(\pred)	XEN_HYPER_GET_PSR
-	;;
-	(\pred)	mov \reg = r8
-	(\pred)	mov r8 = \clob
-.endm
-#define MOV_FROM_PSR(pred, reg, clob)	__MOV_FROM_PSR pred, reg, clob
-
-/* assuming ar.itc is read with interrupt disabled. */
-#define MOV_FROM_ITC(pred, pred_clob, reg, clob)		\
-(pred)	movl clob = XSI_ITC_OFFSET;				\
-	;;							\
-(pred)	ld8 clob = [clob];					\
-(pred)	mov reg = ar.itc;					\
-	;;							\
-(pred)	add reg = reg, clob;					\
-	;;							\
-(pred)	movl clob = XSI_ITC_LAST;				\
-	;;							\
-(pred)	ld8 clob = [clob];					\
-	;;							\
-(pred)	cmp.geu.unc pred_clob, p0 = clob, reg;			\
-	;;							\
-(pred_clob)	add reg = 1, clob;				\
-	;;							\
-(pred)	movl clob = XSI_ITC_LAST;				\
-	;;							\
-(pred)	st8 [clob] = reg
-
-
-#define MOV_TO_IFA(reg, clob)	\
-	movl clob = XSI_IFA;	\
-	;;			\
-	st8 [clob] = reg	\
-
-#define MOV_TO_ITIR(pred, reg, clob)	\
-(pred)	movl clob = XSI_ITIR;		\
-	;;				\
-(pred)	st8 [clob] = reg
-
-#define MOV_TO_IHA(pred, reg, clob)	\
-(pred)	movl clob = XSI_IHA;		\
-	;;				\
-(pred)	st8 [clob] = reg
-
-#define MOV_TO_IPSR(pred, reg, clob)	\
-(pred)	movl clob = XSI_IPSR;		\
-	;;				\
-(pred)	st8 [clob] = reg;		\
-	;;
-
-#define MOV_TO_IFS(pred, reg, clob)	\
-(pred)	movl clob = XSI_IFS;		\
-	;;				\
-(pred)	st8 [clob] = reg;		\
-	;;
-
-#define MOV_TO_IIP(reg, clob)	\
-	movl clob = XSI_IIP;	\
-	;;			\
-	st8 [clob] = reg
-
-.macro ____MOV_TO_KR kr, reg, clob0, clob1
-	.ifc "\clob0", "r9"
-		.error "clob0 \clob0 must not be r9"
-	.endif
-	.ifc "\clob1", "r8"
-		.error "clob1 \clob1 must not be r8"
-	.endif
-
-	.ifnc "\reg", "r9"
-		.ifnc "\clob1", "r9"
-			mov \clob1 = r9
-		.endif
-		mov r9 = \reg
-	.endif
-	.ifnc "\clob0", "r8"
-		mov \clob0 = r8
-	.endif
-	mov r8 = \kr
-	;;
-	XEN_HYPER_SET_KR
-
-	.ifnc "\reg", "r9"
-		.ifnc "\clob1", "r9"
-			mov r9 = \clob1
-		.endif
-	.endif
-	.ifnc "\clob0", "r8"
-		mov r8 = \clob0
-	.endif
-.endm
-
-.macro __MOV_TO_KR kr, reg, clob0, clob1
-	.ifc "\clob0", "r9"
-		____MOV_TO_KR \kr, \reg, \clob1, \clob0
-		.exitm
-	.endif
-	.ifc "\clob1", "r8"
-		____MOV_TO_KR \kr, \reg, \clob1, \clob0
-		.exitm
-	.endif
-
-	____MOV_TO_KR \kr, \reg, \clob0, \clob1
-.endm
-
-#define MOV_TO_KR(kr, reg, clob0, clob1) \
-	__MOV_TO_KR IA64_KR_ ## kr, reg, clob0, clob1
-
-
-.macro __ITC_I pred, reg, clob
-	.ifc "\reg", "r8"
-		(\pred)	XEN_HYPER_ITC_I
-		.exitm
-	.endif
-	.ifc "\clob", "r8"
-		(\pred)	mov r8 = \reg
-		;;
-		(\pred)	XEN_HYPER_ITC_I
-		.exitm
-	.endif
-
-	(\pred)	mov \clob = r8
-	(\pred)	mov r8 = \reg
-	;;
-	(\pred)	XEN_HYPER_ITC_I
-	;;
-	(\pred)	mov r8 = \clob
-	;;
-.endm
-#define ITC_I(pred, reg, clob)	__ITC_I pred, reg, clob
-
-.macro __ITC_D pred, reg, clob
-	.ifc "\reg", "r8"
-		(\pred)	XEN_HYPER_ITC_D
-		;;
-		.exitm
-	.endif
-	.ifc "\clob", "r8"
-		(\pred)	mov r8 = \reg
-		;;
-		(\pred)	XEN_HYPER_ITC_D
-		;;
-		.exitm
-	.endif
-
-	(\pred)	mov \clob = r8
-	(\pred)	mov r8 = \reg
-	;;
-	(\pred)	XEN_HYPER_ITC_D
-	;;
-	(\pred)	mov r8 = \clob
-	;;
-.endm
-#define ITC_D(pred, reg, clob)	__ITC_D pred, reg, clob
-
-.macro __ITC_I_AND_D pred_i, pred_d, reg, clob
-	.ifc "\reg", "r8"
-		(\pred_i)XEN_HYPER_ITC_I
-		;;
-		(\pred_d)XEN_HYPER_ITC_D
-		;;
-		.exitm
-	.endif
-	.ifc "\clob", "r8"
-		mov r8 = \reg
-		;;
-		(\pred_i)XEN_HYPER_ITC_I
-		;;
-		(\pred_d)XEN_HYPER_ITC_D
-		;;
-		.exitm
-	.endif
-
-	mov \clob = r8
-	mov r8 = \reg
-	;;
-	(\pred_i)XEN_HYPER_ITC_I
-	;;
-	(\pred_d)XEN_HYPER_ITC_D
-	;;
-	mov r8 = \clob
-	;;
-.endm
-#define ITC_I_AND_D(pred_i, pred_d, reg, clob) \
-	__ITC_I_AND_D pred_i, pred_d, reg, clob
-
-.macro __THASH pred, reg0, reg1, clob
-	.ifc "\reg0", "r8"
-		(\pred)	mov r8 = \reg1
-		(\pred)	XEN_HYPER_THASH
-		.exitm
-	.endc
-	.ifc "\reg1", "r8"
-		(\pred)	XEN_HYPER_THASH
-		;;
-		(\pred)	mov \reg0 = r8
-		;;
-		.exitm
-	.endif
-	.ifc "\clob", "r8"
-		(\pred)	mov r8 = \reg1
-		(\pred)	XEN_HYPER_THASH
-		;;
-		(\pred)	mov \reg0 = r8
-		;;
-		.exitm
-	.endif
-
-	(\pred)	mov \clob = r8
-	(\pred)	mov r8 = \reg1
-	(\pred)	XEN_HYPER_THASH
-	;;
-	(\pred)	mov \reg0 = r8
-	(\pred)	mov r8 = \clob
-	;;
-.endm
-#define THASH(pred, reg0, reg1, clob) __THASH pred, reg0, reg1, clob
-
-#define SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(clob0, clob1)	\
-	mov clob0 = 1;						\
-	movl clob1 = XSI_PSR_IC;				\
-	;;							\
-	st4 [clob1] = clob0					\
-	;;
-
-#define SSM_PSR_IC_AND_SRLZ_D(clob0, clob1)	\
-	;;					\
-	srlz.d;					\
-	mov clob1 = 1;				\
-	movl clob0 = XSI_PSR_IC;		\
-	;;					\
-	st4 [clob0] = clob1
-
-#define RSM_PSR_IC(clob)	\
-	movl clob = XSI_PSR_IC;	\
-	;;			\
-	st4 [clob] = r0;	\
-	;;
-
-/* pred will be clobbered */
-#define MASK_TO_PEND_OFS    (-1)
-#define SSM_PSR_I(pred, pred_clob, clob)				\
-(pred)	movl clob = XSI_PSR_I_ADDR					\
-	;;								\
-(pred)	ld8 clob = [clob]						\
-	;;								\
-	/* if (pred) vpsr.i = 1 */					\
-	/* if (pred) (vcpu->vcpu_info->evtchn_upcall_mask)=0 */		\
-(pred)	st1 [clob] = r0, MASK_TO_PEND_OFS				\
-	;;								\
-	/* if (vcpu->vcpu_info->evtchn_upcall_pending) */		\
-(pred)	ld1 clob = [clob]						\
-	;;								\
-(pred)	cmp.ne.unc pred_clob, p0 = clob, r0				\
-	;;								\
-(pred_clob)XEN_HYPER_SSM_I	/* do a real ssm psr.i */
-
-#define RSM_PSR_I(pred, clob0, clob1)	\
-	movl clob0 = XSI_PSR_I_ADDR;	\
-	mov clob1 = 1;			\
-	;;				\
-	ld8 clob0 = [clob0];		\
-	;;				\
-(pred)	st1 [clob0] = clob1
-
-#define RSM_PSR_I_IC(clob0, clob1, clob2)		\
-	movl clob0 = XSI_PSR_I_ADDR;			\
-	movl clob1 = XSI_PSR_IC;			\
-	;;						\
-	ld8 clob0 = [clob0];				\
-	mov clob2 = 1;					\
-	;;						\
-	/* note: clears both vpsr.i and vpsr.ic! */	\
-	st1 [clob0] = clob2;				\
-	st4 [clob1] = r0;				\
-	;;
-
-#define RSM_PSR_DT		\
-	XEN_HYPER_RSM_PSR_DT
-
-#define RSM_PSR_BE_I(clob0, clob1)	\
-	RSM_PSR_I(p0, clob0, clob1);	\
-	rum psr.be
-
-#define SSM_PSR_DT_AND_SRLZ_I	\
-	XEN_HYPER_SSM_PSR_DT
-
-#define BSW_0(clob0, clob1, clob2)			\
-	;;						\
-	/* r16-r31 all now hold bank1 values */		\
-	mov clob2 = ar.unat;				\
-	movl clob0 = XSI_BANK1_R16;			\
-	movl clob1 = XSI_BANK1_R16 + 8;			\
-	;;						\
-.mem.offset 0, 0; st8.spill [clob0] = r16, 16;		\
-.mem.offset 8, 0; st8.spill [clob1] = r17, 16;		\
-	;;						\
-.mem.offset 0, 0; st8.spill [clob0] = r18, 16;		\
-.mem.offset 8, 0; st8.spill [clob1] = r19, 16;		\
-	;;						\
-.mem.offset 0, 0; st8.spill [clob0] = r20, 16;		\
-.mem.offset 8, 0; st8.spill [clob1] = r21, 16;		\
-	;;						\
-.mem.offset 0, 0; st8.spill [clob0] = r22, 16;		\
-.mem.offset 8, 0; st8.spill [clob1] = r23, 16;		\
-	;;						\
-.mem.offset 0, 0; st8.spill [clob0] = r24, 16;		\
-.mem.offset 8, 0; st8.spill [clob1] = r25, 16;		\
-	;;						\
-.mem.offset 0, 0; st8.spill [clob0] = r26, 16;		\
-.mem.offset 8, 0; st8.spill [clob1] = r27, 16;		\
-	;;						\
-.mem.offset 0, 0; st8.spill [clob0] = r28, 16;		\
-.mem.offset 8, 0; st8.spill [clob1] = r29, 16;		\
-	;;						\
-.mem.offset 0, 0; st8.spill [clob0] = r30, 16;		\
-.mem.offset 8, 0; st8.spill [clob1] = r31, 16;		\
-	;;						\
-	mov clob1 = ar.unat;				\
-	movl clob0 = XSI_B1NAT;				\
-	;;						\
-	st8 [clob0] = clob1;				\
-	mov ar.unat = clob2;				\
-	movl clob0 = XSI_BANKNUM;			\
-	;;						\
-	st4 [clob0] = r0
-
-
-	/* FIXME: THIS CODE IS NOT NaT SAFE! */
-#define XEN_BSW_1(clob)			\
-	mov clob = ar.unat;		\
-	movl r30 = XSI_B1NAT;		\
-	;;				\
-	ld8 r30 = [r30];		\
-	mov r31 = 1;			\
-	;;				\
-	mov ar.unat = r30;		\
-	movl r30 = XSI_BANKNUM;		\
-	;;				\
-	st4 [r30] = r31;		\
-	movl r30 = XSI_BANK1_R16;	\
-	movl r31 = XSI_BANK1_R16+8;	\
-	;;				\
-	ld8.fill r16 = [r30], 16;	\
-	ld8.fill r17 = [r31], 16;	\
-	;;				\
-	ld8.fill r18 = [r30], 16;	\
-	ld8.fill r19 = [r31], 16;	\
-	;;				\
-	ld8.fill r20 = [r30], 16;	\
-	ld8.fill r21 = [r31], 16;	\
-	;;				\
-	ld8.fill r22 = [r30], 16;	\
-	ld8.fill r23 = [r31], 16;	\
-	;;				\
-	ld8.fill r24 = [r30], 16;	\
-	ld8.fill r25 = [r31], 16;	\
-	;;				\
-	ld8.fill r26 = [r30], 16;	\
-	ld8.fill r27 = [r31], 16;	\
-	;;				\
-	ld8.fill r28 = [r30], 16;	\
-	ld8.fill r29 = [r31], 16;	\
-	;;				\
-	ld8.fill r30 = [r30];		\
-	ld8.fill r31 = [r31];		\
-	;;				\
-	mov ar.unat = clob
-
-#define BSW_1(clob0, clob1)	XEN_BSW_1(clob1)
-
-
-#define COVER	\
-	XEN_HYPER_COVER
-
-#define RFI			\
-	XEN_HYPER_RFI;		\
-	dv_serialize_data

+ 0 - 363
arch/ia64/include/asm/xen/interface.h

@@ -1,363 +0,0 @@
-/******************************************************************************
- * arch-ia64/hypervisor-if.h
- *
- * Guest OS interface to IA64 Xen.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Copyright by those who contributed. (in alphabetical order)
- *
- * Anthony Xu <anthony.xu@intel.com>
- * Eddie Dong <eddie.dong@intel.com>
- * Fred Yang <fred.yang@intel.com>
- * Kevin Tian <kevin.tian@intel.com>
- * Alex Williamson <alex.williamson@hp.com>
- * Chris Wright <chrisw@sous-sol.org>
- * Christian Limpach <Christian.Limpach@cl.cam.ac.uk>
- * Dietmar Hahn <dietmar.hahn@fujitsu-siemens.com>
- * Hollis Blanchard <hollisb@us.ibm.com>
- * Isaku Yamahata <yamahata@valinux.co.jp>
- * Jan Beulich <jbeulich@novell.com>
- * John Levon <john.levon@sun.com>
- * Kazuhiro Suzuki <kaz@jp.fujitsu.com>
- * Keir Fraser <keir.fraser@citrix.com>
- * Kouya Shimura <kouya@jp.fujitsu.com>
- * Masaki Kanno <kanno.masaki@jp.fujitsu.com>
- * Matt Chapman <matthewc@hp.com>
- * Matthew Chapman <matthewc@hp.com>
- * Samuel Thibault <samuel.thibault@eu.citrix.com>
- * Tomonari Horikoshi <t.horikoshi@jp.fujitsu.com>
- * Tristan Gingold <tgingold@free.fr>
- * Tsunehisa Doi <Doi.Tsunehisa@jp.fujitsu.com>
- * Yutaka Ezaki <yutaka.ezaki@jp.fujitsu.com>
- * Zhang Xin <xing.z.zhang@intel.com>
- * Zhang xiantao <xiantao.zhang@intel.com>
- * dan.magenheimer@hp.com
- * ian.pratt@cl.cam.ac.uk
- * michael.fetterman@cl.cam.ac.uk
- */
-
-#ifndef _ASM_IA64_XEN_INTERFACE_H
-#define _ASM_IA64_XEN_INTERFACE_H
-
-#define __DEFINE_GUEST_HANDLE(name, type)	\
-	typedef struct { type *p; } __guest_handle_ ## name
-
-#define DEFINE_GUEST_HANDLE_STRUCT(name)	\
-	__DEFINE_GUEST_HANDLE(name, struct name)
-#define DEFINE_GUEST_HANDLE(name)	__DEFINE_GUEST_HANDLE(name, name)
-#define GUEST_HANDLE(name)		__guest_handle_ ## name
-#define GUEST_HANDLE_64(name)		GUEST_HANDLE(name)
-#define set_xen_guest_handle(hnd, val)	do { (hnd).p = val; } while (0)
-
-#ifndef __ASSEMBLY__
-/* Explicitly size integers that represent pfns in the public interface
- * with Xen so that we could have one ABI that works for 32 and 64 bit
- * guests. */
-typedef unsigned long xen_pfn_t;
-typedef unsigned long xen_ulong_t;
-/* Guest handles for primitive C types. */
-__DEFINE_GUEST_HANDLE(uchar, unsigned char);
-__DEFINE_GUEST_HANDLE(uint, unsigned int);
-__DEFINE_GUEST_HANDLE(ulong, unsigned long);
-
-DEFINE_GUEST_HANDLE(char);
-DEFINE_GUEST_HANDLE(int);
-DEFINE_GUEST_HANDLE(long);
-DEFINE_GUEST_HANDLE(void);
-DEFINE_GUEST_HANDLE(uint64_t);
-DEFINE_GUEST_HANDLE(uint32_t);
-
-DEFINE_GUEST_HANDLE(xen_pfn_t);
-#define PRI_xen_pfn	"lx"
-#endif
-
-/* Arch specific VIRQs definition */
-#define VIRQ_ITC	VIRQ_ARCH_0	/* V. Virtual itc timer */
-#define VIRQ_MCA_CMC	VIRQ_ARCH_1	/* MCA cmc interrupt */
-#define VIRQ_MCA_CPE	VIRQ_ARCH_2	/* MCA cpe interrupt */
-
-/* Maximum number of virtual CPUs in multi-processor guests. */
-/* keep sizeof(struct shared_page) <= PAGE_SIZE.
- * this is checked in arch/ia64/xen/hypervisor.c. */
-#define MAX_VIRT_CPUS	64
-
-#ifndef __ASSEMBLY__
-
-#define INVALID_MFN	(~0UL)
-
-union vac {
-	unsigned long value;
-	struct {
-		int a_int:1;
-		int a_from_int_cr:1;
-		int a_to_int_cr:1;
-		int a_from_psr:1;
-		int a_from_cpuid:1;
-		int a_cover:1;
-		int a_bsw:1;
-		long reserved:57;
-	};
-};
-
-union vdc {
-	unsigned long value;
-	struct {
-		int d_vmsw:1;
-		int d_extint:1;
-		int d_ibr_dbr:1;
-		int d_pmc:1;
-		int d_to_pmd:1;
-		int d_itm:1;
-		long reserved:58;
-	};
-};
-
-struct mapped_regs {
-	union vac vac;
-	union vdc vdc;
-	unsigned long virt_env_vaddr;
-	unsigned long reserved1[29];
-	unsigned long vhpi;
-	unsigned long reserved2[95];
-	union {
-		unsigned long vgr[16];
-		unsigned long bank1_regs[16];	/* bank1 regs (r16-r31)
-						   when bank0 active */
-	};
-	union {
-		unsigned long vbgr[16];
-		unsigned long bank0_regs[16];	/* bank0 regs (r16-r31)
-						   when bank1 active */
-	};
-	unsigned long vnat;
-	unsigned long vbnat;
-	unsigned long vcpuid[5];
-	unsigned long reserved3[11];
-	unsigned long vpsr;
-	unsigned long vpr;
-	unsigned long reserved4[76];
-	union {
-		unsigned long vcr[128];
-		struct {
-			unsigned long dcr;	/* CR0 */
-			unsigned long itm;
-			unsigned long iva;
-			unsigned long rsv1[5];
-			unsigned long pta;	/* CR8 */
-			unsigned long rsv2[7];
-			unsigned long ipsr;	/* CR16 */
-			unsigned long isr;
-			unsigned long rsv3;
-			unsigned long iip;
-			unsigned long ifa;
-			unsigned long itir;
-			unsigned long iipa;
-			unsigned long ifs;
-			unsigned long iim;	/* CR24 */
-			unsigned long iha;
-			unsigned long rsv4[38];
-			unsigned long lid;	/* CR64 */
-			unsigned long ivr;
-			unsigned long tpr;
-			unsigned long eoi;
-			unsigned long irr[4];
-			unsigned long itv;	/* CR72 */
-			unsigned long pmv;
-			unsigned long cmcv;
-			unsigned long rsv5[5];
-			unsigned long lrr0;	/* CR80 */
-			unsigned long lrr1;
-			unsigned long rsv6[46];
-		};
-	};
-	union {
-		unsigned long reserved5[128];
-		struct {
-			unsigned long precover_ifs;
-			unsigned long unat;	/* not sure if this is needed
-						   until NaT arch is done */
-			int interrupt_collection_enabled; /* virtual psr.ic */
-
-			/* virtual interrupt deliverable flag is
-			 * evtchn_upcall_mask in shared info area now.
-			 * interrupt_mask_addr is the address
-			 * of evtchn_upcall_mask for current vcpu
-			 */
-			unsigned char *interrupt_mask_addr;
-			int pending_interruption;
-			unsigned char vpsr_pp;
-			unsigned char vpsr_dfh;
-			unsigned char hpsr_dfh;
-			unsigned char hpsr_mfh;
-			unsigned long reserved5_1[4];
-			int metaphysical_mode;	/* 1 = use metaphys mapping
-						   0 = use virtual */
-			int banknum;		/* 0 or 1, which virtual
-						   register bank is active */
-			unsigned long rrs[8];	/* region registers */
-			unsigned long krs[8];	/* kernel registers */
-			unsigned long tmp[16];	/* temp registers
-						   (e.g. for hyperprivops) */
-
-			/* itc paravirtualization
-			 * vAR.ITC = mAR.ITC + itc_offset
-			 * itc_last is one which was lastly passed to
-			 * the guest OS in order to prevent it from
- * going backwards.
-			 */
-			unsigned long itc_offset;
-			unsigned long itc_last;
-		};
-	};
-};
-
-struct arch_vcpu_info {
-	/* nothing */
-};
-
-/*
- * This structure is used for magic page in domain pseudo physical address
- * space and the result of XENMEM_machine_memory_map.
- * As the XENMEM_machine_memory_map result,
- * xen_memory_map::nr_entries indicates the size in bytes
- * including struct xen_ia64_memmap_info. Not the number of entries.
- */
-struct xen_ia64_memmap_info {
-	uint64_t efi_memmap_size;	/* size of EFI memory map */
-	uint64_t efi_memdesc_size;	/* size of an EFI memory map
-					 * descriptor */
-	uint32_t efi_memdesc_version;	/* memory descriptor version */
-	void *memdesc[0];		/* array of efi_memory_desc_t */
-};
-
-struct arch_shared_info {
-	/* PFN of the start_info page.	*/
-	unsigned long start_info_pfn;
-
-	/* Interrupt vector for event channel.	*/
-	int evtchn_vector;
-
-	/* PFN of memmap_info page */
-	unsigned int memmap_info_num_pages;	/* currently only = 1 case is
-						   supported. */
-	unsigned long memmap_info_pfn;
-
-	uint64_t pad[31];
-};
-
-struct xen_callback {
-	unsigned long ip;
-};
-typedef struct xen_callback xen_callback_t;
-
-#endif /* !__ASSEMBLY__ */
-
-#include <asm/pvclock-abi.h>
-
-/* Size of the shared_info area (this is not related to page size).  */
-#define XSI_SHIFT			14
-#define XSI_SIZE			(1 << XSI_SHIFT)
-/* Log size of mapped_regs area (64 KB - only 4KB is used).  */
-#define XMAPPEDREGS_SHIFT		12
-#define XMAPPEDREGS_SIZE		(1 << XMAPPEDREGS_SHIFT)
-/* Offset of XASI (Xen arch shared info) wrt XSI_BASE.	*/
-#define XMAPPEDREGS_OFS			XSI_SIZE
-
-/* Hyperprivops.  */
-#define HYPERPRIVOP_START		0x1
-#define HYPERPRIVOP_RFI			(HYPERPRIVOP_START + 0x0)
-#define HYPERPRIVOP_RSM_DT		(HYPERPRIVOP_START + 0x1)
-#define HYPERPRIVOP_SSM_DT		(HYPERPRIVOP_START + 0x2)
-#define HYPERPRIVOP_COVER		(HYPERPRIVOP_START + 0x3)
-#define HYPERPRIVOP_ITC_D		(HYPERPRIVOP_START + 0x4)
-#define HYPERPRIVOP_ITC_I		(HYPERPRIVOP_START + 0x5)
-#define HYPERPRIVOP_SSM_I		(HYPERPRIVOP_START + 0x6)
-#define HYPERPRIVOP_GET_IVR		(HYPERPRIVOP_START + 0x7)
-#define HYPERPRIVOP_GET_TPR		(HYPERPRIVOP_START + 0x8)
-#define HYPERPRIVOP_SET_TPR		(HYPERPRIVOP_START + 0x9)
-#define HYPERPRIVOP_EOI			(HYPERPRIVOP_START + 0xa)
-#define HYPERPRIVOP_SET_ITM		(HYPERPRIVOP_START + 0xb)
-#define HYPERPRIVOP_THASH		(HYPERPRIVOP_START + 0xc)
-#define HYPERPRIVOP_PTC_GA		(HYPERPRIVOP_START + 0xd)
-#define HYPERPRIVOP_ITR_D		(HYPERPRIVOP_START + 0xe)
-#define HYPERPRIVOP_GET_RR		(HYPERPRIVOP_START + 0xf)
-#define HYPERPRIVOP_SET_RR		(HYPERPRIVOP_START + 0x10)
-#define HYPERPRIVOP_SET_KR		(HYPERPRIVOP_START + 0x11)
-#define HYPERPRIVOP_FC			(HYPERPRIVOP_START + 0x12)
-#define HYPERPRIVOP_GET_CPUID		(HYPERPRIVOP_START + 0x13)
-#define HYPERPRIVOP_GET_PMD		(HYPERPRIVOP_START + 0x14)
-#define HYPERPRIVOP_GET_EFLAG		(HYPERPRIVOP_START + 0x15)
-#define HYPERPRIVOP_SET_EFLAG		(HYPERPRIVOP_START + 0x16)
-#define HYPERPRIVOP_RSM_BE		(HYPERPRIVOP_START + 0x17)
-#define HYPERPRIVOP_GET_PSR		(HYPERPRIVOP_START + 0x18)
-#define HYPERPRIVOP_SET_RR0_TO_RR4	(HYPERPRIVOP_START + 0x19)
-#define HYPERPRIVOP_MAX			(0x1a)
-
-/* Fast and light hypercalls.  */
-#define __HYPERVISOR_ia64_fast_eoi	__HYPERVISOR_arch_1
-
-/* Xencomm macros.  */
-#define XENCOMM_INLINE_MASK		0xf800000000000000UL
-#define XENCOMM_INLINE_FLAG		0x8000000000000000UL
-
-#ifndef __ASSEMBLY__
-
-/*
- * Optimization features.
- * The hypervisor may do some special optimizations for guests. This hypercall
- * can be used to switch on/of these special optimizations.
- */
-#define __HYPERVISOR_opt_feature	0x700UL
-
-#define XEN_IA64_OPTF_OFF		0x0
-#define XEN_IA64_OPTF_ON		0x1
-
-/*
- * If this feature is switched on, the hypervisor inserts the
- * tlb entries without calling the guests traphandler.
- * This is useful in guests using region 7 for identity mapping
- * like the linux kernel does.
- */
-#define XEN_IA64_OPTF_IDENT_MAP_REG7	1
-
-/* Identity mapping of region 4 addresses in HVM. */
-#define XEN_IA64_OPTF_IDENT_MAP_REG4	2
-
-/* Identity mapping of region 5 addresses in HVM. */
-#define XEN_IA64_OPTF_IDENT_MAP_REG5	3
-
-#define XEN_IA64_OPTF_IDENT_MAP_NOT_SET	 (0)
-
-struct xen_ia64_opt_feature {
-	unsigned long cmd;	/* Which feature */
-	unsigned char on;	/* Switch feature on/off */
-	union {
-		struct {
-			/* The page protection bit mask of the pte.
-			 * This will be or'ed with the pte. */
-			unsigned long pgprot;
-			unsigned long key;	/* A protection key for itir.*/
-		};
-	};
-};
-
-#endif /* __ASSEMBLY__ */
-
-#endif /* _ASM_IA64_XEN_INTERFACE_H */

+ 0 - 44
arch/ia64/include/asm/xen/irq.h

@@ -1,44 +0,0 @@
-/******************************************************************************
- * arch/ia64/include/asm/xen/irq.h
- *
- * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
- *                    VA Linux Systems Japan K.K.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- *
- */
-
-#ifndef _ASM_IA64_XEN_IRQ_H
-#define _ASM_IA64_XEN_IRQ_H
-
-/*
- * The flat IRQ space is divided into two regions:
- *  1. A one-to-one mapping of real physical IRQs. This space is only used
- *     if we have physical device-access privilege. This region is at the
- *     start of the IRQ space so that existing device drivers do not need
- *     to be modified to translate physical IRQ numbers into our IRQ space.
- *  3. A dynamic mapping of inter-domain and Xen-sourced virtual IRQs. These
- *     are bound using the provided bind/unbind functions.
- */
-
-#define XEN_PIRQ_BASE		0
-#define XEN_NR_PIRQS		256
-
-#define XEN_DYNIRQ_BASE		(XEN_PIRQ_BASE + XEN_NR_PIRQS)
-#define XEN_NR_DYNIRQS		(NR_CPUS * 8)
-
-#define XEN_NR_IRQS		(XEN_NR_PIRQS + XEN_NR_DYNIRQS)
-
-#endif /* _ASM_IA64_XEN_IRQ_H */

+ 0 - 143
arch/ia64/include/asm/xen/minstate.h

@@ -1,143 +0,0 @@
-
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
-/* read ar.itc in advance, and use it before leaving bank 0 */
-#define XEN_ACCOUNT_GET_STAMP		\
-	MOV_FROM_ITC(pUStk, p6, r20, r2);
-#else
-#define XEN_ACCOUNT_GET_STAMP
-#endif
-
-/*
- * DO_SAVE_MIN switches to the kernel stacks (if necessary) and saves
- * the minimum state necessary that allows us to turn psr.ic back
- * on.
- *
- * Assumed state upon entry:
- *	psr.ic: off
- *	r31:	contains saved predicates (pr)
- *
- * Upon exit, the state is as follows:
- *	psr.ic: off
- *	 r2 = points to &pt_regs.r16
- *	 r8 = contents of ar.ccv
- *	 r9 = contents of ar.csd
- *	r10 = contents of ar.ssd
- *	r11 = FPSR_DEFAULT
- *	r12 = kernel sp (kernel virtual address)
- *	r13 = points to current task_struct (kernel virtual address)
- *	p15 = TRUE if psr.i is set in cr.ipsr
- *	predicate registers (other than p2, p3, and p15), b6, r3, r14, r15:
- *		preserved
- * CONFIG_XEN note: p6/p7 are not preserved
- *
- * Note that psr.ic is NOT turned on by this macro.  This is so that
- * we can pass interruption state as arguments to a handler.
- */
-#define XEN_DO_SAVE_MIN(__COVER,SAVE_IFS,EXTRA,WORKAROUND)					\
-	mov r16=IA64_KR(CURRENT);	/* M */							\
-	mov r27=ar.rsc;			/* M */							\
-	mov r20=r1;			/* A */							\
-	mov r25=ar.unat;		/* M */							\
-	MOV_FROM_IPSR(p0,r29);		/* M */							\
-	MOV_FROM_IIP(r28);		/* M */							\
-	mov r21=ar.fpsr;		/* M */							\
-	mov r26=ar.pfs;			/* I */							\
-	__COVER;			/* B;; (or nothing) */					\
-	adds r16=IA64_TASK_THREAD_ON_USTACK_OFFSET,r16;						\
-	;;											\
-	ld1 r17=[r16];				/* load current->thread.on_ustack flag */	\
-	st1 [r16]=r0;				/* clear current->thread.on_ustack flag */	\
-	adds r1=-IA64_TASK_THREAD_ON_USTACK_OFFSET,r16						\
-	/* switch from user to kernel RBS: */							\
-	;;											\
-	invala;				/* M */							\
-	/* SAVE_IFS;*/ /* see xen special handling below */					\
-	cmp.eq pKStk,pUStk=r0,r17;		/* are we in kernel mode already? */		\
-	;;											\
-(pUStk)	mov ar.rsc=0;		/* set enforced lazy mode, pl 0, little-endian, loadrs=0 */	\
-	;;											\
-(pUStk)	mov.m r24=ar.rnat;									\
-(pUStk)	addl r22=IA64_RBS_OFFSET,r1;			/* compute base of RBS */		\
-(pKStk) mov r1=sp;					/* get sp  */				\
-	;;											\
-(pUStk) lfetch.fault.excl.nt1 [r22];								\
-(pUStk)	addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r1;	/* compute base of memory stack */	\
-(pUStk)	mov r23=ar.bspstore;				/* save ar.bspstore */			\
-	;;											\
-(pUStk)	mov ar.bspstore=r22;				/* switch to kernel RBS */		\
-(pKStk) addl r1=-IA64_PT_REGS_SIZE,r1;			/* if in kernel mode, use sp (r12) */	\
-	;;											\
-(pUStk)	mov r18=ar.bsp;										\
-(pUStk)	mov ar.rsc=0x3;		/* set eager mode, pl 0, little-endian, loadrs=0 */		\
-	adds r17=2*L1_CACHE_BYTES,r1;		/* really: biggest cache-line size */		\
-	adds r16=PT(CR_IPSR),r1;								\
-	;;											\
-	lfetch.fault.excl.nt1 [r17],L1_CACHE_BYTES;						\
-	st8 [r16]=r29;		/* save cr.ipsr */						\
-	;;											\
-	lfetch.fault.excl.nt1 [r17];								\
-	tbit.nz p15,p0=r29,IA64_PSR_I_BIT;							\
-	mov r29=b0										\
-	;;											\
-	WORKAROUND;										\
-	adds r16=PT(R8),r1;	/* initialize first base pointer */				\
-	adds r17=PT(R9),r1;	/* initialize second base pointer */				\
-(pKStk)	mov r18=r0;		/* make sure r18 isn't NaT */					\
-	;;											\
-.mem.offset 0,0; st8.spill [r16]=r8,16;								\
-.mem.offset 8,0; st8.spill [r17]=r9,16;								\
-        ;;											\
-.mem.offset 0,0; st8.spill [r16]=r10,24;							\
-	movl r8=XSI_PRECOVER_IFS;								\
-.mem.offset 8,0; st8.spill [r17]=r11,24;							\
-        ;;											\
-	/* xen special handling for possibly lazy cover */					\
-	/* SAVE_MIN case in dispatch_ia32_handler: mov r30=r0 */				\
-	ld8 r30=[r8];										\
-(pUStk)	sub r18=r18,r22;	/* r18=RSE.ndirty*8 */						\
-	st8 [r16]=r28,16;	/* save cr.iip */						\
-	;;											\
-	st8 [r17]=r30,16;	/* save cr.ifs */						\
-	mov r8=ar.ccv;										\
-	mov r9=ar.csd;										\
-	mov r10=ar.ssd;										\
-	movl r11=FPSR_DEFAULT;   /* L-unit */							\
-	;;											\
-	st8 [r16]=r25,16;	/* save ar.unat */						\
-	st8 [r17]=r26,16;	/* save ar.pfs */						\
-	shl r18=r18,16;		/* compute ar.rsc to be used for "loadrs" */			\
-	;;											\
-	st8 [r16]=r27,16;	/* save ar.rsc */						\
-(pUStk)	st8 [r17]=r24,16;	/* save ar.rnat */						\
-(pKStk)	adds r17=16,r17;	/* skip over ar_rnat field */					\
-	;;			/* avoid RAW on r16 & r17 */					\
-(pUStk)	st8 [r16]=r23,16;	/* save ar.bspstore */						\
-	st8 [r17]=r31,16;	/* save predicates */						\
-(pKStk)	adds r16=16,r16;	/* skip over ar_bspstore field */				\
-	;;											\
-	st8 [r16]=r29,16;	/* save b0 */							\
-	st8 [r17]=r18,16;	/* save ar.rsc value for "loadrs" */				\
-	cmp.eq pNonSys,pSys=r0,r0	/* initialize pSys=0, pNonSys=1 */			\
-	;;											\
-.mem.offset 0,0; st8.spill [r16]=r20,16;	/* save original r1 */				\
-.mem.offset 8,0; st8.spill [r17]=r12,16;							\
-	adds r12=-16,r1;	/* switch to kernel memory stack (with 16 bytes of scratch) */	\
-	;;											\
-.mem.offset 0,0; st8.spill [r16]=r13,16;							\
-.mem.offset 8,0; st8.spill [r17]=r21,16;	/* save ar.fpsr */				\
-	mov r13=IA64_KR(CURRENT);	/* establish `current' */				\
-	;;											\
-.mem.offset 0,0; st8.spill [r16]=r15,16;							\
-.mem.offset 8,0; st8.spill [r17]=r14,16;							\
-	;;											\
-.mem.offset 0,0; st8.spill [r16]=r2,16;								\
-.mem.offset 8,0; st8.spill [r17]=r3,16;								\
-	XEN_ACCOUNT_GET_STAMP									\
-	adds r2=IA64_PT_REGS_R16_OFFSET,r1;							\
-	;;											\
-	EXTRA;											\
-	movl r1=__gp;		/* establish kernel global pointer */				\
-	;;											\
-	ACCOUNT_SYS_ENTER									\
-	BSW_1(r3,r14);	/* switch back to bank 1 (must be last in insn group) */		\
-	;;

+ 0 - 38
arch/ia64/include/asm/xen/page-coherent.h

@@ -1,38 +0,0 @@
-#ifndef _ASM_IA64_XEN_PAGE_COHERENT_H
-#define _ASM_IA64_XEN_PAGE_COHERENT_H
-
-#include <asm/page.h>
-#include <linux/dma-attrs.h>
-#include <linux/dma-mapping.h>
-
-static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size,
-		dma_addr_t *dma_handle, gfp_t flags,
-		struct dma_attrs *attrs)
-{
-	void *vstart = (void*)__get_free_pages(flags, get_order(size));
-	*dma_handle = virt_to_phys(vstart);
-	return vstart;
-}
-
-static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
-		void *cpu_addr, dma_addr_t dma_handle,
-		struct dma_attrs *attrs)
-{
-	free_pages((unsigned long) cpu_addr, get_order(size));
-}
-
-static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
-	     unsigned long offset, size_t size, enum dma_data_direction dir,
-	     struct dma_attrs *attrs) { }
-
-static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
-		size_t size, enum dma_data_direction dir,
-		struct dma_attrs *attrs) { }
-
-static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
-		dma_addr_t handle, size_t size, enum dma_data_direction dir) { }
-
-static inline void xen_dma_sync_single_for_device(struct device *hwdev,
-		dma_addr_t handle, size_t size, enum dma_data_direction dir) { }
-
-#endif /* _ASM_IA64_XEN_PAGE_COHERENT_H */

+ 0 - 65
arch/ia64/include/asm/xen/page.h

@@ -1,65 +0,0 @@
-/******************************************************************************
- * arch/ia64/include/asm/xen/page.h
- *
- * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
- *                    VA Linux Systems Japan K.K.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- *
- */
-
-#ifndef _ASM_IA64_XEN_PAGE_H
-#define _ASM_IA64_XEN_PAGE_H
-
-#define INVALID_P2M_ENTRY	(~0UL)
-
-static inline unsigned long mfn_to_pfn(unsigned long mfn)
-{
-	return mfn;
-}
-
-static inline unsigned long pfn_to_mfn(unsigned long pfn)
-{
-	return pfn;
-}
-
-#define phys_to_machine_mapping_valid(_x)	(1)
-
-static inline void *mfn_to_virt(unsigned long mfn)
-{
-	return __va(mfn << PAGE_SHIFT);
-}
-
-static inline unsigned long virt_to_mfn(void *virt)
-{
-	return __pa(virt) >> PAGE_SHIFT;
-}
-
-/* for tpmfront.c */
-static inline unsigned long virt_to_machine(void *virt)
-{
-	return __pa(virt);
-}
-
-static inline void set_phys_to_machine(unsigned long pfn, unsigned long mfn)
-{
-	/* nothing */
-}
-
-#define pte_mfn(_x)	pte_pfn(_x)
-#define mfn_pte(_x, _y)	__pte_ma(0)		/* unmodified use */
-#define __pte_ma(_x)	((pte_t) {(_x)})        /* unmodified use */
-
-#endif /* _ASM_IA64_XEN_PAGE_H */

+ 0 - 38
arch/ia64/include/asm/xen/patchlist.h

@@ -1,38 +0,0 @@
-/******************************************************************************
- * arch/ia64/include/asm/xen/patchlist.h
- *
- * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
- *                    VA Linux Systems Japan K.K.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- *
- */
-
-#define __paravirt_start_gate_fsyscall_patchlist		\
-	__xen_start_gate_fsyscall_patchlist
-#define __paravirt_end_gate_fsyscall_patchlist			\
-	__xen_end_gate_fsyscall_patchlist
-#define __paravirt_start_gate_brl_fsys_bubble_down_patchlist	\
-	__xen_start_gate_brl_fsys_bubble_down_patchlist
-#define __paravirt_end_gate_brl_fsys_bubble_down_patchlist	\
-	__xen_end_gate_brl_fsys_bubble_down_patchlist
-#define __paravirt_start_gate_vtop_patchlist			\
-	__xen_start_gate_vtop_patchlist
-#define __paravirt_end_gate_vtop_patchlist			\
-	__xen_end_gate_vtop_patchlist
-#define __paravirt_start_gate_mckinley_e9_patchlist		\
-	__xen_start_gate_mckinley_e9_patchlist
-#define __paravirt_end_gate_mckinley_e9_patchlist		\
-	__xen_end_gate_mckinley_e9_patchlist

+ 0 - 135
arch/ia64/include/asm/xen/privop.h

@@ -1,135 +0,0 @@
-#ifndef _ASM_IA64_XEN_PRIVOP_H
-#define _ASM_IA64_XEN_PRIVOP_H
-
-/*
- * Copyright (C) 2005 Hewlett-Packard Co
- *	Dan Magenheimer <dan.magenheimer@hp.com>
- *
- * Paravirtualizations of privileged operations for Xen/ia64
- *
- *
- * inline privop and paravirt_alt support
- * Copyright (c) 2007 Isaku Yamahata <yamahata at valinux co jp>
- *                    VA Linux Systems Japan K.K.
- *
- */
-
-#ifndef __ASSEMBLY__
-#include <linux/types.h>		/* arch-ia64.h requires uint64_t */
-#endif
-#include <asm/xen/interface.h>
-
-/* At 1 MB, before per-cpu space but still addressable using addl instead
-   of movl. */
-#define XSI_BASE			0xfffffffffff00000
-
-/* Address of mapped regs.  */
-#define XMAPPEDREGS_BASE		(XSI_BASE + XSI_SIZE)
-
-#ifdef __ASSEMBLY__
-#define XEN_HYPER_RFI			break HYPERPRIVOP_RFI
-#define XEN_HYPER_RSM_PSR_DT		break HYPERPRIVOP_RSM_DT
-#define XEN_HYPER_SSM_PSR_DT		break HYPERPRIVOP_SSM_DT
-#define XEN_HYPER_COVER			break HYPERPRIVOP_COVER
-#define XEN_HYPER_ITC_D			break HYPERPRIVOP_ITC_D
-#define XEN_HYPER_ITC_I			break HYPERPRIVOP_ITC_I
-#define XEN_HYPER_SSM_I			break HYPERPRIVOP_SSM_I
-#define XEN_HYPER_GET_IVR		break HYPERPRIVOP_GET_IVR
-#define XEN_HYPER_THASH			break HYPERPRIVOP_THASH
-#define XEN_HYPER_ITR_D			break HYPERPRIVOP_ITR_D
-#define XEN_HYPER_SET_KR		break HYPERPRIVOP_SET_KR
-#define XEN_HYPER_GET_PSR		break HYPERPRIVOP_GET_PSR
-#define XEN_HYPER_SET_RR0_TO_RR4	break HYPERPRIVOP_SET_RR0_TO_RR4
-
-#define XSI_IFS				(XSI_BASE + XSI_IFS_OFS)
-#define XSI_PRECOVER_IFS		(XSI_BASE + XSI_PRECOVER_IFS_OFS)
-#define XSI_IFA				(XSI_BASE + XSI_IFA_OFS)
-#define XSI_ISR				(XSI_BASE + XSI_ISR_OFS)
-#define XSI_IIM				(XSI_BASE + XSI_IIM_OFS)
-#define XSI_ITIR			(XSI_BASE + XSI_ITIR_OFS)
-#define XSI_PSR_I_ADDR			(XSI_BASE + XSI_PSR_I_ADDR_OFS)
-#define XSI_PSR_IC			(XSI_BASE + XSI_PSR_IC_OFS)
-#define XSI_IPSR			(XSI_BASE + XSI_IPSR_OFS)
-#define XSI_IIP				(XSI_BASE + XSI_IIP_OFS)
-#define XSI_B1NAT			(XSI_BASE + XSI_B1NATS_OFS)
-#define XSI_BANK1_R16			(XSI_BASE + XSI_BANK1_R16_OFS)
-#define XSI_BANKNUM			(XSI_BASE + XSI_BANKNUM_OFS)
-#define XSI_IHA				(XSI_BASE + XSI_IHA_OFS)
-#define XSI_ITC_OFFSET			(XSI_BASE + XSI_ITC_OFFSET_OFS)
-#define XSI_ITC_LAST			(XSI_BASE + XSI_ITC_LAST_OFS)
-#endif
-
-#ifndef __ASSEMBLY__
-
-/************************************************/
-/* Instructions paravirtualized for correctness */
-/************************************************/
-
-/* "fc" and "thash" are privilege-sensitive instructions, meaning they
- *  may have different semantics depending on whether they are executed
- *  at PL0 vs PL!=0.  When paravirtualized, these instructions mustn't
- *  be allowed to execute directly, lest incorrect semantics result. */
-extern void xen_fc(void *addr);
-extern unsigned long xen_thash(unsigned long addr);
-
-/* Note that "ttag" and "cover" are also privilege-sensitive; "ttag"
- * is not currently used (though it may be in a long-format VHPT system!)
- * and the semantics of cover only change if psr.ic is off which is very
- * rare (and currently non-existent outside of assembly code) */
-
-/* There are also privilege-sensitive registers.  These registers are
- * readable at any privilege level but only writable at PL0. */
-extern unsigned long xen_get_cpuid(int index);
-extern unsigned long xen_get_pmd(int index);
-
-#ifndef ASM_SUPPORTED
-extern unsigned long xen_get_eflag(void);	/* see xen_ia64_getreg */
-extern void xen_set_eflag(unsigned long);	/* see xen_ia64_setreg */
-#endif
-
-/************************************************/
-/* Instructions paravirtualized for performance */
-/************************************************/
-
-/* Xen uses memory-mapped virtual privileged registers for access to many
- * performance-sensitive privileged registers.  Some, like the processor
- * status register (psr), are broken up into multiple memory locations.
- * Others, like "pend", are abstractions based on privileged registers.
- * "Pend" is guaranteed to be set if reading cr.ivr would return a
- * (non-spurious) interrupt. */
-#define XEN_MAPPEDREGS ((struct mapped_regs *)XMAPPEDREGS_BASE)
-
-#define XSI_PSR_I			\
-	(*XEN_MAPPEDREGS->interrupt_mask_addr)
-#define xen_get_virtual_psr_i()		\
-	(!XSI_PSR_I)
-#define xen_set_virtual_psr_i(_val)	\
-	({ XSI_PSR_I = (uint8_t)(_val) ? 0 : 1; })
-#define xen_set_virtual_psr_ic(_val)	\
-	({ XEN_MAPPEDREGS->interrupt_collection_enabled = _val ? 1 : 0; })
-#define xen_get_virtual_pend()		\
-	(*(((uint8_t *)XEN_MAPPEDREGS->interrupt_mask_addr) - 1))
-
-#ifndef ASM_SUPPORTED
-/* Although all privileged operations can be left to trap and will
- * be properly handled by Xen, some are frequent enough that we use
- * hyperprivops for performance. */
-extern unsigned long xen_get_psr(void);
-extern unsigned long xen_get_ivr(void);
-extern unsigned long xen_get_tpr(void);
-extern void xen_hyper_ssm_i(void);
-extern void xen_set_itm(unsigned long);
-extern void xen_set_tpr(unsigned long);
-extern void xen_eoi(unsigned long);
-extern unsigned long xen_get_rr(unsigned long index);
-extern void xen_set_rr(unsigned long index, unsigned long val);
-extern void xen_set_rr0_to_rr4(unsigned long val0, unsigned long val1,
-			       unsigned long val2, unsigned long val3,
-			       unsigned long val4);
-extern void xen_set_kr(unsigned long index, unsigned long val);
-extern void xen_ptcga(unsigned long addr, unsigned long size);
-#endif /* !ASM_SUPPORTED */
-
-#endif /* !__ASSEMBLY__ */
-
-#endif /* _ASM_IA64_XEN_PRIVOP_H */

+ 0 - 51
arch/ia64/include/asm/xen/xcom_hcall.h

@@ -1,51 +0,0 @@
-/*
- * Copyright (C) 2006 Tristan Gingold <tristan.gingold@bull.net>, Bull SAS
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
- */
-
-#ifndef _ASM_IA64_XEN_XCOM_HCALL_H
-#define _ASM_IA64_XEN_XCOM_HCALL_H
-
-/* These function creates inline or mini descriptor for the parameters and
-   calls the corresponding xencomm_arch_hypercall_X.
-   Architectures should define HYPERVISOR_xxx as xencomm_hypercall_xxx unless
-   they want to use their own wrapper.  */
-extern int xencomm_hypercall_console_io(int cmd, int count, char *str);
-
-extern int xencomm_hypercall_event_channel_op(int cmd, void *op);
-
-extern int xencomm_hypercall_xen_version(int cmd, void *arg);
-
-extern int xencomm_hypercall_physdev_op(int cmd, void *op);
-
-extern int xencomm_hypercall_grant_table_op(unsigned int cmd, void *op,
-					    unsigned int count);
-
-extern int xencomm_hypercall_sched_op(int cmd, void *arg);
-
-extern int xencomm_hypercall_multicall(void *call_list, int nr_calls);
-
-extern int xencomm_hypercall_callback_op(int cmd, void *arg);
-
-extern int xencomm_hypercall_memory_op(unsigned int cmd, void *arg);
-
-extern int xencomm_hypercall_suspend(unsigned long srec);
-
-extern long xencomm_hypercall_vcpu_op(int cmd, int cpu, void *arg);
-
-extern long xencomm_hypercall_opt_feature(void *arg);
-
-#endif /* _ASM_IA64_XEN_XCOM_HCALL_H */

+ 0 - 42
arch/ia64/include/asm/xen/xencomm.h

@@ -1,42 +0,0 @@
-/*
- * Copyright (C) 2006 Hollis Blanchard <hollisb@us.ibm.com>, IBM Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
- */
-
-#ifndef _ASM_IA64_XEN_XENCOMM_H
-#define _ASM_IA64_XEN_XENCOMM_H
-
-#include <xen/xencomm.h>
-#include <asm/pgtable.h>
-
-/* Must be called before any hypercall.  */
-extern void xencomm_initialize(void);
-extern int xencomm_is_initialized(void);
-
-/* Check if virtual contiguity means physical contiguity
- * where the passed address is a pointer value in virtual address.
- * On ia64, identity mapping area in region 7 or the piece of region 5
- * that is mapped by itr[IA64_TR_KERNEL]/dtr[IA64_TR_KERNEL]
- */
-static inline int xencomm_is_phys_contiguous(unsigned long addr)
-{
-	return (PAGE_OFFSET <= addr &&
-		addr < (PAGE_OFFSET + (1UL << IA64_MAX_PHYS_BITS))) ||
-		(KERNEL_START <= addr &&
-		 addr < KERNEL_START + KERNEL_TR_PAGE_SIZE);
-}
-
-#endif /* _ASM_IA64_XEN_XENCOMM_H */

+ 0 - 9
arch/ia64/include/uapi/asm/break.h

@@ -20,13 +20,4 @@
  */
 #define __IA64_BREAK_SYSCALL		0x100000
 
-/*
- * Xen specific break numbers:
- */
-#define __IA64_XEN_HYPERCALL		0x1000
-/* [__IA64_XEN_HYPERPRIVOP_START, __IA64_XEN_HYPERPRIVOP_MAX] is used
-   for xen hyperprivops */
-#define __IA64_XEN_HYPERPRIVOP_START	0x1
-#define __IA64_XEN_HYPERPRIVOP_MAX	0x1a
-
 #endif /* _ASM_IA64_BREAK_H */

+ 0 - 3
arch/ia64/kernel/acpi.c

@@ -53,7 +53,6 @@
 #include <asm/numa.h>
 #include <asm/sal.h>
 #include <asm/cyclone.h>
-#include <asm/xen/hypervisor.h>
 
 #define BAD_MADT_ENTRY(entry, end) (                                        \
 		(!entry) || (unsigned long)entry + sizeof(*entry) > end ||  \
@@ -120,8 +119,6 @@ acpi_get_sysname(void)
 			return "uv";
 		else
 			return "sn2";
-	} else if (xen_pv_domain() && !strcmp(hdr->oem_id, "XEN")) {
-		return "xen";
 	}
 
 #ifdef CONFIG_INTEL_IOMMU

+ 0 - 32
arch/ia64/kernel/asm-offsets.c

@@ -16,9 +16,6 @@
 #include <asm/sigcontext.h>
 #include <asm/mca.h>
 
-#include <asm/xen/interface.h>
-#include <asm/xen/hypervisor.h>
-
 #include "../kernel/sigframe.h"
 #include "../kernel/fsyscall_gtod_data.h"
 
@@ -290,33 +287,4 @@ void foo(void)
 	DEFINE(IA64_ITC_LASTCYCLE_OFFSET,
 		offsetof (struct itc_jitter_data_t, itc_lastcycle));
 
-#ifdef CONFIG_XEN
-	BLANK();
-
-	DEFINE(XEN_NATIVE_ASM, XEN_NATIVE);
-	DEFINE(XEN_PV_DOMAIN_ASM, XEN_PV_DOMAIN);
-
-#define DEFINE_MAPPED_REG_OFS(sym, field) \
-	DEFINE(sym, (XMAPPEDREGS_OFS + offsetof(struct mapped_regs, field)))
-
-	DEFINE_MAPPED_REG_OFS(XSI_PSR_I_ADDR_OFS, interrupt_mask_addr);
-	DEFINE_MAPPED_REG_OFS(XSI_IPSR_OFS, ipsr);
-	DEFINE_MAPPED_REG_OFS(XSI_IIP_OFS, iip);
-	DEFINE_MAPPED_REG_OFS(XSI_IFS_OFS, ifs);
-	DEFINE_MAPPED_REG_OFS(XSI_PRECOVER_IFS_OFS, precover_ifs);
-	DEFINE_MAPPED_REG_OFS(XSI_ISR_OFS, isr);
-	DEFINE_MAPPED_REG_OFS(XSI_IFA_OFS, ifa);
-	DEFINE_MAPPED_REG_OFS(XSI_IIPA_OFS, iipa);
-	DEFINE_MAPPED_REG_OFS(XSI_IIM_OFS, iim);
-	DEFINE_MAPPED_REG_OFS(XSI_IHA_OFS, iha);
-	DEFINE_MAPPED_REG_OFS(XSI_ITIR_OFS, itir);
-	DEFINE_MAPPED_REG_OFS(XSI_PSR_IC_OFS, interrupt_collection_enabled);
-	DEFINE_MAPPED_REG_OFS(XSI_BANKNUM_OFS, banknum);
-	DEFINE_MAPPED_REG_OFS(XSI_BANK0_R16_OFS, bank0_regs[0]);
-	DEFINE_MAPPED_REG_OFS(XSI_BANK1_R16_OFS, bank1_regs[0]);
-	DEFINE_MAPPED_REG_OFS(XSI_B0NATS_OFS, vbnat);
-	DEFINE_MAPPED_REG_OFS(XSI_B1NATS_OFS, vnat);
-	DEFINE_MAPPED_REG_OFS(XSI_ITC_OFFSET_OFS, itc_offset);
-	DEFINE_MAPPED_REG_OFS(XSI_ITC_LAST_OFS, itc_last);
-#endif /* CONFIG_XEN */
 }

+ 0 - 3
arch/ia64/kernel/head.S

@@ -416,8 +416,6 @@ start_ap:
 
 default_setup_hook = 0		// Currently nothing needs to be done.
 
-	.weak xen_setup_hook
-
 	.global hypervisor_type
 hypervisor_type:
 	data8		PARAVIRT_HYPERVISOR_TYPE_DEFAULT
@@ -426,7 +424,6 @@ hypervisor_type:
 
 hypervisor_setup_hooks:
 	data8		default_setup_hook
-	data8		xen_setup_hook
 num_hypervisor_hooks = (. - hypervisor_setup_hooks) / 8
 	.previous
 

+ 0 - 4
arch/ia64/kernel/nr-irqs.c

@@ -10,15 +10,11 @@
 #include <linux/kbuild.h>
 #include <linux/threads.h>
 #include <asm/native/irq.h>
-#include <asm/xen/irq.h>
 
 void foo(void)
 {
 	union paravirt_nr_irqs_max {
 		char ia64_native_nr_irqs[IA64_NATIVE_NR_IRQS];
-#ifdef CONFIG_XEN
-		char xen_nr_irqs[XEN_NR_IRQS];
-#endif
 	};
 
 	DEFINE(NR_IRQS, sizeof (union paravirt_nr_irqs_max));

+ 0 - 3
arch/ia64/kernel/paravirt_inst.h

@@ -22,9 +22,6 @@
 
 #ifdef __IA64_ASM_PARAVIRTUALIZED_PVCHECK
 #include <asm/native/pvchk_inst.h>
-#elif defined(__IA64_ASM_PARAVIRTUALIZED_XEN)
-#include <asm/xen/inst.h>
-#include <asm/xen/minstate.h>
 #else
 #include <asm/native/inst.h>
 #endif

+ 0 - 4
arch/ia64/kernel/paravirt_patchlist.h

@@ -20,9 +20,5 @@
  *
  */
 
-#if defined(__IA64_GATE_PARAVIRTUALIZED_XEN)
-#include <asm/xen/patchlist.h>
-#else
 #include <asm/native/patchlist.h>
-#endif
 

+ 0 - 6
arch/ia64/kernel/vmlinux.lds.S

@@ -182,12 +182,6 @@ SECTIONS {
 		__start_gate_section = .;
 		*(.data..gate)
 		__stop_gate_section = .;
-#ifdef CONFIG_XEN
-		. = ALIGN(PAGE_SIZE);
-		__xen_start_gate_section = .;
-		*(.data..gate.xen)
-		__xen_stop_gate_section = .;
-#endif
 	}
 	/*
 	 * make sure the gate page doesn't expose

+ 0 - 25
arch/ia64/xen/Kconfig

@@ -1,25 +0,0 @@
-#
-# This Kconfig describes xen/ia64 options
-#
-
-config XEN
-	bool "Xen hypervisor support"
-	default y
-	depends on PARAVIRT && MCKINLEY && IA64_PAGE_SIZE_16KB
-	select XEN_XENCOMM
-	select NO_IDLE_HZ
-	# followings are required to save/restore.
-	select ARCH_SUSPEND_POSSIBLE
-	select SUSPEND
-	select PM_SLEEP
-	help
-	  Enable Xen hypervisor support.  Resulting kernel runs
-	  both as a guest OS on Xen and natively on hardware.
-
-config XEN_XENCOMM
-	depends on XEN
-	bool
-
-config NO_IDLE_HZ
-	depends on XEN
-	bool

+ 0 - 37
arch/ia64/xen/Makefile

@@ -1,37 +0,0 @@
-#
-# Makefile for Xen components
-#
-
-obj-y := hypercall.o xenivt.o xensetup.o xen_pv_ops.o irq_xen.o \
-	 hypervisor.o xencomm.o xcom_hcall.o grant-table.o time.o suspend.o \
-	 gate-data.o
-
-obj-$(CONFIG_IA64_GENERIC) += machvec.o
-
-# The gate DSO image is built using a special linker script.
-include $(srctree)/arch/ia64/kernel/Makefile.gate
-
-# tell compiled for xen
-CPPFLAGS_gate.lds += -D__IA64_GATE_PARAVIRTUALIZED_XEN
-AFLAGS_gate.o += -D__IA64_ASM_PARAVIRTUALIZED_XEN -D__IA64_GATE_PARAVIRTUALIZED_XEN
-
-# use same file of native.
-$(obj)/gate.o: $(src)/../kernel/gate.S FORCE
-	$(call if_changed_dep,as_o_S)
-$(obj)/gate.lds: $(src)/../kernel/gate.lds.S FORCE
-	$(call if_changed_dep,cpp_lds_S)
-
-
-AFLAGS_xenivt.o += -D__IA64_ASM_PARAVIRTUALIZED_XEN
-
-# xen multi compile
-ASM_PARAVIRT_MULTI_COMPILE_SRCS = ivt.S entry.S fsys.S
-ASM_PARAVIRT_OBJS = $(addprefix xen-,$(ASM_PARAVIRT_MULTI_COMPILE_SRCS:.S=.o))
-obj-y += $(ASM_PARAVIRT_OBJS)
-define paravirtualized_xen
-AFLAGS_$(1) += -D__IA64_ASM_PARAVIRTUALIZED_XEN
-endef
-$(foreach o,$(ASM_PARAVIRT_OBJS),$(eval $(call paravirtualized_xen,$(o))))
-
-$(obj)/xen-%.o: $(src)/../kernel/%.S FORCE
-	$(call if_changed_dep,as_o_S)

+ 0 - 3
arch/ia64/xen/gate-data.S

@@ -1,3 +0,0 @@
-	.section .data..gate.xen, "aw"
-
-	.incbin "arch/ia64/xen/gate.so"

+ 0 - 94
arch/ia64/xen/grant-table.c

@@ -1,94 +0,0 @@
-/******************************************************************************
- * arch/ia64/xen/grant-table.c
- *
- * Copyright (c) 2006 Isaku Yamahata <yamahata at valinux co jp>
- *                    VA Linux Systems Japan K.K.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- *
- */
-
-#include <linux/module.h>
-#include <linux/vmalloc.h>
-#include <linux/slab.h>
-#include <linux/mm.h>
-
-#include <xen/interface/xen.h>
-#include <xen/interface/memory.h>
-#include <xen/grant_table.h>
-
-#include <asm/xen/hypervisor.h>
-
-/****************************************************************************
- * grant table hack
- * cmd: GNTTABOP_xxx
- */
-
-int arch_gnttab_map_shared(unsigned long *frames, unsigned long nr_gframes,
-			   unsigned long max_nr_gframes,
-			   struct grant_entry **__shared)
-{
-	*__shared = __va(frames[0] << PAGE_SHIFT);
-	return 0;
-}
-
-void arch_gnttab_unmap_shared(struct grant_entry *shared,
-			      unsigned long nr_gframes)
-{
-	/* nothing */
-}
-
-static void
-gnttab_map_grant_ref_pre(struct gnttab_map_grant_ref *uop)
-{
-	uint32_t flags;
-
-	flags = uop->flags;
-
-	if (flags & GNTMAP_host_map) {
-		if (flags & GNTMAP_application_map) {
-			printk(KERN_DEBUG
-			       "GNTMAP_application_map is not supported yet: "
-			       "flags 0x%x\n", flags);
-			BUG();
-		}
-		if (flags & GNTMAP_contains_pte) {
-			printk(KERN_DEBUG
-			       "GNTMAP_contains_pte is not supported yet: "
-			       "flags 0x%x\n", flags);
-			BUG();
-		}
-	} else if (flags & GNTMAP_device_map) {
-		printk("GNTMAP_device_map is not supported yet 0x%x\n", flags);
-		BUG();	/* not yet. actually this flag is not used. */
-	} else {
-		BUG();
-	}
-}
-
-int
-HYPERVISOR_grant_table_op(unsigned int cmd, void *uop, unsigned int count)
-{
-	if (cmd == GNTTABOP_map_grant_ref) {
-		unsigned int i;
-		for (i = 0; i < count; i++) {
-			gnttab_map_grant_ref_pre(
-				(struct gnttab_map_grant_ref *)uop + i);
-		}
-	}
-	return xencomm_hypercall_grant_table_op(cmd, uop, count);
-}
-
-EXPORT_SYMBOL(HYPERVISOR_grant_table_op);
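
The removed HYPERVISOR_grant_table_op() is a validate-then-delegate wrapper: it screens each GNTTABOP_map_grant_ref entry for flag combinations the ia64 port never implemented (BUG() on GNTMAP_application_map, GNTMAP_contains_pte and GNTMAP_device_map) and then forwards the whole batch to xencomm_hypercall_grant_table_op(). A generic sketch of that shape, with placeholder types and a stand-in backend rather than the real Xen interfaces:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define GNTMAP_host_map		0x1
#define GNTMAP_application_map	0x2	/* unsupported in the removed code */

struct map_op {
	uint32_t flags;
};

static int backend_grant_table_op(struct map_op *ops, unsigned int count)
{
	/* stands in for xencomm_hypercall_grant_table_op() */
	(void)ops;
	return (int)count;
}

static int grant_table_op(struct map_op *ops, unsigned int count)
{
	for (unsigned int i = 0; i < count; i++) {
		/* reject what the backend cannot handle, the way
		 * gnttab_map_grant_ref_pre() BUG()ed on such flags */
		assert(!(ops[i].flags & GNTMAP_application_map));
	}
	return backend_grant_table_op(ops, count);
}

int main(void)
{
	struct map_op op = { .flags = GNTMAP_host_map };

	printf("mapped %d op(s)\n", grant_table_op(&op, 1));
	return 0;
}
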

+ 0 - 88
arch/ia64/xen/hypercall.S

@@ -1,88 +0,0 @@
-/*
- * Support routines for Xen hypercalls
- *
- * Copyright (C) 2005 Dan Magenheimer <dan.magenheimer@hp.com>
- * Copyright (C) 2008 Yaozu (Eddie) Dong <eddie.dong@intel.com>
- */
-
-#include <asm/asmmacro.h>
-#include <asm/intrinsics.h>
-#include <asm/xen/privop.h>
-
-#ifdef __INTEL_COMPILER
-/*
- * Hypercalls without parameter.
- */
-#define __HCALL0(name,hcall)		\
-	GLOBAL_ENTRY(name);		\
-	break	hcall;			\
-	br.ret.sptk.many rp;		\
-	END(name)
-
-/*
- * Hypercalls with 1 parameter.
- */
-#define __HCALL1(name,hcall)		\
-	GLOBAL_ENTRY(name);		\
-	mov r8=r32;			\
-	break	hcall;			\
-	br.ret.sptk.many rp;		\
-	END(name)
-
-/*
- * Hypercalls with 2 parameters.
- */
-#define __HCALL2(name,hcall)		\
-	GLOBAL_ENTRY(name);		\
-	mov r8=r32;			\
-	mov r9=r33;			\
-	break	hcall;			\
-	br.ret.sptk.many rp;		\
-	END(name)
-
-__HCALL0(xen_get_psr, HYPERPRIVOP_GET_PSR)
-__HCALL0(xen_get_ivr, HYPERPRIVOP_GET_IVR)
-__HCALL0(xen_get_tpr, HYPERPRIVOP_GET_TPR)
-__HCALL0(xen_hyper_ssm_i, HYPERPRIVOP_SSM_I)
-
-__HCALL1(xen_set_tpr, HYPERPRIVOP_SET_TPR)
-__HCALL1(xen_eoi, HYPERPRIVOP_EOI)
-__HCALL1(xen_thash, HYPERPRIVOP_THASH)
-__HCALL1(xen_set_itm, HYPERPRIVOP_SET_ITM)
-__HCALL1(xen_get_rr, HYPERPRIVOP_GET_RR)
-__HCALL1(xen_fc, HYPERPRIVOP_FC)
-__HCALL1(xen_get_cpuid, HYPERPRIVOP_GET_CPUID)
-__HCALL1(xen_get_pmd, HYPERPRIVOP_GET_PMD)
-
-__HCALL2(xen_ptcga, HYPERPRIVOP_PTC_GA)
-__HCALL2(xen_set_rr, HYPERPRIVOP_SET_RR)
-__HCALL2(xen_set_kr, HYPERPRIVOP_SET_KR)
-
-GLOBAL_ENTRY(xen_set_rr0_to_rr4)
-	mov r8=r32
-	mov r9=r33
-	mov r10=r34
-	mov r11=r35
-	mov r14=r36
-	XEN_HYPER_SET_RR0_TO_RR4
-	br.ret.sptk.many rp
-	;;
-END(xen_set_rr0_to_rr4)
-#endif
-
-GLOBAL_ENTRY(xen_send_ipi)
-	mov r14=r32
-	mov r15=r33
-	mov r2=0x400
-	break 0x1000
-	;;
-	br.ret.sptk.many rp
-	;;
-END(xen_send_ipi)
-
-GLOBAL_ENTRY(__hypercall)
-	mov r2=r37
-	break 0x1000
-	br.ret.sptk.many b0
-	;;
-END(__hypercall)

Some files were not shown because too many files changed in this diff