
Merge branch 'perf/urgent' into perf/core, to resolve conflicts

Conflicts:
	tools/perf/builtin-kmem.c

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Ingo Molnar, 10 years ago
parent commit f7dc7fd1c0
100 changed files with 1557 additions and 456 deletions
  1. Documentation/IPMI.txt (+4 -1)
  2. Documentation/acpi/enumeration.txt (+1 -1)
  3. Documentation/acpi/gpio-properties.txt (+3 -3)
  4. Documentation/devicetree/bindings/rtc/abracon,abx80x.txt (+30 -0)
  5. Documentation/kasan.txt (+5 -3)
  6. MAINTAINERS (+6 -2)
  7. arch/arm/include/asm/xen/page.h (+1 -0)
  8. arch/arm/xen/mm.c (+15 -0)
  9. arch/x86/boot/compressed/eboot.c (+2 -0)
  10. arch/x86/include/asm/hypervisor.h (+1 -1)
  11. arch/x86/include/asm/spinlock.h (+1 -1)
  12. arch/x86/include/asm/xen/page.h (+5 -0)
  13. arch/x86/kernel/cpu/hypervisor.c (+2 -2)
  14. arch/x86/kernel/cpu/perf_event_intel.c (+41 -32)
  15. arch/x86/kernel/cpu/perf_event_intel_rapl.c (+1 -0)
  16. arch/x86/kernel/cpu/perf_event_intel_uncore_snb.c (+12 -0)
  17. arch/x86/kernel/process.c (+8 -6)
  18. arch/x86/mm/ioremap.c (+8 -6)
  19. arch/x86/pci/acpi.c (+22 -2)
  20. arch/x86/xen/enlighten.c (+18 -9)
  21. arch/x86/xen/suspend.c (+10 -0)
  22. drivers/acpi/acpi_pnp.c (+2 -0)
  23. drivers/acpi/resource.c (+1 -1)
  24. drivers/acpi/sbshc.c (+22 -0)
  25. drivers/block/xen-blkback/blkback.c (+11 -24)
  26. drivers/block/zram/zram_drv.c (+23 -0)
  27. drivers/char/hw_random/bcm63xx-rng.c (+9 -9)
  28. drivers/char/ipmi/ipmi_msghandler.c (+2 -2)
  29. drivers/char/ipmi/ipmi_si_intf.c (+9 -7)
  30. drivers/char/ipmi/ipmi_ssif.c (+178 -35)
  31. drivers/firmware/efi/runtime-map.c (+3 -3)
  32. drivers/infiniband/core/addr.c (+3 -10)
  33. drivers/infiniband/core/cm.c (+11 -12)
  34. drivers/infiniband/core/cm_msgs.h (+2 -2)
  35. drivers/infiniband/core/cma.c (+17 -10)
  36. drivers/infiniband/core/iwpm_msg.c (+72 -1)
  37. drivers/infiniband/core/iwpm_util.c (+175 -33)
  38. drivers/infiniband/core/iwpm_util.h (+15 -0)
  39. drivers/infiniband/core/umem_odp.c (+7 -7)
  40. drivers/infiniband/hw/cxgb4/cm.c (+71 -16)
  41. drivers/infiniband/hw/cxgb4/cq.c (+14 -8)
  42. drivers/infiniband/hw/cxgb4/device.c (+34 -3)
  43. drivers/infiniband/hw/cxgb4/iw_cxgb4.h (+7 -0)
  44. drivers/infiniband/hw/cxgb4/mem.c (+3 -3)
  45. drivers/infiniband/hw/cxgb4/qp.c (+5 -5)
  46. drivers/infiniband/hw/cxgb4/t4.h (+4 -3)
  47. drivers/infiniband/hw/cxgb4/t4fw_ri_api.h (+3 -1)
  48. drivers/infiniband/hw/nes/nes.c (+1 -0)
  49. drivers/infiniband/hw/nes/nes_cm.c (+48 -17)
  50. drivers/infiniband/hw/qib/qib.h (+0 -1)
  51. drivers/infiniband/hw/qib/qib_file_ops.c (+2 -1)
  52. drivers/infiniband/hw/qib/qib_iba6120.c (+3 -5)
  53. drivers/infiniband/hw/qib/qib_iba7220.c (+3 -5)
  54. drivers/infiniband/hw/qib/qib_iba7322.c (+20 -21)
  55. drivers/infiniband/hw/qib/qib_init.c (+7 -19)
  56. drivers/infiniband/hw/qib/qib_wc_x86_64.c (+4 -27)
  57. drivers/infiniband/ulp/ipoib/ipoib_cm.c (+2 -2)
  58. drivers/media/platform/marvell-ccic/mcam-core.c (+7 -7)
  59. drivers/media/platform/marvell-ccic/mcam-core.h (+4 -4)
  60. drivers/media/platform/soc_camera/rcar_vin.c (+6 -1)
  61. drivers/pinctrl/core.c (+4 -6)
  62. drivers/pinctrl/core.h (+1 -1)
  63. drivers/pinctrl/devicetree.c (+1 -1)
  64. drivers/pinctrl/mediatek/pinctrl-mtk-common.c (+2 -0)
  65. drivers/pinctrl/mvebu/pinctrl-armada-370.c (+1 -1)
  66. drivers/pinctrl/qcom/pinctrl-spmi-gpio.c (+1 -0)
  67. drivers/pinctrl/qcom/pinctrl-spmi-mpp.c (+6 -4)
  68. drivers/platform/x86/ideapad-laptop.c (+7 -0)
  69. drivers/platform/x86/thinkpad_acpi.c (+1 -1)
  70. drivers/rtc/Kconfig (+10 -0)
  71. drivers/rtc/Makefile (+1 -0)
  72. drivers/rtc/rtc-abx80x.c (+307 -0)
  73. drivers/rtc/rtc-armada38x.c (+12 -12)
  74. drivers/staging/media/omap4iss/Kconfig (+1 -0)
  75. drivers/staging/media/omap4iss/iss.c (+11 -0)
  76. drivers/staging/media/omap4iss/iss.h (+4 -0)
  77. drivers/staging/media/omap4iss/iss_csiphy.c (+7 -5)
  78. drivers/tty/hvc/hvc_xen.c (+17 -1)
  79. drivers/vfio/pci/vfio_pci.c (+7 -1)
  80. drivers/vfio/vfio.c (+18 -3)
  81. drivers/xen/events/events_2l.c (+10 -0)
  82. drivers/xen/events/events_base.c (+4 -3)
  83. drivers/xen/gntdev.c (+3 -25)
  84. drivers/xen/grant-table.c (+28 -0)
  85. drivers/xen/manage.c (+6 -3)
  86. drivers/xen/swiotlb-xen.c (+1 -1)
  87. drivers/xen/xen-pciback/conf_space.c (+3 -3)
  88. drivers/xen/xen-pciback/conf_space.h (+1 -1)
  89. drivers/xen/xen-pciback/conf_space_header.c (+1 -1)
  90. drivers/xen/xenbus/xenbus_probe.c (+29 -0)
  91. fs/configfs/mount.c (+1 -1)
  92. fs/efivarfs/super.c (+1 -1)
  93. fs/f2fs/data.c (+7 -0)
  94. fs/f2fs/f2fs.h (+1 -0)
  95. fs/f2fs/namei.c (+3 -5)
  96. fs/f2fs/super.c (+1 -0)
  97. fs/nilfs2/btree.c (+1 -1)
  98. fs/ocfs2/dlm/dlmmaster.c (+13 -0)
  99. include/linux/compiler-gcc.h (+15 -1)
  100. include/linux/compiler-intel.h (+3 -0)

+ 4 - 1
Documentation/IPMI.txt

@@ -505,7 +505,10 @@ at module load time (for a module) with:
 
 The addresses are normal I2C addresses.  The adapter is the string
 name of the adapter, as shown in /sys/class/i2c-adapter/i2c-<n>/name.
-It is *NOT* i2c-<n> itself.
+It is *NOT* i2c-<n> itself.  Also, the comparison is done ignoring
+spaces, so if the name is "This is an I2C chip" you can say
+adapter_name=ThisisanI2cchip.  This is because it's hard to pass in
+spaces in kernel parameters.
 
 The debug flags are bit flags for each BMC found, they are:
 IPMI messages: 1, driver state: 2, timing: 4, I2C probe: 8

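The space-ignoring match described in the hunk above is what the new strcmp_nospace() helper in the ipmi_ssif.c diff further down implements. A minimal standalone sketch of the same idea (hypothetical helper name, not the driver's code; note the comparison stays case sensitive):

    #include <ctype.h>

    /* Compare two adapter names, skipping whitespace in both, so that
     * "an I2C chip" matches "anI2Cchip". */
    static int name_matches(const char *a, const char *b)
    {
            for (;;) {
                    while (isspace((unsigned char)*a))
                            a++;
                    while (isspace((unsigned char)*b))
                            b++;
                    if (*a != *b)
                            return 0;
                    if (!*a)
                            return 1;       /* both strings ended together */
                    a++;
                    b++;
            }
    }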
+ 1 - 1
Documentation/acpi/enumeration.txt

@@ -253,7 +253,7 @@ input driver:
 GPIO support
 ~~~~~~~~~~~~
 ACPI 5 introduced two new resources to describe GPIO connections: GpioIo
-and GpioInt. These resources are used be used to pass GPIO numbers used by
+and GpioInt. These resources can be used to pass GPIO numbers used by
 the device to the driver. ACPI 5.1 extended this with _DSD (Device
 Specific Data) which made it possible to name the GPIOs among other things.
 

+ 3 - 3
Documentation/acpi/gpio-properties.txt

@@ -1,9 +1,9 @@
 _DSD Device Properties Related to GPIO
 --------------------------------------
 
-With the release of ACPI 5.1 and the _DSD configuration objecte names
-can finally be given to GPIOs (and other things as well) returned by
-_CRS.  Previously, we were only able to use an integer index to find
+With the release of ACPI 5.1, the _DSD configuration object finally
+allows names to be given to GPIOs (and other things as well) returned
+by _CRS.  Previously, we were only able to use an integer index to find
 the corresponding GPIO, which is pretty error prone (it depends on
 the _CRS output ordering, for example).
 

+ 30 - 0
Documentation/devicetree/bindings/rtc/abracon,abx80x.txt

@@ -0,0 +1,30 @@
+Abracon ABX80X I2C ultra low power RTC/Alarm chip
+
+The Abracon ABX80X family consist of the ab0801, ab0803, ab0804, ab0805, ab1801,
+ab1803, ab1804 and ab1805. The ab0805 is the superset of ab080x and the ab1805
+is the superset of ab180x.
+
+Required properties:
+
+ - "compatible": should one of:
+        "abracon,abx80x"
+        "abracon,ab0801"
+        "abracon,ab0803"
+        "abracon,ab0804"
+        "abracon,ab0805"
+        "abracon,ab1801"
+        "abracon,ab1803"
+        "abracon,ab1804"
+        "abracon,ab1805"
+	Using "abracon,abx80x" will enable chip autodetection.
+ - "reg": I2C bus address of the device
+
+Optional properties:
+
+The abx804 and abx805 have a trickle charger that is able to charge the
+connected battery or supercap. Both the following properties have to be defined
+and valid to enable charging:
+
+ - "abracon,tc-diode": should be "standard" (0.6V) or "schottky" (0.3V)
+ - "abracon,tc-resistor": should be <0>, <3>, <6> or <11>. 0 disables the output
+                          resistor, the other values are in ohm.

+ 5 - 3
Documentation/kasan.txt

@@ -9,7 +9,9 @@ a fast and comprehensive solution for finding use-after-free and out-of-bounds
 bugs.
 
 KASan uses compile-time instrumentation for checking every memory access,
-therefore you will need a certain version of GCC > 4.9.2
+therefore you will need a gcc version of 4.9.2 or later. KASan could detect out
+of bounds accesses to stack or global variables, but only if gcc 5.0 or later was
+used to built the kernel.
 
 Currently KASan is supported only for x86_64 architecture and requires that the
 kernel be built with the SLUB allocator.
@@ -23,8 +25,8 @@ To enable KASAN configure kernel with:
 
 and choose between CONFIG_KASAN_OUTLINE and CONFIG_KASAN_INLINE. Outline/inline
 is compiler instrumentation types. The former produces smaller binary the
-latter is 1.1 - 2 times faster. Inline instrumentation requires GCC 5.0 or
-latter.
+latter is 1.1 - 2 times faster. Inline instrumentation requires a gcc version
+of 5.0 or later.
 
 Currently KASAN works only with the SLUB memory allocator.
 For better bug detection and nicer report, enable CONFIG_STACKTRACE and put

+ 6 - 2
MAINTAINERS

@@ -5042,17 +5042,19 @@ S:	Orphan
 F:	drivers/video/fbdev/imsttfb.c
 
 INFINIBAND SUBSYSTEM
-M:	Roland Dreier <roland@kernel.org>
+M:	Doug Ledford <dledford@redhat.com>
 M:	Sean Hefty <sean.hefty@intel.com>
 M:	Hal Rosenstock <hal.rosenstock@gmail.com>
 L:	linux-rdma@vger.kernel.org
 W:	http://www.openfabrics.org/
 Q:	http://patchwork.kernel.org/project/linux-rdma/list/
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband.git
+T:	git git://github.com/dledford/linux.git
 S:	Supported
 F:	Documentation/infiniband/
 F:	drivers/infiniband/
 F:	include/uapi/linux/if_infiniband.h
+F:	include/uapi/rdma/
+F:	include/rdma/
 
 INOTIFY
 M:	John McCutchan <john@johnmccutchan.com>
@@ -5805,6 +5807,7 @@ F:	drivers/scsi/53c700*
 LED SUBSYSTEM
 M:	Bryan Wu <cooloney@gmail.com>
 M:	Richard Purdie <rpurdie@rpsys.net>
+M:	Jacek Anaszewski <j.anaszewski@samsung.com>
 L:	linux-leds@vger.kernel.org
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/cooloney/linux-leds.git
 S:	Maintained
@@ -11037,6 +11040,7 @@ F:	drivers/media/pci/zoran/
 ZRAM COMPRESSED RAM BLOCK DEVICE DRVIER
 M:	Minchan Kim <minchan@kernel.org>
 M:	Nitin Gupta <ngupta@vflare.org>
+R:	Sergey Senozhatsky <sergey.senozhatsky.work@gmail.com>
 L:	linux-kernel@vger.kernel.org
 S:	Maintained
 F:	drivers/block/zram/

+ 1 - 0
arch/arm/include/asm/xen/page.h

@@ -110,5 +110,6 @@ static inline bool set_phys_to_machine(unsigned long pfn, unsigned long mfn)
 bool xen_arch_need_swiotlb(struct device *dev,
 			   unsigned long pfn,
 			   unsigned long mfn);
+unsigned long xen_get_swiotlb_free_pages(unsigned int order);
 
 #endif /* _ASM_ARM_XEN_PAGE_H */

+ 15 - 0
arch/arm/xen/mm.c

@@ -4,6 +4,7 @@
 #include <linux/gfp.h>
 #include <linux/highmem.h>
 #include <linux/export.h>
+#include <linux/memblock.h>
 #include <linux/of_address.h>
 #include <linux/slab.h>
 #include <linux/types.h>
@@ -21,6 +22,20 @@
 #include <asm/xen/hypercall.h>
 #include <asm/xen/interface.h>
 
+unsigned long xen_get_swiotlb_free_pages(unsigned int order)
+{
+	struct memblock_region *reg;
+	gfp_t flags = __GFP_NOWARN;
+
+	for_each_memblock(memory, reg) {
+		if (reg->base < (phys_addr_t)0xffffffff) {
+			flags |= __GFP_DMA;
+			break;
+		}
+	}
+	return __get_free_pages(flags, order);
+}
+
 enum dma_cache_op {
        DMA_UNMAP,
        DMA_MAP,

+ 2 - 0
arch/x86/boot/compressed/eboot.c

@@ -1109,6 +1109,8 @@ struct boot_params *make_boot_params(struct efi_config *c)
 	if (!cmdline_ptr)
 		goto fail;
 	hdr->cmd_line_ptr = (unsigned long)cmdline_ptr;
+	/* Fill in upper bits of command line address, NOP on 32 bit  */
+	boot_params->ext_cmd_line_ptr = (u64)(unsigned long)cmdline_ptr >> 32;
 
 	hdr->ramdisk_image = 0;
 	hdr->ramdisk_size = 0;

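The new line above stores the upper half of a 64-bit pointer in ext_cmd_line_ptr while hdr->cmd_line_ptr keeps the low 32 bits. A hedged sketch (hypothetical helper name; field names as in the hunk) of how a consumer would reassemble the full address:

    /* Rebuild the 64-bit command line address from the two 32-bit fields. */
    static u64 full_cmd_line_ptr(const struct boot_params *bp)
    {
            u64 addr = bp->hdr.cmd_line_ptr;                /* low 32 bits  */

            addr |= (u64)bp->ext_cmd_line_ptr << 32;        /* high 32 bits */
            return addr;
    }

On 32-bit the shifted-in half is simply zero, matching the "NOP on 32 bit" comment.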
+ 1 - 1
arch/x86/include/asm/hypervisor.h

@@ -50,7 +50,7 @@ extern const struct hypervisor_x86 *x86_hyper;
 /* Recognized hypervisors */
 extern const struct hypervisor_x86 x86_hyper_vmware;
 extern const struct hypervisor_x86 x86_hyper_ms_hyperv;
-extern const struct hypervisor_x86 x86_hyper_xen_hvm;
+extern const struct hypervisor_x86 x86_hyper_xen;
 extern const struct hypervisor_x86 x86_hyper_kvm;
 
 extern void init_hypervisor(struct cpuinfo_x86 *c);

+ 1 - 1
arch/x86/include/asm/spinlock.h

@@ -169,7 +169,7 @@ static inline int arch_spin_is_contended(arch_spinlock_t *lock)
 	struct __raw_tickets tmp = READ_ONCE(lock->tickets);
 
 	tmp.head &= ~TICKET_SLOWPATH_FLAG;
-	return (tmp.tail - tmp.head) > TICKET_LOCK_INC;
+	return (__ticket_t)(tmp.tail - tmp.head) > TICKET_LOCK_INC;
 }
 #define arch_spin_is_contended	arch_spin_is_contended
 

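The added cast matters because the ticket fields are narrow unsigned types and the subtraction is promoted to int, so a tail that has wrapped past zero makes the difference negative. A small userspace illustration, with uint8_t standing in for __ticket_t and 2 standing in for TICKET_LOCK_INC:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint8_t tail = 0x02, head = 0xfe;   /* tail wrapped past zero */

            /* int promotion: 2 - 254 = -252, so this prints 0 (wrong). */
            printf("%d\n", (tail - head) > 2);
            /* cast to ticket width: (uint8_t)-252 == 4, prints 1 (right). */
            printf("%d\n", (uint8_t)(tail - head) > 2);
            return 0;
    }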
+ 5 - 0
arch/x86/include/asm/xen/page.h

@@ -269,4 +269,9 @@ static inline bool xen_arch_need_swiotlb(struct device *dev,
 	return false;
 }
 
+static inline unsigned long xen_get_swiotlb_free_pages(unsigned int order)
+{
+	return __get_free_pages(__GFP_NOWARN, order);
+}
+
 #endif /* _ASM_X86_XEN_PAGE_H */

+ 2 - 2
arch/x86/kernel/cpu/hypervisor.c

@@ -27,8 +27,8 @@
 
 static const __initconst struct hypervisor_x86 * const hypervisors[] =
 {
-#ifdef CONFIG_XEN_PVHVM
-	&x86_hyper_xen_hvm,
+#ifdef CONFIG_XEN
+	&x86_hyper_xen,
 #endif
 	&x86_hyper_vmware,
 	&x86_hyper_ms_hyperv,

+ 41 - 32
arch/x86/kernel/cpu/perf_event_intel.c

@@ -1134,7 +1134,7 @@ static __initconst const u64 slm_hw_cache_extra_regs
  [ C(LL  ) ] = {
 	[ C(OP_READ) ] = {
 		[ C(RESULT_ACCESS) ] = SLM_DMND_READ|SLM_LLC_ACCESS,
-		[ C(RESULT_MISS)   ] = SLM_DMND_READ|SLM_LLC_MISS,
+		[ C(RESULT_MISS)   ] = 0,
 	},
 	[ C(OP_WRITE) ] = {
 		[ C(RESULT_ACCESS) ] = SLM_DMND_WRITE|SLM_LLC_ACCESS,
@@ -1184,8 +1184,7 @@ static __initconst const u64 slm_hw_cache_event_ids
 	[ C(OP_READ) ] = {
 		/* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
 		[ C(RESULT_ACCESS) ] = 0x01b7,
-		/* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
-		[ C(RESULT_MISS)   ] = 0x01b7,
+		[ C(RESULT_MISS)   ] = 0,
 	},
 	[ C(OP_WRITE) ] = {
 		/* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
@@ -1217,7 +1216,7 @@ static __initconst const u64 slm_hw_cache_event_ids
  [ C(ITLB) ] = {
 	[ C(OP_READ) ] = {
 		[ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
-		[ C(RESULT_MISS)   ] = 0x0282, /* ITLB.MISSES */
+		[ C(RESULT_MISS)   ] = 0x40205, /* PAGE_WALKS.I_SIDE_WALKS */
 	},
 	[ C(OP_WRITE) ] = {
 		[ C(RESULT_ACCESS) ] = -1,
@@ -2533,34 +2532,6 @@ ssize_t intel_event_sysfs_show(char *page, u64 config)
 	return x86_event_sysfs_show(page, config, event);
 }
 
-static __initconst const struct x86_pmu core_pmu = {
-	.name			= "core",
-	.handle_irq		= x86_pmu_handle_irq,
-	.disable_all		= x86_pmu_disable_all,
-	.enable_all		= core_pmu_enable_all,
-	.enable			= core_pmu_enable_event,
-	.disable		= x86_pmu_disable_event,
-	.hw_config		= x86_pmu_hw_config,
-	.schedule_events	= x86_schedule_events,
-	.eventsel		= MSR_ARCH_PERFMON_EVENTSEL0,
-	.perfctr		= MSR_ARCH_PERFMON_PERFCTR0,
-	.event_map		= intel_pmu_event_map,
-	.max_events		= ARRAY_SIZE(intel_perfmon_event_map),
-	.apic			= 1,
-	/*
-	 * Intel PMCs cannot be accessed sanely above 32 bit width,
-	 * so we install an artificial 1<<31 period regardless of
-	 * the generic event period:
-	 */
-	.max_period		= (1ULL << 31) - 1,
-	.get_event_constraints	= intel_get_event_constraints,
-	.put_event_constraints	= intel_put_event_constraints,
-	.event_constraints	= intel_core_event_constraints,
-	.guest_get_msrs		= core_guest_get_msrs,
-	.format_attrs		= intel_arch_formats_attr,
-	.events_sysfs_show	= intel_event_sysfs_show,
-};
-
 struct intel_shared_regs *allocate_shared_regs(int cpu)
 {
 	struct intel_shared_regs *regs;
@@ -2743,6 +2714,44 @@ static struct attribute *intel_arch3_formats_attr[] = {
 	NULL,
 };
 
+static __initconst const struct x86_pmu core_pmu = {
+	.name			= "core",
+	.handle_irq		= x86_pmu_handle_irq,
+	.disable_all		= x86_pmu_disable_all,
+	.enable_all		= core_pmu_enable_all,
+	.enable			= core_pmu_enable_event,
+	.disable		= x86_pmu_disable_event,
+	.hw_config		= x86_pmu_hw_config,
+	.schedule_events	= x86_schedule_events,
+	.eventsel		= MSR_ARCH_PERFMON_EVENTSEL0,
+	.perfctr		= MSR_ARCH_PERFMON_PERFCTR0,
+	.event_map		= intel_pmu_event_map,
+	.max_events		= ARRAY_SIZE(intel_perfmon_event_map),
+	.apic			= 1,
+	/*
+	 * Intel PMCs cannot be accessed sanely above 32-bit width,
+	 * so we install an artificial 1<<31 period regardless of
+	 * the generic event period:
+	 */
+	.max_period		= (1ULL<<31) - 1,
+	.get_event_constraints	= intel_get_event_constraints,
+	.put_event_constraints	= intel_put_event_constraints,
+	.event_constraints	= intel_core_event_constraints,
+	.guest_get_msrs		= core_guest_get_msrs,
+	.format_attrs		= intel_arch_formats_attr,
+	.events_sysfs_show	= intel_event_sysfs_show,
+
+	/*
+	 * Virtual (or funny metal) CPU can define x86_pmu.extra_regs
+	 * together with PMU version 1 and thus be using core_pmu with
+	 * shared_regs. We need following callbacks here to allocate
+	 * it properly.
+	 */
+	.cpu_prepare		= intel_pmu_cpu_prepare,
+	.cpu_starting		= intel_pmu_cpu_starting,
+	.cpu_dying		= intel_pmu_cpu_dying,
+};
+
 static __initconst const struct x86_pmu intel_pmu = {
 	.name			= "Intel",
 	.handle_irq		= intel_pmu_handle_irq,

+ 1 - 0
arch/x86/kernel/cpu/perf_event_intel_rapl.c

@@ -722,6 +722,7 @@ static int __init rapl_pmu_init(void)
 		break;
 	case 60: /* Haswell */
 	case 69: /* Haswell-Celeron */
+	case 61: /* Broadwell */
 		rapl_cntr_mask = RAPL_IDX_HSW;
 		rapl_pmu_events_group.attrs = rapl_events_hsw_attr;
 		break;

+ 12 - 0
arch/x86/kernel/cpu/perf_event_intel_uncore_snb.c

@@ -1,6 +1,13 @@
 /* Nehalem/SandBridge/Haswell uncore support */
 #include "perf_event_intel_uncore.h"
 
+/* Uncore IMC PCI IDs */
+#define PCI_DEVICE_ID_INTEL_SNB_IMC	0x0100
+#define PCI_DEVICE_ID_INTEL_IVB_IMC	0x0154
+#define PCI_DEVICE_ID_INTEL_IVB_E3_IMC	0x0150
+#define PCI_DEVICE_ID_INTEL_HSW_IMC	0x0c00
+#define PCI_DEVICE_ID_INTEL_HSW_U_IMC	0x0a04
+
 /* SNB event control */
 #define SNB_UNC_CTL_EV_SEL_MASK			0x000000ff
 #define SNB_UNC_CTL_UMASK_MASK			0x0000ff00
@@ -472,6 +479,10 @@ static const struct pci_device_id hsw_uncore_pci_ids[] = {
 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_HSW_IMC),
 		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
 	},
+	{ /* IMC */
+		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_HSW_U_IMC),
+		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+	},
 	{ /* end: all zeroes */ },
 };
 
@@ -502,6 +513,7 @@ static const struct imc_uncore_pci_dev desktop_imc_pci_ids[] = {
 	IMC_DEV(IVB_IMC, &ivb_uncore_pci_driver),    /* 3rd Gen Core processor */
 	IMC_DEV(IVB_E3_IMC, &ivb_uncore_pci_driver), /* Xeon E3-1200 v2/3rd Gen Core processor */
 	IMC_DEV(HSW_IMC, &hsw_uncore_pci_driver),    /* 4th Gen Core Processor */
+	IMC_DEV(HSW_U_IMC, &hsw_uncore_pci_driver),  /* 4th Gen Core ULT Mobile Processor */
 	{  /* end marker */ }
 };
 

+ 8 - 6
arch/x86/kernel/process.c

@@ -57,7 +57,7 @@ __visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss) = {
 	.io_bitmap		= { [0 ... IO_BITMAP_LONGS] = ~0 },
 #endif
 };
-EXPORT_PER_CPU_SYMBOL_GPL(cpu_tss);
+EXPORT_PER_CPU_SYMBOL(cpu_tss);
 
 #ifdef CONFIG_X86_64
 static DEFINE_PER_CPU(unsigned char, is_idle);
@@ -156,11 +156,13 @@ void flush_thread(void)
 		/* FPU state will be reallocated lazily at the first use. */
 		drop_fpu(tsk);
 		free_thread_xstate(tsk);
-	} else if (!used_math()) {
-		/* kthread execs. TODO: cleanup this horror. */
-		if (WARN_ON(init_fpu(tsk)))
-			force_sig(SIGKILL, tsk);
-		user_fpu_begin();
+	} else {
+		if (!tsk_used_math(tsk)) {
+			/* kthread execs. TODO: cleanup this horror. */
+			if (WARN_ON(init_fpu(tsk)))
+				force_sig(SIGKILL, tsk);
+			user_fpu_begin();
+		}
 		restore_init_xstate();
 	}
 }

+ 8 - 6
arch/x86/mm/ioremap.c

@@ -351,18 +351,20 @@ int arch_ioremap_pmd_supported(void)
  */
 void *xlate_dev_mem_ptr(phys_addr_t phys)
 {
-	void *addr;
-	unsigned long start = phys & PAGE_MASK;
+	unsigned long start  = phys &  PAGE_MASK;
+	unsigned long offset = phys & ~PAGE_MASK;
+	unsigned long vaddr;
 
 	/* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
 	if (page_is_ram(start >> PAGE_SHIFT))
 		return __va(phys);
 
-	addr = (void __force *)ioremap_cache(start, PAGE_SIZE);
-	if (addr)
-		addr = (void *)((unsigned long)addr | (phys & ~PAGE_MASK));
+	vaddr = (unsigned long)ioremap_cache(start, PAGE_SIZE);
+	/* Only add the offset on success and return NULL if the ioremap() failed: */
+	if (vaddr)
+		vaddr += offset;
 
-	return addr;
+	return (void *)vaddr;
 }
 
 void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)

+ 22 - 2
arch/x86/pci/acpi.c

@@ -325,6 +325,26 @@ static void release_pci_root_info(struct pci_host_bridge *bridge)
 	kfree(info);
 }
 
+/*
+ * An IO port or MMIO resource assigned to a PCI host bridge may be
+ * consumed by the host bridge itself or available to its child
+ * bus/devices. The ACPI specification defines a bit (Producer/Consumer)
+ * to tell whether the resource is consumed by the host bridge itself,
+ * but firmware hasn't used that bit consistently, so we can't rely on it.
+ *
+ * On x86 and IA64 platforms, all IO port and MMIO resources are assumed
+ * to be available to child bus/devices except one special case:
+ *     IO port [0xCF8-0xCFF] is consumed by the host bridge itself
+ *     to access PCI configuration space.
+ *
+ * So explicitly filter out PCI CFG IO ports[0xCF8-0xCFF].
+ */
+static bool resource_is_pcicfg_ioport(struct resource *res)
+{
+	return (res->flags & IORESOURCE_IO) &&
+		res->start == 0xCF8 && res->end == 0xCFF;
+}
+
 static void probe_pci_root_info(struct pci_root_info *info,
 				struct acpi_device *device,
 				int busnum, int domain,
@@ -346,8 +366,8 @@ static void probe_pci_root_info(struct pci_root_info *info,
 			"no IO and memory resources present in _CRS\n");
 	else
 		resource_list_for_each_entry_safe(entry, tmp, list) {
-			if ((entry->res->flags & IORESOURCE_WINDOW) == 0 ||
-			    (entry->res->flags & IORESOURCE_DISABLED))
+			if ((entry->res->flags & IORESOURCE_DISABLED) ||
+			    resource_is_pcicfg_ioport(entry->res))
 				resource_list_destroy_entry(entry);
 			else
 				entry->res->name = info->name;

+ 18 - 9
arch/x86/xen/enlighten.c

@@ -1760,6 +1760,9 @@ static struct notifier_block xen_hvm_cpu_notifier = {
 
 static void __init xen_hvm_guest_init(void)
 {
+	if (xen_pv_domain())
+		return;
+
 	init_hvm_pv_info();
 
 	xen_hvm_init_shared_info();
@@ -1775,6 +1778,7 @@ static void __init xen_hvm_guest_init(void)
 	xen_hvm_init_time_ops();
 	xen_hvm_init_mmu_ops();
 }
+#endif
 
 static bool xen_nopv = false;
 static __init int xen_parse_nopv(char *arg)
@@ -1784,14 +1788,11 @@ static __init int xen_parse_nopv(char *arg)
 }
 early_param("xen_nopv", xen_parse_nopv);
 
-static uint32_t __init xen_hvm_platform(void)
+static uint32_t __init xen_platform(void)
 {
 	if (xen_nopv)
 		return 0;
 
-	if (xen_pv_domain())
-		return 0;
-
 	return xen_cpuid_base();
 }
 
@@ -1809,11 +1810,19 @@ bool xen_hvm_need_lapic(void)
 }
 EXPORT_SYMBOL_GPL(xen_hvm_need_lapic);
 
-const struct hypervisor_x86 x86_hyper_xen_hvm __refconst = {
-	.name			= "Xen HVM",
-	.detect			= xen_hvm_platform,
+static void xen_set_cpu_features(struct cpuinfo_x86 *c)
+{
+	if (xen_pv_domain())
+		clear_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);
+}
+
+const struct hypervisor_x86 x86_hyper_xen = {
+	.name			= "Xen",
+	.detect			= xen_platform,
+#ifdef CONFIG_XEN_PVHVM
 	.init_platform		= xen_hvm_guest_init,
+#endif
 	.x2apic_available	= xen_x2apic_para_available,
+	.set_cpu_features       = xen_set_cpu_features,
 };
-EXPORT_SYMBOL(x86_hyper_xen_hvm);
-#endif
+EXPORT_SYMBOL(x86_hyper_xen);

+ 10 - 0
arch/x86/xen/suspend.c

@@ -88,7 +88,17 @@ static void xen_vcpu_notify_restore(void *data)
 	tick_resume_local();
 }
 
+static void xen_vcpu_notify_suspend(void *data)
+{
+	tick_suspend_local();
+}
+
 void xen_arch_resume(void)
 {
 	on_each_cpu(xen_vcpu_notify_restore, NULL, 1);
 }
+
+void xen_arch_suspend(void)
+{
+	on_each_cpu(xen_vcpu_notify_suspend, NULL, 1);
+}

+ 2 - 0
drivers/acpi/acpi_pnp.c

@@ -304,6 +304,8 @@ static const struct acpi_device_id acpi_pnp_device_ids[] = {
 	{"PNPb006"},
 	{"PNPb006"},
 	/* cs423x-pnpbios */
 	/* cs423x-pnpbios */
 	{"CSC0100"},
 	{"CSC0100"},
+	{"CSC0103"},
+	{"CSC0110"},
 	{"CSC0000"},
 	{"CSC0000"},
 	{"GIM0100"},		/* Guillemot Turtlebeach something appears to be cs4232 compatible */
 	{"GIM0100"},		/* Guillemot Turtlebeach something appears to be cs4232 compatible */
 	/* es18xx-pnpbios */
 	/* es18xx-pnpbios */

+ 1 - 1
drivers/acpi/resource.c

@@ -573,7 +573,7 @@ EXPORT_SYMBOL_GPL(acpi_dev_get_resources);
  * @ares: Input ACPI resource object.
  * @types: Valid resource types of IORESOURCE_XXX
  *
- * This is a hepler function to support acpi_dev_get_resources(), which filters
+ * This is a helper function to support acpi_dev_get_resources(), which filters
  * ACPI resource objects according to resource types.
  */
 int acpi_dev_filter_resource_type(struct acpi_resource *ares,

+ 22 - 0
drivers/acpi/sbshc.c

@@ -14,6 +14,7 @@
 #include <linux/delay.h>
 #include <linux/module.h>
 #include <linux/interrupt.h>
+#include <linux/dmi.h>
 #include "sbshc.h"
 
 #define PREFIX "ACPI: "
@@ -87,6 +88,8 @@ enum acpi_smb_offset {
 	ACPI_SMB_ALARM_DATA = 0x26,	/* 2 bytes alarm data */
 };
 
+static bool macbook;
+
 static inline int smb_hc_read(struct acpi_smb_hc *hc, u8 address, u8 *data)
 {
 	return ec_read(hc->offset + address, data);
@@ -132,6 +135,8 @@ static int acpi_smbus_transaction(struct acpi_smb_hc *hc, u8 protocol,
 	}
 
 	mutex_lock(&hc->lock);
+	if (macbook)
+		udelay(5);
 	if (smb_hc_read(hc, ACPI_SMB_PROTOCOL, &temp))
 		goto end;
 	if (temp) {
@@ -257,12 +262,29 @@ extern int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit,
 			      acpi_handle handle, acpi_ec_query_func func,
 			      void *data);
 
+static int macbook_dmi_match(const struct dmi_system_id *d)
+{
+	pr_debug("Detected MacBook, enabling workaround\n");
+	macbook = true;
+	return 0;
+}
+
+static struct dmi_system_id acpi_smbus_dmi_table[] = {
+	{ macbook_dmi_match, "Apple MacBook", {
+	  DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
+	  DMI_MATCH(DMI_PRODUCT_NAME, "MacBook") },
+	},
+	{ },
+};
+
 static int acpi_smbus_hc_add(struct acpi_device *device)
 {
 	int status;
 	unsigned long long val;
 	struct acpi_smb_hc *hc;
 
+	dmi_check_system(acpi_smbus_dmi_table);
+
 	if (!device)
 		return -EINVAL;
 

+ 11 - 24
drivers/block/xen-blkback/blkback.c

@@ -265,17 +265,6 @@ static void put_persistent_gnt(struct xen_blkif *blkif,
 	atomic_dec(&blkif->persistent_gnt_in_use);
 }
 
-static void free_persistent_gnts_unmap_callback(int result,
-						struct gntab_unmap_queue_data *data)
-{
-	struct completion *c = data->data;
-
-	/* BUG_ON used to reproduce existing behaviour,
-	   but is this the best way to deal with this? */
-	BUG_ON(result);
-	complete(c);
-}
-
 static void free_persistent_gnts(struct xen_blkif *blkif, struct rb_root *root,
                                  unsigned int num)
 {
@@ -285,12 +274,7 @@ static void free_persistent_gnts(struct xen_blkif *blkif, struct rb_root *root,
 	struct rb_node *n;
 	int segs_to_unmap = 0;
 	struct gntab_unmap_queue_data unmap_data;
-	struct completion unmap_completion;
 
-	init_completion(&unmap_completion);
-
-	unmap_data.data = &unmap_completion;
-	unmap_data.done = &free_persistent_gnts_unmap_callback;
 	unmap_data.pages = pages;
 	unmap_data.unmap_ops = unmap;
 	unmap_data.kunmap_ops = NULL;
@@ -310,8 +294,7 @@ static void free_persistent_gnts(struct xen_blkif *blkif, struct rb_root *root,
 			!rb_next(&persistent_gnt->node)) {
 
 			unmap_data.count = segs_to_unmap;
-			gnttab_unmap_refs_async(&unmap_data);
-			wait_for_completion(&unmap_completion);
+			BUG_ON(gnttab_unmap_refs_sync(&unmap_data));
 
 			put_free_pages(blkif, pages, segs_to_unmap);
 			segs_to_unmap = 0;
@@ -329,8 +312,13 @@ void xen_blkbk_unmap_purged_grants(struct work_struct *work)
 	struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
 	struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
 	struct persistent_gnt *persistent_gnt;
-	int ret, segs_to_unmap = 0;
+	int segs_to_unmap = 0;
 	struct xen_blkif *blkif = container_of(work, typeof(*blkif), persistent_purge_work);
+	struct gntab_unmap_queue_data unmap_data;
+
+	unmap_data.pages = pages;
+	unmap_data.unmap_ops = unmap;
+	unmap_data.kunmap_ops = NULL;
 
 	while(!list_empty(&blkif->persistent_purge_list)) {
 		persistent_gnt = list_first_entry(&blkif->persistent_purge_list,
@@ -346,17 +334,16 @@ void xen_blkbk_unmap_purged_grants(struct work_struct *work)
 		pages[segs_to_unmap] = persistent_gnt->page;
 
 		if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
-			ret = gnttab_unmap_refs(unmap, NULL, pages,
-				segs_to_unmap);
-			BUG_ON(ret);
+			unmap_data.count = segs_to_unmap;
+			BUG_ON(gnttab_unmap_refs_sync(&unmap_data));
 			put_free_pages(blkif, pages, segs_to_unmap);
 			segs_to_unmap = 0;
 		}
 		kfree(persistent_gnt);
 	}
 	if (segs_to_unmap > 0) {
-		ret = gnttab_unmap_refs(unmap, NULL, pages, segs_to_unmap);
-		BUG_ON(ret);
+		unmap_data.count = segs_to_unmap;
+		BUG_ON(gnttab_unmap_refs_sync(&unmap_data));
 		put_free_pages(blkif, pages, segs_to_unmap);
 	}
 }

+ 23 - 0
drivers/block/zram/zram_drv.c

@@ -74,6 +74,27 @@ static inline struct zram *dev_to_zram(struct device *dev)
 	return (struct zram *)dev_to_disk(dev)->private_data;
 }
 
+static ssize_t compact_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t len)
+{
+	unsigned long nr_migrated;
+	struct zram *zram = dev_to_zram(dev);
+	struct zram_meta *meta;
+
+	down_read(&zram->init_lock);
+	if (!init_done(zram)) {
+		up_read(&zram->init_lock);
+		return -EINVAL;
+	}
+
+	meta = zram->meta;
+	nr_migrated = zs_compact(meta->mem_pool);
+	atomic64_add(nr_migrated, &zram->stats.num_migrated);
+	up_read(&zram->init_lock);
+
+	return len;
+}
+
 static ssize_t disksize_show(struct device *dev,
 		struct device_attribute *attr, char *buf)
 {
@@ -1038,6 +1059,7 @@ static const struct block_device_operations zram_devops = {
 	.owner = THIS_MODULE
 };
 
+static DEVICE_ATTR_WO(compact);
 static DEVICE_ATTR_RW(disksize);
 static DEVICE_ATTR_RO(initstate);
 static DEVICE_ATTR_WO(reset);
@@ -1114,6 +1136,7 @@ static struct attribute *zram_disk_attrs[] = {
 	&dev_attr_num_writes.attr,
 	&dev_attr_failed_reads.attr,
 	&dev_attr_failed_writes.attr,
+	&dev_attr_compact.attr,
 	&dev_attr_invalid_io.attr,
 	&dev_attr_notify_free.attr,
 	&dev_attr_zero_pages.attr,

+ 9 - 9
drivers/char/hw_random/bcm63xx-rng.c

@@ -57,7 +57,7 @@ static void bcm63xx_rng_cleanup(struct hwrng *rng)
 	val &= ~RNG_EN;
 	__raw_writel(val, priv->regs + RNG_CTRL);
 
-	clk_didsable_unprepare(prov->clk);
+	clk_disable_unprepare(priv->clk);
 }
 
 static int bcm63xx_rng_data_present(struct hwrng *rng, int wait)
@@ -97,14 +97,14 @@ static int bcm63xx_rng_probe(struct platform_device *pdev)
 	priv->rng.name = pdev->name;
 	priv->rng.init = bcm63xx_rng_init;
 	priv->rng.cleanup = bcm63xx_rng_cleanup;
-	prov->rng.data_present = bcm63xx_rng_data_present;
+	priv->rng.data_present = bcm63xx_rng_data_present;
 	priv->rng.data_read = bcm63xx_rng_data_read;
 
 	priv->clk = devm_clk_get(&pdev->dev, "ipsec");
 	if (IS_ERR(priv->clk)) {
-		error = PTR_ERR(priv->clk);
-		dev_err(&pdev->dev, "no clock for device: %d\n", error);
-		return error;
+		ret = PTR_ERR(priv->clk);
+		dev_err(&pdev->dev, "no clock for device: %d\n", ret);
+		return ret;
 	}
 
 	if (!devm_request_mem_region(&pdev->dev, r->start,
@@ -120,11 +120,11 @@ static int bcm63xx_rng_probe(struct platform_device *pdev)
 		return -ENOMEM;
 	}
 
-	error = devm_hwrng_register(&pdev->dev, &priv->rng);
-	if (error) {
+	ret = devm_hwrng_register(&pdev->dev, &priv->rng);
+	if (ret) {
 		dev_err(&pdev->dev, "failed to register rng device: %d\n",
-			error);
-		return error;
+			ret);
+		return ret;
 	}
 
 	dev_info(&pdev->dev, "registered RNG driver\n");

+ 2 - 2
drivers/char/ipmi/ipmi_msghandler.c

@@ -2000,7 +2000,7 @@ static int smi_ipmb_proc_show(struct seq_file *m, void *v)
 		seq_printf(m, " %x", intf->channels[i].address);
 	seq_putc(m, '\n');
 
-	return seq_has_overflowed(m);
+	return 0;
 }
 
 static int smi_ipmb_proc_open(struct inode *inode, struct file *file)
@@ -2023,7 +2023,7 @@ static int smi_version_proc_show(struct seq_file *m, void *v)
 		   ipmi_version_major(&intf->bmc->id),
 		   ipmi_version_minor(&intf->bmc->id));
 
-	return seq_has_overflowed(m);
+	return 0;
 }
 
 static int smi_version_proc_open(struct inode *inode, struct file *file)

+ 9 - 7
drivers/char/ipmi/ipmi_si_intf.c

@@ -942,8 +942,7 @@ static void sender(void                *send_info,
 		 * If we are running to completion, start it and run
 		 * transactions until everything is clear.
 		 */
-		smi_info->curr_msg = msg;
-		smi_info->waiting_msg = NULL;
+		smi_info->waiting_msg = msg;
 
 		/*
 		 * Run to completion means we are single-threaded, no
@@ -2244,7 +2243,7 @@ static int ipmi_pnp_probe(struct pnp_dev *dev,
 	acpi_handle handle;
 	acpi_status status;
 	unsigned long long tmp;
-	int rv;
+	int rv = -EINVAL;
 
 	acpi_dev = pnp_acpi_device(dev);
 	if (!acpi_dev)
@@ -2262,8 +2261,10 @@ static int ipmi_pnp_probe(struct pnp_dev *dev,
 
 	/* _IFT tells us the interface type: KCS, BT, etc */
 	status = acpi_evaluate_integer(handle, "_IFT", NULL, &tmp);
-	if (ACPI_FAILURE(status))
+	if (ACPI_FAILURE(status)) {
+		dev_err(&dev->dev, "Could not find ACPI IPMI interface type\n");
 		goto err_free;
+	}
 
 	switch (tmp) {
 	case 1:
@@ -2276,6 +2277,7 @@ static int ipmi_pnp_probe(struct pnp_dev *dev,
 		info->si_type = SI_BT;
 		break;
 	case 4: /* SSIF, just ignore */
+		rv = -ENODEV;
 		goto err_free;
 	default:
 		dev_info(&dev->dev, "unknown IPMI type %lld\n", tmp);
@@ -2336,7 +2338,7 @@ static int ipmi_pnp_probe(struct pnp_dev *dev,
 
 err_free:
 	kfree(info);
-	return -EINVAL;
+	return rv;
 }
 
 static void ipmi_pnp_remove(struct pnp_dev *dev)
@@ -3080,7 +3082,7 @@ static int smi_type_proc_show(struct seq_file *m, void *v)
 
 	seq_printf(m, "%s\n", si_to_str[smi->si_type]);
 
-	return seq_has_overflowed(m);
+	return 0;
 }
 
 static int smi_type_proc_open(struct inode *inode, struct file *file)
@@ -3153,7 +3155,7 @@ static int smi_params_proc_show(struct seq_file *m, void *v)
 		   smi->irq,
 		   smi->slave_addr);
 
-	return seq_has_overflowed(m);
+	return 0;
 }
 
 static int smi_params_proc_open(struct inode *inode, struct file *file)

+ 178 - 35
drivers/char/ipmi/ipmi_ssif.c

@@ -31,7 +31,6 @@
  * interface into the I2C driver, I believe.
  * interface into the I2C driver, I believe.
  */
  */
 
 
-#include <linux/version.h>
 #if defined(MODVERSIONS)
 #if defined(MODVERSIONS)
 #include <linux/modversions.h>
 #include <linux/modversions.h>
 #endif
 #endif
@@ -166,6 +165,9 @@ enum ssif_stat_indexes {
 	/* Number of watchdog pretimeouts. */
 	/* Number of watchdog pretimeouts. */
 	SSIF_STAT_watchdog_pretimeouts,
 	SSIF_STAT_watchdog_pretimeouts,
 
 
+	/* Number of alers received. */
+	SSIF_STAT_alerts,
+
 	/* Always add statistics before this value, it must be last. */
 	/* Always add statistics before this value, it must be last. */
 	SSIF_NUM_STATS
 	SSIF_NUM_STATS
 };
 };
@@ -214,7 +216,16 @@ struct ssif_info {
 #define WDT_PRE_TIMEOUT_INT	0x08
 #define WDT_PRE_TIMEOUT_INT	0x08
 	unsigned char       msg_flags;
 	unsigned char       msg_flags;
 
 
+	u8		    global_enables;
 	bool		    has_event_buffer;
 	bool		    has_event_buffer;
+	bool		    supports_alert;
+
+	/*
+	 * Used to tell what we should do with alerts.  If we are
+	 * waiting on a response, read the data immediately.
+	 */
+	bool		    got_alert;
+	bool		    waiting_alert;
 
 
 	/*
 	/*
 	 * If set to true, this will request events the next time the
 	 * If set to true, this will request events the next time the
@@ -478,13 +489,13 @@ static int ipmi_ssif_thread(void *data)
 
 
 		if (ssif_info->i2c_read_write == I2C_SMBUS_WRITE) {
 		if (ssif_info->i2c_read_write == I2C_SMBUS_WRITE) {
 			result = i2c_smbus_write_block_data(
 			result = i2c_smbus_write_block_data(
-				ssif_info->client, SSIF_IPMI_REQUEST,
+				ssif_info->client, ssif_info->i2c_command,
 				ssif_info->i2c_data[0],
 				ssif_info->i2c_data[0],
 				ssif_info->i2c_data + 1);
 				ssif_info->i2c_data + 1);
 			ssif_info->done_handler(ssif_info, result, NULL, 0);
 			ssif_info->done_handler(ssif_info, result, NULL, 0);
 		} else {
 		} else {
 			result = i2c_smbus_read_block_data(
 			result = i2c_smbus_read_block_data(
-				ssif_info->client, SSIF_IPMI_RESPONSE,
+				ssif_info->client, ssif_info->i2c_command,
 				ssif_info->i2c_data);
 				ssif_info->i2c_data);
 			if (result < 0)
 			if (result < 0)
 				ssif_info->done_handler(ssif_info, result,
 				ssif_info->done_handler(ssif_info, result,
@@ -518,15 +529,12 @@ static int ssif_i2c_send(struct ssif_info *ssif_info,
 static void msg_done_handler(struct ssif_info *ssif_info, int result,
 static void msg_done_handler(struct ssif_info *ssif_info, int result,
 			     unsigned char *data, unsigned int len);
 			     unsigned char *data, unsigned int len);
 
 
-static void retry_timeout(unsigned long data)
+static void start_get(struct ssif_info *ssif_info)
 {
 {
-	struct ssif_info *ssif_info = (void *) data;
 	int rv;
 	int rv;
 
 
-	if (ssif_info->stopping)
-		return;
-
 	ssif_info->rtc_us_timer = 0;
 	ssif_info->rtc_us_timer = 0;
+	ssif_info->multi_pos = 0;
 
 
 	rv = ssif_i2c_send(ssif_info, msg_done_handler, I2C_SMBUS_READ,
 	rv = ssif_i2c_send(ssif_info, msg_done_handler, I2C_SMBUS_READ,
 			  SSIF_IPMI_RESPONSE,
 			  SSIF_IPMI_RESPONSE,
@@ -540,6 +548,46 @@ static void retry_timeout(unsigned long data)
 	}
 	}
 }
 }
 
 
+static void retry_timeout(unsigned long data)
+{
+	struct ssif_info *ssif_info = (void *) data;
+	unsigned long oflags, *flags;
+	bool waiting;
+
+	if (ssif_info->stopping)
+		return;
+
+	flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
+	waiting = ssif_info->waiting_alert;
+	ssif_info->waiting_alert = false;
+	ipmi_ssif_unlock_cond(ssif_info, flags);
+
+	if (waiting)
+		start_get(ssif_info);
+}
+
+
+static void ssif_alert(struct i2c_client *client, unsigned int data)
+{
+	struct ssif_info *ssif_info = i2c_get_clientdata(client);
+	unsigned long oflags, *flags;
+	bool do_get = false;
+
+	ssif_inc_stat(ssif_info, alerts);
+
+	flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
+	if (ssif_info->waiting_alert) {
+		ssif_info->waiting_alert = false;
+		del_timer(&ssif_info->retry_timer);
+		do_get = true;
+	} else if (ssif_info->curr_msg) {
+		ssif_info->got_alert = true;
+	}
+	ipmi_ssif_unlock_cond(ssif_info, flags);
+	if (do_get)
+		start_get(ssif_info);
+}
+
 static int start_resend(struct ssif_info *ssif_info);
 static int start_resend(struct ssif_info *ssif_info);
 
 
 static void msg_done_handler(struct ssif_info *ssif_info, int result,
 static void msg_done_handler(struct ssif_info *ssif_info, int result,
@@ -559,9 +607,12 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
 		if (ssif_info->retries_left > 0) {
 		if (ssif_info->retries_left > 0) {
 			ssif_inc_stat(ssif_info, receive_retries);
 			ssif_inc_stat(ssif_info, receive_retries);
 
 
+			flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
+			ssif_info->waiting_alert = true;
+			ssif_info->rtc_us_timer = SSIF_MSG_USEC;
 			mod_timer(&ssif_info->retry_timer,
 			mod_timer(&ssif_info->retry_timer,
 				  jiffies + SSIF_MSG_JIFFIES);
 				  jiffies + SSIF_MSG_JIFFIES);
-			ssif_info->rtc_us_timer = SSIF_MSG_USEC;
+			ipmi_ssif_unlock_cond(ssif_info, flags);
 			return;
 			return;
 		}
 		}
 
 
@@ -581,9 +632,9 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
 		ssif_inc_stat(ssif_info, received_message_parts);
 		ssif_inc_stat(ssif_info, received_message_parts);
 
 
 		/* Remove the multi-part read marker. */
 		/* Remove the multi-part read marker. */
-		for (i = 0; i < (len-2); i++)
-			ssif_info->data[i] = data[i+2];
 		len -= 2;
 		len -= 2;
+		for (i = 0; i < len; i++)
+			ssif_info->data[i] = data[i+2];
 		ssif_info->multi_len = len;
 		ssif_info->multi_len = len;
 		ssif_info->multi_pos = 1;
 		ssif_info->multi_pos = 1;
 
 
@@ -610,9 +661,9 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
 			goto continue_op;
 			goto continue_op;
 		}
 		}
 
 
-		blocknum = data[ssif_info->multi_len];
+		blocknum = data[0];
 
 
-		if (ssif_info->multi_len+len-1 > IPMI_MAX_MSG_LENGTH) {
+		if (ssif_info->multi_len + len - 1 > IPMI_MAX_MSG_LENGTH) {
 			/* Received message too big, abort the operation. */
 			/* Received message too big, abort the operation. */
 			result = -E2BIG;
 			result = -E2BIG;
 			if (ssif_info->ssif_debug & SSIF_DEBUG_MSG)
 			if (ssif_info->ssif_debug & SSIF_DEBUG_MSG)
@@ -622,15 +673,15 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
 		}
 		}
 
 
 		/* Remove the blocknum from the data. */
 		/* Remove the blocknum from the data. */
-		for (i = 0; i < (len-1); i++)
-			ssif_info->data[i+ssif_info->multi_len] = data[i+1];
 		len--;
 		len--;
+		for (i = 0; i < len; i++)
+			ssif_info->data[i + ssif_info->multi_len] = data[i + 1];
 		ssif_info->multi_len += len;
 		ssif_info->multi_len += len;
 		if (blocknum == 0xff) {
 		if (blocknum == 0xff) {
 			/* End of read */
 			/* End of read */
 			len = ssif_info->multi_len;
 			len = ssif_info->multi_len;
 			data = ssif_info->data;
 			data = ssif_info->data;
-		} else if ((blocknum+1) != ssif_info->multi_pos) {
+		} else if (blocknum + 1 != ssif_info->multi_pos) {
 			/*
 			/*
 			 * Out of sequence block, just abort.  Block
 			 * Out of sequence block, just abort.  Block
 			 * numbers start at zero for the second block,
 			 * numbers start at zero for the second block,
@@ -650,7 +701,7 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
 			if (rv < 0) {
 			if (rv < 0) {
 				if (ssif_info->ssif_debug & SSIF_DEBUG_MSG)
 				if (ssif_info->ssif_debug & SSIF_DEBUG_MSG)
 					pr_info(PFX
 					pr_info(PFX
-						"Error from i2c_non_blocking_op(2)\n");
+						"Error from ssif_i2c_send\n");
 
 
 				result = -EIO;
 				result = -EIO;
 			} else
 			} else
@@ -830,7 +881,11 @@ static void msg_written_handler(struct ssif_info *ssif_info, int result,
 	}
 	}
 
 
 	if (ssif_info->multi_data) {
 	if (ssif_info->multi_data) {
-		/* In the middle of a multi-data write. */
+		/*
+		 * In the middle of a multi-data write.  See the comment
+		 * in the SSIF_MULTI_n_PART case in the probe function
+		 * for details on the intricacies of this.
+		 */
 		int left;
 		int left;
 
 
 		ssif_inc_stat(ssif_info, sent_messages_parts);
 		ssif_inc_stat(ssif_info, sent_messages_parts);
@@ -864,15 +919,32 @@ static void msg_written_handler(struct ssif_info *ssif_info, int result,
 			msg_done_handler(ssif_info, -EIO, NULL, 0);
 			msg_done_handler(ssif_info, -EIO, NULL, 0);
 		}
 		}
 	} else {
 	} else {
+		unsigned long oflags, *flags;
+		bool got_alert;
+
 		ssif_inc_stat(ssif_info, sent_messages);
 		ssif_inc_stat(ssif_info, sent_messages);
 		ssif_inc_stat(ssif_info, sent_messages_parts);
 		ssif_inc_stat(ssif_info, sent_messages_parts);
 
 
-		/* Wait a jiffie then request the next message */
-		ssif_info->retries_left = SSIF_RECV_RETRIES;
-		ssif_info->rtc_us_timer = SSIF_MSG_PART_USEC;
-		mod_timer(&ssif_info->retry_timer,
-			  jiffies + SSIF_MSG_PART_JIFFIES);
-		return;
+		flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
+		got_alert = ssif_info->got_alert;
+		if (got_alert) {
+			ssif_info->got_alert = false;
+			ssif_info->waiting_alert = false;
+		}
+
+		if (got_alert) {
+			ipmi_ssif_unlock_cond(ssif_info, flags);
+			/* The alert already happened, try now. */
+			retry_timeout((unsigned long) ssif_info);
+		} else {
+			/* Wait a jiffie then request the next message */
+			ssif_info->waiting_alert = true;
+			ssif_info->retries_left = SSIF_RECV_RETRIES;
+			ssif_info->rtc_us_timer = SSIF_MSG_PART_USEC;
+			mod_timer(&ssif_info->retry_timer,
+				  jiffies + SSIF_MSG_PART_JIFFIES);
+			ipmi_ssif_unlock_cond(ssif_info, flags);
+		}
 	}
 	}
 }
 }
 
 
@@ -881,6 +953,8 @@ static int start_resend(struct ssif_info *ssif_info)
 	int rv;
 	int command;
 
+	ssif_info->got_alert = false;
+
 	if (ssif_info->data_len > 32) {
 		command = SSIF_IPMI_MULTI_PART_REQUEST_START;
 		ssif_info->multi_data = ssif_info->data;
@@ -915,7 +989,7 @@ static int start_send(struct ssif_info *ssif_info,
 		return -E2BIG;
 
 	ssif_info->retries_left = SSIF_SEND_RETRIES;
-	memcpy(ssif_info->data+1, data, len);
+	memcpy(ssif_info->data + 1, data, len);
 	ssif_info->data_len = len;
 	return start_resend(ssif_info);
 }
@@ -1200,7 +1274,7 @@ static int smi_type_proc_show(struct seq_file *m, void *v)
 {
 	seq_puts(m, "ssif\n");
 
-	return seq_has_overflowed(m);
+	return 0;
 }
 
 static int smi_type_proc_open(struct inode *inode, struct file *file)
@@ -1243,6 +1317,8 @@ static int smi_stats_proc_show(struct seq_file *m, void *v)
 		   ssif_get_stat(ssif_info, events));
 	seq_printf(m, "watchdog_pretimeouts:   %u\n",
 		   ssif_get_stat(ssif_info, watchdog_pretimeouts));
+	seq_printf(m, "alerts:                 %u\n",
+		   ssif_get_stat(ssif_info, alerts));
 	return 0;
 }
 
@@ -1258,6 +1334,23 @@ static const struct file_operations smi_stats_proc_ops = {
 	.release	= single_release,
 };
 
+static int strcmp_nospace(char *s1, char *s2)
+{
+	while (*s1 && *s2) {
+		while (isspace(*s1))
+			s1++;
+		while (isspace(*s2))
+			s2++;
+		if (*s1 > *s2)
+			return 1;
+		if (*s1 < *s2)
+			return -1;
+		s1++;
+		s2++;
+	}
+	return 0;
+}
+
 static struct ssif_addr_info *ssif_info_find(unsigned short addr,
 					     char *adapter_name,
 					     bool match_null_name)
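The strcmp_nospace() added above compares adapter names while skipping any whitespace on either side, so user-supplied names match regardless of spacing. A small standalone harness, with hypothetical inputs that are not from the driver, shows the behaviour:

#include <ctype.h>
#include <stdio.h>

/* Same logic as the strcmp_nospace() in the hunk above. */
static int strcmp_nospace(const char *s1, const char *s2)
{
	while (*s1 && *s2) {
		while (isspace((unsigned char)*s1))
			s1++;
		while (isspace((unsigned char)*s2))
			s2++;
		if (*s1 > *s2)
			return 1;
		if (*s1 < *s2)
			return -1;
		s1++;
		s2++;
	}
	return 0;
}

int main(void)
{
	/* Hypothetical adapter names. */
	printf("%d\n", strcmp_nospace("SMBus I801 adapter", "SMBusI801adapter")); /* 0: equal */
	printf("%d\n", strcmp_nospace("i2c-0", "i2c-1"));                         /* -1 */
	return 0;
}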
@@ -1272,8 +1365,10 @@ restart:
 					/* One is NULL and one is not */
 					continue;
 				}
-				if (strcmp(info->adapter_name, adapter_name))
-					/* Names to not match */
+				if (adapter_name &&
+				    strcmp_nospace(info->adapter_name,
+						   adapter_name))
+					/* Names do not match */
 					continue;
 			}
 			found = info;
@@ -1306,6 +1401,12 @@ static bool check_acpi(struct ssif_info *ssif_info, struct device *dev)
 	return false;
 }
 
+/*
+ * Global enables we care about.
+ */
+#define GLOBAL_ENABLES_MASK (IPMI_BMC_EVT_MSG_BUFF | IPMI_BMC_RCV_MSG_INTR | \
+			     IPMI_BMC_EVT_MSG_INTR)
+
 static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id)
 {
 	unsigned char     msg[3];
@@ -1391,13 +1492,33 @@ static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id)
 			break;
 
 		case SSIF_MULTI_2_PART:
-			if (ssif_info->max_xmit_msg_size > 64)
-				ssif_info->max_xmit_msg_size = 64;
+			if (ssif_info->max_xmit_msg_size > 63)
+				ssif_info->max_xmit_msg_size = 63;
 			if (ssif_info->max_recv_msg_size > 62)
 				ssif_info->max_recv_msg_size = 62;
 			break;
 
 		case SSIF_MULTI_n_PART:
+			/*
+			 * The specification is rather confusing at
+			 * this point, but I think I understand what
+			 * is meant.  At least I have a workable
+			 * solution.  With multi-part messages, you
+			 * cannot send a message that is a multiple of
+			 * 32-bytes in length, because the start and
+			 * middle messages are 32-bytes and the end
+			 * message must be at least one byte.  You
+			 * can't fudge on an extra byte, that would
+			 * screw up things like fru data writes.  So
+			 * we limit the length to 63 bytes.  That way
+			 * a 32-byte message gets sent as a single
+			 * part.  A larger message will be a 32-byte
+			 * start and the next message is always going
+			 * to be 1-31 bytes in length.  Not ideal, but
+			 * it should work.
+			 */
+			if (ssif_info->max_xmit_msg_size > 63)
+				ssif_info->max_xmit_msg_size = 63;
 			break;
 
 		default:
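The 63-byte cap in the comment above is easier to see with numbers: start and middle parts carry exactly 32 bytes and the end part must carry at least 1, so a 64-byte message would need 32 + 32 with nothing left for the end part. A tiny standalone sketch of the resulting splits, purely for illustration and not driver code:

#include <stdio.h>

/* Split a message of 'len' bytes into SSIF parts under the 63-byte cap:
 * up to 32 bytes is a single part, anything larger is a 32-byte start
 * plus a 1..31 byte end. */
static void show_split(int len)
{
	printf("%2d bytes: ", len);
	if (len <= 32)
		printf("single part\n");
	else
		printf("32-byte start + %d-byte end\n", len - 32);
}

int main(void)
{
	show_split(32);	/* single part */
	show_split(33);	/* 32 + 1 */
	show_split(63);	/* 32 + 31, the largest allowed */
	/* 64 would need 32 + 32, leaving no bytes for the end part. */
	return 0;
}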
@@ -1407,7 +1528,7 @@ static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id)
 	} else {
  no_support:
 		/* Assume no multi-part or PEC support */
-		pr_info(PFX "Error fetching SSIF: %d %d %2.2x, your system probably doesn't support this command so  using defaults\n",
+		pr_info(PFX "Error fetching SSIF: %d %d %2.2x, your system probably doesn't support this command so using defaults\n",
 		       rv, len, resp[2]);
 
 		ssif_info->max_xmit_msg_size = 32;
@@ -1436,6 +1557,8 @@ static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id)
 		goto found;
 	}
 
+	ssif_info->global_enables = resp[3];
+
 	if (resp[3] & IPMI_BMC_EVT_MSG_BUFF) {
 		ssif_info->has_event_buffer = true;
 		/* buffer is already enabled, nothing to do. */
@@ -1444,18 +1567,37 @@ static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id)
 
 	msg[0] = IPMI_NETFN_APP_REQUEST << 2;
 	msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
-	msg[2] = resp[3] | IPMI_BMC_EVT_MSG_BUFF;
+	msg[2] = ssif_info->global_enables | IPMI_BMC_EVT_MSG_BUFF;
 	rv = do_cmd(client, 3, msg, &len, resp);
 	if (rv || (len < 2)) {
-		pr_warn(PFX "Error getting global enables: %d %d %2.2x\n",
+		pr_warn(PFX "Error setting global enables: %d %d %2.2x\n",
 			rv, len, resp[2]);
 		rv = 0; /* Not fatal */
 		goto found;
 	}
 
-	if (resp[2] == 0)
+	if (resp[2] == 0) {
 		/* A successful return means the event buffer is supported. */
 		ssif_info->has_event_buffer = true;
+		ssif_info->global_enables |= IPMI_BMC_EVT_MSG_BUFF;
+	}
+
+	msg[0] = IPMI_NETFN_APP_REQUEST << 2;
+	msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
+	msg[2] = ssif_info->global_enables | IPMI_BMC_RCV_MSG_INTR;
+	rv = do_cmd(client, 3, msg, &len, resp);
+	if (rv || (len < 2)) {
+		pr_warn(PFX "Error setting global enables: %d %d %2.2x\n",
+			rv, len, resp[2]);
+		rv = 0; /* Not fatal */
+		goto found;
+	}
+
+	if (resp[2] == 0) {
+		/* A successful return means the alert is supported. */
+		ssif_info->supports_alert = true;
+		ssif_info->global_enables |= IPMI_BMC_RCV_MSG_INTR;
+	}
 
  found:
 	ssif_info->intf_num = atomic_inc_return(&next_intf);
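The probe sequence above uses the usual IPMI feature-detection idiom: optimistically set an enable bit, treat a zero completion code as "supported", and fold the bit into the cached global_enables. A compact sketch of that idiom; the bit values and the do_set_enables() stand-in for the driver's do_cmd() transaction are assumptions for illustration only:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define EVT_MSG_BUFF  0x08	/* illustrative bit values */
#define RCV_MSG_INTR  0x01

/* Stand-in for do_cmd(); pretend this BMC only accepts the event
 * buffer bit and rejects anything else with completion code 0xCC. */
static int do_set_enables(uint8_t enables, uint8_t *cc)
{
	*cc = (enables & ~EVT_MSG_BUFF) ? 0xCC : 0x00;
	return 0;
}

/* Try one bit; on success (completion code 0) fold it into the cache. */
static bool try_enable(uint8_t *cached, uint8_t bit)
{
	uint8_t cc;

	if (do_set_enables(*cached | bit, &cc) || cc != 0)
		return false;
	*cached |= bit;
	return true;
}

int main(void)
{
	uint8_t enables = 0;

	printf("event buffer: %d\n", try_enable(&enables, EVT_MSG_BUFF)); /* 1 */
	printf("msg intr:     %d\n", try_enable(&enables, RCV_MSG_INTR)); /* 0 */
	printf("enables now:  0x%02x\n", enables);
	return 0;
}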
@@ -1813,6 +1955,7 @@ static struct i2c_driver ssif_i2c_driver = {
 	},
 	.probe		= ssif_probe,
 	.remove		= ssif_remove,
+	.alert		= ssif_alert,
 	.id_table	= ssif_id,
 	.detect		= ssif_detect
 };
@@ -1832,7 +1975,7 @@ static int init_ipmi_ssif(void)
 		rv = new_ssif_client(addr[i], adapter_name[i],
 				     dbg[i], slave_addrs[i],
 				     SI_HARDCODED);
-		if (!rv)
+		if (rv)
 			pr_err(PFX
 			       "Couldn't add hardcoded device at addr 0x%x\n",
 			       addr[i]);

+ 3 - 3
drivers/firmware/efi/runtime-map.c

@@ -120,7 +120,8 @@ add_sysfs_runtime_map_entry(struct kobject *kobj, int nr)
 	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
 	if (!entry) {
 		kset_unregister(map_kset);
-		return entry;
+		map_kset = NULL;
+		return ERR_PTR(-ENOMEM);
 	}
 
 	memcpy(&entry->md, efi_runtime_map + nr * efi_memdesc_size,
@@ -132,6 +133,7 @@ add_sysfs_runtime_map_entry(struct kobject *kobj, int nr)
 	if (ret) {
 		kobject_put(&entry->kobj);
 		kset_unregister(map_kset);
+		map_kset = NULL;
 		return ERR_PTR(ret);
 	}
 
@@ -195,8 +197,6 @@ out_add_entry:
 		entry = *(map_entries + j);
 		kobject_put(&entry->kobj);
 	}
-	if (map_kset)
-		kset_unregister(map_kset);
 out:
 	return ret;
 }
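The fix above is the standard "clear the cached pointer when you release it" rule: the error paths now both unregister the kset and NULL out map_kset, so the caller's cleanup can no longer unregister it a second time. The same discipline in plain C, as an illustration only:

#include <stdlib.h>

/* Release *res and clear the caller's cached pointer in one step,
 * so later cleanup code sees NULL instead of a dangling pointer. */
static void release_and_clear(void **res)
{
	free(*res);
	*res = NULL;
}

int main(void)
{
	void *cached = malloc(16);

	release_and_clear(&cached);	/* error path */
	release_and_clear(&cached);	/* later cleanup: free(NULL) is a no-op */
	return 0;
}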

+ 3 - 10
drivers/infiniband/core/addr.c

@@ -472,13 +472,8 @@ int rdma_addr_find_dmac_by_grh(union ib_gid *sgid, union ib_gid *dgid, u8 *dmac,
 	} sgid_addr, dgid_addr;
 
 
-	ret = rdma_gid2ip(&sgid_addr._sockaddr, sgid);
-	if (ret)
-		return ret;
-
-	ret = rdma_gid2ip(&dgid_addr._sockaddr, dgid);
-	if (ret)
-		return ret;
+	rdma_gid2ip(&sgid_addr._sockaddr, sgid);
+	rdma_gid2ip(&dgid_addr._sockaddr, dgid);
 
 	memset(&dev_addr, 0, sizeof(dev_addr));
 
@@ -512,10 +507,8 @@ int rdma_addr_find_smac_by_sgid(union ib_gid *sgid, u8 *smac, u16 *vlan_id)
 		struct sockaddr_in6 _sockaddr_in6;
 	} gid_addr;
 
-	ret = rdma_gid2ip(&gid_addr._sockaddr, sgid);
+	rdma_gid2ip(&gid_addr._sockaddr, sgid);
 
-	if (ret)
-		return ret;
 	memset(&dev_addr, 0, sizeof(dev_addr));
 	ret = rdma_translate_ip(&gid_addr._sockaddr, &dev_addr, vlan_id);
 	if (ret)

+ 11 - 12
drivers/infiniband/core/cm.c

@@ -437,39 +437,38 @@ static struct cm_id_private * cm_acquire_id(__be32 local_id, __be32 remote_id)
 	return cm_id_priv;
 }
 
-static void cm_mask_copy(u8 *dst, u8 *src, u8 *mask)
+static void cm_mask_copy(u32 *dst, const u32 *src, const u32 *mask)
 {
 	int i;
 
-	for (i = 0; i < IB_CM_COMPARE_SIZE / sizeof(unsigned long); i++)
-		((unsigned long *) dst)[i] = ((unsigned long *) src)[i] &
-					     ((unsigned long *) mask)[i];
+	for (i = 0; i < IB_CM_COMPARE_SIZE; i++)
+		dst[i] = src[i] & mask[i];
 }
 
 static int cm_compare_data(struct ib_cm_compare_data *src_data,
 			   struct ib_cm_compare_data *dst_data)
 {
-	u8 src[IB_CM_COMPARE_SIZE];
-	u8 dst[IB_CM_COMPARE_SIZE];
+	u32 src[IB_CM_COMPARE_SIZE];
+	u32 dst[IB_CM_COMPARE_SIZE];
 
 	if (!src_data || !dst_data)
 		return 0;
 
 	cm_mask_copy(src, src_data->data, dst_data->mask);
 	cm_mask_copy(dst, dst_data->data, src_data->mask);
-	return memcmp(src, dst, IB_CM_COMPARE_SIZE);
+	return memcmp(src, dst, sizeof(src));
 }
 
-static int cm_compare_private_data(u8 *private_data,
+static int cm_compare_private_data(u32 *private_data,
 				   struct ib_cm_compare_data *dst_data)
 {
-	u8 src[IB_CM_COMPARE_SIZE];
+	u32 src[IB_CM_COMPARE_SIZE];
 
 	if (!dst_data)
 		return 0;
 
 	cm_mask_copy(src, private_data, dst_data->mask);
-	return memcmp(src, dst_data->data, IB_CM_COMPARE_SIZE);
+	return memcmp(src, dst_data->data, sizeof(src));
 }
 
 /*
@@ -538,7 +537,7 @@ static struct cm_id_private * cm_insert_listen(struct cm_id_private *cm_id_priv)
 
 static struct cm_id_private * cm_find_listen(struct ib_device *device,
 					     __be64 service_id,
-					     u8 *private_data)
+					     u32 *private_data)
 {
 	struct rb_node *node = cm.listen_service_table.rb_node;
 	struct cm_id_private *cm_id_priv;
@@ -953,7 +952,7 @@ int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id, __be64 service_mask,
 		cm_mask_copy(cm_id_priv->compare_data->data,
 			     compare_data->data, compare_data->mask);
 		memcpy(cm_id_priv->compare_data->mask, compare_data->mask,
-		       IB_CM_COMPARE_SIZE);
+		       sizeof(compare_data->mask));
 	}
 
 	cm_id->state = IB_CM_LISTEN;
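The rewritten cm_mask_copy() makes the masked comparison explicit: each buffer is ANDed word-by-word with the other side's mask and the results are compared bytewise. A self-contained illustration of the same masked match; the size and values here are invented, with COMPARE_WORDS standing in for IB_CM_COMPARE_SIZE:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define COMPARE_WORDS 4	/* invented size for the sketch */

static void mask_copy(uint32_t *dst, const uint32_t *src, const uint32_t *mask)
{
	for (int i = 0; i < COMPARE_WORDS; i++)
		dst[i] = src[i] & mask[i];
}

int main(void)
{
	uint32_t a[COMPARE_WORDS]    = { 0x12345678, 0, 0, 0 };
	uint32_t b[COMPARE_WORDS]    = { 0x12340000, 0, 0, 0 };
	uint32_t mask[COMPARE_WORDS] = { 0xffff0000, 0, 0, 0 }; /* only the top half matters */
	uint32_t am[COMPARE_WORDS], bm[COMPARE_WORDS];

	mask_copy(am, a, mask);
	mask_copy(bm, b, mask);
	printf("match: %s\n", memcmp(am, bm, sizeof(am)) == 0 ? "yes" : "no");
	return 0;
}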

+ 2 - 2
drivers/infiniband/core/cm_msgs.h

@@ -103,7 +103,7 @@ struct cm_req_msg {
 	/* local ACK timeout:5, rsvd:3 */
 	u8 alt_offset139;
 
-	u8 private_data[IB_CM_REQ_PRIVATE_DATA_SIZE];
+	u32 private_data[IB_CM_REQ_PRIVATE_DATA_SIZE / sizeof(u32)];
 
 } __attribute__ ((packed));
 
@@ -801,7 +801,7 @@ struct cm_sidr_req_msg {
 	__be16 rsvd;
 	__be64 service_id;
 
-	u8 private_data[IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE];
+	u32 private_data[IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE / sizeof(u32)];
 } __attribute__ ((packed));
 
 struct cm_sidr_rep_msg {

+ 17 - 10
drivers/infiniband/core/cma.c

@@ -859,19 +859,27 @@ static void cma_save_ib_info(struct rdma_cm_id *id, struct rdma_cm_id *listen_id
 	memcpy(&ib->sib_addr, &path->dgid, 16);
 }
 
+static __be16 ss_get_port(const struct sockaddr_storage *ss)
+{
+	if (ss->ss_family == AF_INET)
+		return ((struct sockaddr_in *)ss)->sin_port;
+	else if (ss->ss_family == AF_INET6)
+		return ((struct sockaddr_in6 *)ss)->sin6_port;
+	BUG();
+}
+
 static void cma_save_ip4_info(struct rdma_cm_id *id, struct rdma_cm_id *listen_id,
 			      struct cma_hdr *hdr)
 {
-	struct sockaddr_in *listen4, *ip4;
+	struct sockaddr_in *ip4;
 
-	listen4 = (struct sockaddr_in *) &listen_id->route.addr.src_addr;
 	ip4 = (struct sockaddr_in *) &id->route.addr.src_addr;
-	ip4->sin_family = listen4->sin_family;
+	ip4->sin_family = AF_INET;
 	ip4->sin_addr.s_addr = hdr->dst_addr.ip4.addr;
-	ip4->sin_port = listen4->sin_port;
+	ip4->sin_port = ss_get_port(&listen_id->route.addr.src_addr);
 
 	ip4 = (struct sockaddr_in *) &id->route.addr.dst_addr;
-	ip4->sin_family = listen4->sin_family;
+	ip4->sin_family = AF_INET;
 	ip4->sin_addr.s_addr = hdr->src_addr.ip4.addr;
 	ip4->sin_port = hdr->port;
 }
@@ -879,16 +887,15 @@ static void cma_save_ip4_info(struct rdma_cm_id *id, struct rdma_cm_id *listen_i
 static void cma_save_ip6_info(struct rdma_cm_id *id, struct rdma_cm_id *listen_id,
 			      struct cma_hdr *hdr)
 {
-	struct sockaddr_in6 *listen6, *ip6;
+	struct sockaddr_in6 *ip6;
 
-	listen6 = (struct sockaddr_in6 *) &listen_id->route.addr.src_addr;
 	ip6 = (struct sockaddr_in6 *) &id->route.addr.src_addr;
-	ip6->sin6_family = listen6->sin6_family;
+	ip6->sin6_family = AF_INET6;
 	ip6->sin6_addr = hdr->dst_addr.ip6;
-	ip6->sin6_port = listen6->sin6_port;
+	ip6->sin6_port = ss_get_port(&listen_id->route.addr.src_addr);
 
 	ip6 = (struct sockaddr_in6 *) &id->route.addr.dst_addr;
-	ip6->sin6_family = listen6->sin6_family;
+	ip6->sin6_family = AF_INET6;
 	ip6->sin6_addr = hdr->src_addr.ip6;
 	ip6->sin6_port = hdr->port;
 }
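ss_get_port() exists because the listener's stored address family may not match the family being saved, so the port has to be pulled out family-by-family rather than through a single blind cast. A userspace twin of the helper is easy to test; the BUG() branch is replaced with a benign return here:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Userspace sketch of the kernel helper; returns the port in network order. */
static uint16_t ss_get_port(const struct sockaddr_storage *ss)
{
	if (ss->ss_family == AF_INET)
		return ((const struct sockaddr_in *)ss)->sin_port;
	if (ss->ss_family == AF_INET6)
		return ((const struct sockaddr_in6 *)ss)->sin6_port;
	return 0;	/* the kernel helper BUG()s here instead */
}

int main(void)
{
	struct sockaddr_storage ss;
	struct sockaddr_in *in = (struct sockaddr_in *)&ss;

	memset(&ss, 0, sizeof(ss));
	in->sin_family = AF_INET;
	in->sin_port = htons(4791);
	printf("port = %u\n", ntohs(ss_get_port(&ss)));
	return 0;
}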

+ 72 - 1
drivers/infiniband/core/iwpm_msg.c

@@ -468,7 +468,8 @@ add_mapping_response_exit:
 }
 EXPORT_SYMBOL(iwpm_add_mapping_cb);
 
-/* netlink attribute policy for the response to add and query mapping request */
+/* netlink attribute policy for the response to add and query mapping request
+ * and response with remote address info */
 static const struct nla_policy resp_query_policy[IWPM_NLA_RQUERY_MAPPING_MAX] = {
 	[IWPM_NLA_QUERY_MAPPING_SEQ]      = { .type = NLA_U32 },
 	[IWPM_NLA_QUERY_LOCAL_ADDR]       = { .len = sizeof(struct sockaddr_storage) },
@@ -559,6 +560,76 @@ query_mapping_response_exit:
 }
 EXPORT_SYMBOL(iwpm_add_and_query_mapping_cb);
 
+/*
+ * iwpm_remote_info_cb - Process a port mapper message, containing
+ *			  the remote connecting peer address info
+ */
+int iwpm_remote_info_cb(struct sk_buff *skb, struct netlink_callback *cb)
+{
+	struct nlattr *nltb[IWPM_NLA_RQUERY_MAPPING_MAX];
+	struct sockaddr_storage *local_sockaddr, *remote_sockaddr;
+	struct sockaddr_storage *mapped_loc_sockaddr, *mapped_rem_sockaddr;
+	struct iwpm_remote_info *rem_info;
+	const char *msg_type;
+	u8 nl_client;
+	int ret = -EINVAL;
+
+	msg_type = "Remote Mapping info";
+	if (iwpm_parse_nlmsg(cb, IWPM_NLA_RQUERY_MAPPING_MAX,
+				resp_query_policy, nltb, msg_type))
+		return ret;
+
+	nl_client = RDMA_NL_GET_CLIENT(cb->nlh->nlmsg_type);
+	if (!iwpm_valid_client(nl_client)) {
+		pr_info("%s: Invalid port mapper client = %d\n",
+				__func__, nl_client);
+		return ret;
+	}
+	atomic_set(&echo_nlmsg_seq, cb->nlh->nlmsg_seq);
+
+	local_sockaddr = (struct sockaddr_storage *)
+			nla_data(nltb[IWPM_NLA_QUERY_LOCAL_ADDR]);
+	remote_sockaddr = (struct sockaddr_storage *)
+			nla_data(nltb[IWPM_NLA_QUERY_REMOTE_ADDR]);
+	mapped_loc_sockaddr = (struct sockaddr_storage *)
+			nla_data(nltb[IWPM_NLA_RQUERY_MAPPED_LOC_ADDR]);
+	mapped_rem_sockaddr = (struct sockaddr_storage *)
+			nla_data(nltb[IWPM_NLA_RQUERY_MAPPED_REM_ADDR]);
+
+	if (mapped_loc_sockaddr->ss_family != local_sockaddr->ss_family ||
+		mapped_rem_sockaddr->ss_family != remote_sockaddr->ss_family) {
+		pr_info("%s: Sockaddr family doesn't match the requested one\n",
+				__func__);
+		return ret;
+	}
+	rem_info = kzalloc(sizeof(struct iwpm_remote_info), GFP_ATOMIC);
+	if (!rem_info) {
+		pr_err("%s: Unable to allocate a remote info\n", __func__);
+		ret = -ENOMEM;
+		return ret;
+	}
+	memcpy(&rem_info->mapped_loc_sockaddr, mapped_loc_sockaddr,
+	       sizeof(struct sockaddr_storage));
+	memcpy(&rem_info->remote_sockaddr, remote_sockaddr,
+	       sizeof(struct sockaddr_storage));
+	memcpy(&rem_info->mapped_rem_sockaddr, mapped_rem_sockaddr,
+	       sizeof(struct sockaddr_storage));
+	rem_info->nl_client = nl_client;
+
+	iwpm_add_remote_info(rem_info);
+
+	iwpm_print_sockaddr(local_sockaddr,
+			"remote_info: Local sockaddr:");
+	iwpm_print_sockaddr(mapped_loc_sockaddr,
+			"remote_info: Mapped local sockaddr:");
+	iwpm_print_sockaddr(remote_sockaddr,
+			"remote_info: Remote sockaddr:");
+	iwpm_print_sockaddr(mapped_rem_sockaddr,
+			"remote_info: Mapped remote sockaddr:");
+	return ret;
+}
+EXPORT_SYMBOL(iwpm_remote_info_cb);
+
 /* netlink attribute policy for the received request for mapping info */
 static const struct nla_policy resp_mapinfo_policy[IWPM_NLA_MAPINFO_REQ_MAX] = {
 	[IWPM_NLA_MAPINFO_ULIB_NAME] = { .type = NLA_STRING,

+ 175 - 33
drivers/infiniband/core/iwpm_util.c

@@ -33,8 +33,10 @@
 
 #include "iwpm_util.h"
 
-#define IWPM_HASH_BUCKET_SIZE	512
-#define IWPM_HASH_BUCKET_MASK	(IWPM_HASH_BUCKET_SIZE - 1)
+#define IWPM_MAPINFO_HASH_SIZE	512
+#define IWPM_MAPINFO_HASH_MASK	(IWPM_MAPINFO_HASH_SIZE - 1)
+#define IWPM_REMINFO_HASH_SIZE	64
+#define IWPM_REMINFO_HASH_MASK	(IWPM_REMINFO_HASH_SIZE - 1)
 
 static LIST_HEAD(iwpm_nlmsg_req_list);
 static DEFINE_SPINLOCK(iwpm_nlmsg_req_lock);
@@ -42,31 +44,49 @@ static DEFINE_SPINLOCK(iwpm_nlmsg_req_lock);
 static struct hlist_head *iwpm_hash_bucket;
 static DEFINE_SPINLOCK(iwpm_mapinfo_lock);
 
+static struct hlist_head *iwpm_reminfo_bucket;
+static DEFINE_SPINLOCK(iwpm_reminfo_lock);
+
 static DEFINE_MUTEX(iwpm_admin_lock);
 static struct iwpm_admin_data iwpm_admin;
 
 int iwpm_init(u8 nl_client)
 {
+	int ret = 0;
 	if (iwpm_valid_client(nl_client))
 		return -EINVAL;
 	mutex_lock(&iwpm_admin_lock);
 	if (atomic_read(&iwpm_admin.refcount) == 0) {
-		iwpm_hash_bucket = kzalloc(IWPM_HASH_BUCKET_SIZE *
+		iwpm_hash_bucket = kzalloc(IWPM_MAPINFO_HASH_SIZE *
 					sizeof(struct hlist_head), GFP_KERNEL);
 		if (!iwpm_hash_bucket) {
-			mutex_unlock(&iwpm_admin_lock);
+			ret = -ENOMEM;
 			pr_err("%s Unable to create mapinfo hash table\n", __func__);
-			return -ENOMEM;
+			goto init_exit;
+		}
+		iwpm_reminfo_bucket = kzalloc(IWPM_REMINFO_HASH_SIZE *
+					sizeof(struct hlist_head), GFP_KERNEL);
+		if (!iwpm_reminfo_bucket) {
+			kfree(iwpm_hash_bucket);
+			ret = -ENOMEM;
+			pr_err("%s Unable to create reminfo hash table\n", __func__);
+			goto init_exit;
 		}
 	}
 	atomic_inc(&iwpm_admin.refcount);
+init_exit:
 	mutex_unlock(&iwpm_admin_lock);
-	iwpm_set_valid(nl_client, 1);
-	return 0;
+	if (!ret) {
+		iwpm_set_valid(nl_client, 1);
+		pr_debug("%s: Mapinfo and reminfo tables are created\n",
+				__func__);
+	}
+	return ret;
 }
 EXPORT_SYMBOL(iwpm_init);
 
 static void free_hash_bucket(void);
+static void free_reminfo_bucket(void);
 
 int iwpm_exit(u8 nl_client)
 {
@@ -81,7 +101,8 @@ int iwpm_exit(u8 nl_client)
 	}
 	if (atomic_dec_and_test(&iwpm_admin.refcount)) {
 		free_hash_bucket();
-		pr_debug("%s: Mapinfo hash table is destroyed\n", __func__);
+		free_reminfo_bucket();
+		pr_debug("%s: Resources are destroyed\n", __func__);
 	}
 	mutex_unlock(&iwpm_admin_lock);
 	iwpm_set_valid(nl_client, 0);
@@ -89,7 +110,7 @@ int iwpm_exit(u8 nl_client)
 }
 EXPORT_SYMBOL(iwpm_exit);
 
-static struct hlist_head *get_hash_bucket_head(struct sockaddr_storage *,
+static struct hlist_head *get_mapinfo_hash_bucket(struct sockaddr_storage *,
 					       struct sockaddr_storage *);
 
 int iwpm_create_mapinfo(struct sockaddr_storage *local_sockaddr,
@@ -99,9 +120,10 @@ int iwpm_create_mapinfo(struct sockaddr_storage *local_sockaddr,
 	struct hlist_head *hash_bucket_head;
 	struct iwpm_mapping_info *map_info;
 	unsigned long flags;
+	int ret = -EINVAL;
 
 	if (!iwpm_valid_client(nl_client))
-		return -EINVAL;
+		return ret;
 	map_info = kzalloc(sizeof(struct iwpm_mapping_info), GFP_KERNEL);
 	if (!map_info) {
 		pr_err("%s: Unable to allocate a mapping info\n", __func__);
@@ -115,13 +137,16 @@ int iwpm_create_mapinfo(struct sockaddr_storage *local_sockaddr,
 
 	spin_lock_irqsave(&iwpm_mapinfo_lock, flags);
 	if (iwpm_hash_bucket) {
-		hash_bucket_head = get_hash_bucket_head(
+		hash_bucket_head = get_mapinfo_hash_bucket(
 					&map_info->local_sockaddr,
 					&map_info->mapped_sockaddr);
-		hlist_add_head(&map_info->hlist_node, hash_bucket_head);
+		if (hash_bucket_head) {
+			hlist_add_head(&map_info->hlist_node, hash_bucket_head);
+			ret = 0;
+		}
 	}
 	spin_unlock_irqrestore(&iwpm_mapinfo_lock, flags);
-	return 0;
+	return ret;
 }
 EXPORT_SYMBOL(iwpm_create_mapinfo);
 
@@ -136,9 +161,12 @@ int iwpm_remove_mapinfo(struct sockaddr_storage *local_sockaddr,
 
 	spin_lock_irqsave(&iwpm_mapinfo_lock, flags);
 	if (iwpm_hash_bucket) {
-		hash_bucket_head = get_hash_bucket_head(
+		hash_bucket_head = get_mapinfo_hash_bucket(
 					local_sockaddr,
 					mapped_local_addr);
+		if (!hash_bucket_head)
+			goto remove_mapinfo_exit;
+
 		hlist_for_each_entry_safe(map_info, tmp_hlist_node,
 					hash_bucket_head, hlist_node) {
 
@@ -152,6 +180,7 @@ int iwpm_remove_mapinfo(struct sockaddr_storage *local_sockaddr,
 			}
 		}
 	}
+remove_mapinfo_exit:
 	spin_unlock_irqrestore(&iwpm_mapinfo_lock, flags);
 	return ret;
 }
@@ -166,7 +195,7 @@ static void free_hash_bucket(void)
 
 	/* remove all the mapinfo data from the list */
 	spin_lock_irqsave(&iwpm_mapinfo_lock, flags);
-	for (i = 0; i < IWPM_HASH_BUCKET_SIZE; i++) {
+	for (i = 0; i < IWPM_MAPINFO_HASH_SIZE; i++) {
 		hlist_for_each_entry_safe(map_info, tmp_hlist_node,
 			&iwpm_hash_bucket[i], hlist_node) {
 
@@ -180,6 +209,96 @@ static void free_hash_bucket(void)
 	spin_unlock_irqrestore(&iwpm_mapinfo_lock, flags);
 }
 
+static void free_reminfo_bucket(void)
+{
+	struct hlist_node *tmp_hlist_node;
+	struct iwpm_remote_info *rem_info;
+	unsigned long flags;
+	int i;
+
+	/* remove all the remote info from the list */
+	spin_lock_irqsave(&iwpm_reminfo_lock, flags);
+	for (i = 0; i < IWPM_REMINFO_HASH_SIZE; i++) {
+		hlist_for_each_entry_safe(rem_info, tmp_hlist_node,
+			&iwpm_reminfo_bucket[i], hlist_node) {
+
+				hlist_del_init(&rem_info->hlist_node);
+				kfree(rem_info);
+			}
+	}
+	/* free the hash list */
+	kfree(iwpm_reminfo_bucket);
+	iwpm_reminfo_bucket = NULL;
+	spin_unlock_irqrestore(&iwpm_reminfo_lock, flags);
+}
+
+static struct hlist_head *get_reminfo_hash_bucket(struct sockaddr_storage *,
+						struct sockaddr_storage *);
+
+void iwpm_add_remote_info(struct iwpm_remote_info *rem_info)
+{
+	struct hlist_head *hash_bucket_head;
+	unsigned long flags;
+
+	spin_lock_irqsave(&iwpm_reminfo_lock, flags);
+	if (iwpm_reminfo_bucket) {
+		hash_bucket_head = get_reminfo_hash_bucket(
+					&rem_info->mapped_loc_sockaddr,
+					&rem_info->mapped_rem_sockaddr);
+		if (hash_bucket_head)
+			hlist_add_head(&rem_info->hlist_node, hash_bucket_head);
+	}
+	spin_unlock_irqrestore(&iwpm_reminfo_lock, flags);
+}
+
+int iwpm_get_remote_info(struct sockaddr_storage *mapped_loc_addr,
+				struct sockaddr_storage *mapped_rem_addr,
+				struct sockaddr_storage *remote_addr,
+				u8 nl_client)
+{
+	struct hlist_node *tmp_hlist_node;
+	struct hlist_head *hash_bucket_head;
+	struct iwpm_remote_info *rem_info = NULL;
+	unsigned long flags;
+	int ret = -EINVAL;
+
+	if (!iwpm_valid_client(nl_client)) {
+		pr_info("%s: Invalid client = %d\n", __func__, nl_client);
+		return ret;
+	}
+	spin_lock_irqsave(&iwpm_reminfo_lock, flags);
+	if (iwpm_reminfo_bucket) {
+		hash_bucket_head = get_reminfo_hash_bucket(
+					mapped_loc_addr,
+					mapped_rem_addr);
+		if (!hash_bucket_head)
+			goto get_remote_info_exit;
+		hlist_for_each_entry_safe(rem_info, tmp_hlist_node,
+					hash_bucket_head, hlist_node) {
+
+			if (!iwpm_compare_sockaddr(&rem_info->mapped_loc_sockaddr,
+				mapped_loc_addr) &&
+				!iwpm_compare_sockaddr(&rem_info->mapped_rem_sockaddr,
+				mapped_rem_addr)) {
+
+				memcpy(remote_addr, &rem_info->remote_sockaddr,
+					sizeof(struct sockaddr_storage));
+				iwpm_print_sockaddr(remote_addr,
+						"get_remote_info: Remote sockaddr:");
+
+				hlist_del_init(&rem_info->hlist_node);
+				kfree(rem_info);
+				ret = 0;
+				break;
+			}
+		}
+	}
+get_remote_info_exit:
+	spin_unlock_irqrestore(&iwpm_reminfo_lock, flags);
+	return ret;
+}
+EXPORT_SYMBOL(iwpm_get_remote_info);
+
 struct iwpm_nlmsg_request *iwpm_get_nlmsg_request(__u32 nlmsg_seq,
 					u8 nl_client, gfp_t gfp)
 {
@@ -409,31 +528,54 @@ static u32 iwpm_ipv4_jhash(struct sockaddr_in *ipv4_sockaddr)
 	return hash;
 }
 
-static struct hlist_head *get_hash_bucket_head(struct sockaddr_storage
-					       *local_sockaddr,
-					       struct sockaddr_storage
-					       *mapped_sockaddr)
+static int get_hash_bucket(struct sockaddr_storage *a_sockaddr,
+				struct sockaddr_storage *b_sockaddr, u32 *hash)
 {
-	u32 local_hash, mapped_hash, hash;
+	u32 a_hash, b_hash;
 
-	if (local_sockaddr->ss_family == AF_INET) {
-		local_hash = iwpm_ipv4_jhash((struct sockaddr_in *) local_sockaddr);
-		mapped_hash = iwpm_ipv4_jhash((struct sockaddr_in *) mapped_sockaddr);
+	if (a_sockaddr->ss_family == AF_INET) {
+		a_hash = iwpm_ipv4_jhash((struct sockaddr_in *) a_sockaddr);
+		b_hash = iwpm_ipv4_jhash((struct sockaddr_in *) b_sockaddr);
 
-	} else if (local_sockaddr->ss_family == AF_INET6) {
-		local_hash = iwpm_ipv6_jhash((struct sockaddr_in6 *) local_sockaddr);
-		mapped_hash = iwpm_ipv6_jhash((struct sockaddr_in6 *) mapped_sockaddr);
+	} else if (a_sockaddr->ss_family == AF_INET6) {
+		a_hash = iwpm_ipv6_jhash((struct sockaddr_in6 *) a_sockaddr);
+		b_hash = iwpm_ipv6_jhash((struct sockaddr_in6 *) b_sockaddr);
 	} else {
 		pr_err("%s: Invalid sockaddr family\n", __func__);
-		return NULL;
+		return -EINVAL;
 	}
 
-	if (local_hash == mapped_hash) /* if port mapper isn't available */
+	if (a_hash == b_hash) /* if port mapper isn't available */
-		hash = local_hash;
+		*hash = a_hash;
 	else
-		hash = jhash_2words(local_hash, mapped_hash, 0);
+		*hash = jhash_2words(a_hash, b_hash, 0);
+	return 0;
+}
+
+static struct hlist_head *get_mapinfo_hash_bucket(struct sockaddr_storage
+				*local_sockaddr, struct sockaddr_storage
+				*mapped_sockaddr)
+{
+	u32 hash;
+	int ret;
 
-	return &iwpm_hash_bucket[hash & IWPM_HASH_BUCKET_MASK];
+	ret = get_hash_bucket(local_sockaddr, mapped_sockaddr, &hash);
+	if (ret)
+		return NULL;
+	return &iwpm_hash_bucket[hash & IWPM_MAPINFO_HASH_MASK];
+}
+
+static struct hlist_head *get_reminfo_hash_bucket(struct sockaddr_storage
+				*mapped_loc_sockaddr, struct sockaddr_storage
+				*mapped_rem_sockaddr)
+{
+	u32 hash;
+	int ret;
+
+	ret = get_hash_bucket(mapped_loc_sockaddr, mapped_rem_sockaddr, &hash);
+	if (ret)
+		return NULL;
+	return &iwpm_reminfo_bucket[hash & IWPM_REMINFO_HASH_MASK];
 }
 
 static int send_mapinfo_num(u32 mapping_num, u8 nl_client, int iwpm_pid)
@@ -512,7 +654,7 @@ int iwpm_send_mapinfo(u8 nl_client, int iwpm_pid)
 	}
 	skb_num++;
 	spin_lock_irqsave(&iwpm_mapinfo_lock, flags);
-	for (i = 0; i < IWPM_HASH_BUCKET_SIZE; i++) {
+	for (i = 0; i < IWPM_MAPINFO_HASH_SIZE; i++) {
 		hlist_for_each_entry(map_info, &iwpm_hash_bucket[i],
 				     hlist_node) {
 			if (map_info->nl_client != nl_client)
@@ -595,7 +737,7 @@ int iwpm_mapinfo_available(void)
 
 	spin_lock_irqsave(&iwpm_mapinfo_lock, flags);
 	if (iwpm_hash_bucket) {
-		for (i = 0; i < IWPM_HASH_BUCKET_SIZE; i++) {
+		for (i = 0; i < IWPM_MAPINFO_HASH_SIZE; i++) {
 			if (!hlist_empty(&iwpm_hash_bucket[i])) {
 				full_bucket = 1;
 				break;
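The factored-out get_hash_bucket() folds two per-address hashes into one bucket index, falling back to the single hash when both addresses hash identically (the unmapped case noted in the comment). The shape of that computation, sketched in userspace with an invented mixer standing in for the kernel's jhash_2words():

#include <stdint.h>
#include <stdio.h>

#define HASH_SIZE 64			/* power of two, like the tables above... */
#define HASH_MASK (HASH_SIZE - 1)

/* Stand-in for jhash_2words(); any reasonable 2-word mixer works here. */
static uint32_t mix2(uint32_t a, uint32_t b)
{
	uint32_t h = a * 0x9e3779b1u;

	h ^= b + 0x85ebca6bu + (h << 6) + (h >> 2);
	return h;
}

static uint32_t bucket_of(uint32_t a_hash, uint32_t b_hash)
{
	uint32_t h = (a_hash == b_hash) ? a_hash : mix2(a_hash, b_hash);

	return h & HASH_MASK;	/* ...so masking replaces a modulo */
}

int main(void)
{
	printf("bucket: %u\n", bucket_of(0xdeadbeef, 0xdeadbeef)); /* unmapped case */
	printf("bucket: %u\n", bucket_of(0xdeadbeef, 0x12345678));
	return 0;
}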

+ 15 - 0
drivers/infiniband/core/iwpm_util.h

@@ -76,6 +76,14 @@ struct iwpm_mapping_info {
 	u8     nl_client;
 };
 
+struct iwpm_remote_info {
+	struct hlist_node hlist_node;
+	struct sockaddr_storage remote_sockaddr;
+	struct sockaddr_storage mapped_loc_sockaddr;
+	struct sockaddr_storage mapped_rem_sockaddr;
+	u8     nl_client;
+};
+
 struct iwpm_admin_data {
 	atomic_t refcount;
 	atomic_t nlmsg_seq;
@@ -127,6 +135,13 @@ int iwpm_wait_complete_req(struct iwpm_nlmsg_request *nlmsg_request);
  */
 int iwpm_get_nlmsg_seq(void);
 
+/**
+ * iwpm_add_reminfo - Add remote address info of the connecting peer
+ *                    to the remote info hash table
+ * @reminfo: The remote info to be added
+ */
+void iwpm_add_remote_info(struct iwpm_remote_info *reminfo);
+
 /**
 * iwpm_valid_client - Check if the port mapper client is valid
 * @nl_client: The index of the netlink client

+ 7 - 7
drivers/infiniband/core/umem_odp.c

@@ -446,7 +446,6 @@ static int ib_umem_odp_map_dma_single_page(
 	int remove_existing_mapping = 0;
 	int ret = 0;
 
-	mutex_lock(&umem->odp_data->umem_mutex);
 	/*
 	 * Note: we avoid writing if seq is different from the initial seq, to
 	 * handle case of a racing notifier. This check also allows us to bail
@@ -479,8 +478,6 @@ static int ib_umem_odp_map_dma_single_page(
 	}
 
 out:
-	mutex_unlock(&umem->odp_data->umem_mutex);
-
 	/* On Demand Paging - avoid pinning the page */
 	if (umem->context->invalidate_range || !stored_page)
 		put_page(page);
@@ -586,6 +583,7 @@ int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 user_virt, u64 bcnt,
 
 		bcnt -= min_t(size_t, npages << PAGE_SHIFT, bcnt);
 		user_virt += npages << PAGE_SHIFT;
+		mutex_lock(&umem->odp_data->umem_mutex);
 		for (j = 0; j < npages; ++j) {
 			ret = ib_umem_odp_map_dma_single_page(
 				umem, k, base_virt_addr, local_page_list[j],
@@ -594,6 +592,7 @@ int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 user_virt, u64 bcnt,
 				break;
 			k++;
 		}
+		mutex_unlock(&umem->odp_data->umem_mutex);
 
 		if (ret < 0) {
 			/* Release left over pages when handling errors. */
@@ -633,12 +632,11 @@ void ib_umem_odp_unmap_dma_pages(struct ib_umem *umem, u64 virt,
 	 * faults from completion. We might be racing with other
 	 * invalidations, so we must make sure we free each page only
 	 * once. */
+	mutex_lock(&umem->odp_data->umem_mutex);
 	for (addr = virt; addr < bound; addr += (u64)umem->page_size) {
 		idx = (addr - ib_umem_start(umem)) / PAGE_SIZE;
-		mutex_lock(&umem->odp_data->umem_mutex);
 		if (umem->odp_data->page_list[idx]) {
 			struct page *page = umem->odp_data->page_list[idx];
-			struct page *head_page = compound_head(page);
 			dma_addr_t dma = umem->odp_data->dma_list[idx];
 			dma_addr_t dma_addr = dma & ODP_DMA_ADDR_MASK;
 
@@ -646,7 +644,8 @@ void ib_umem_odp_unmap_dma_pages(struct ib_umem *umem, u64 virt,
 
 			ib_dma_unmap_page(dev, dma_addr, PAGE_SIZE,
 					  DMA_BIDIRECTIONAL);
-			if (dma & ODP_WRITE_ALLOWED_BIT)
+			if (dma & ODP_WRITE_ALLOWED_BIT) {
+				struct page *head_page = compound_head(page);
 				/*
 				 * set_page_dirty prefers being called with
 				 * the page lock. However, MMU notifiers are
@@ -657,13 +656,14 @@ void ib_umem_odp_unmap_dma_pages(struct ib_umem *umem, u64 virt,
 				 * be removed.
 				 */
 				set_page_dirty(head_page);
+			}
 			/* on demand pinning support */
 			if (!umem->context->invalidate_range)
 				put_page(page);
 			umem->odp_data->page_list[idx] = NULL;
 			umem->odp_data->dma_list[idx] = 0;
 		}
-		mutex_unlock(&umem->odp_data->umem_mutex);
 	}
+	mutex_unlock(&umem->odp_data->umem_mutex);
 }
 EXPORT_SYMBOL(ib_umem_odp_unmap_dma_pages);
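The locking change above is a plain hoist: instead of taking and dropping umem_mutex once per page, the whole loop now runs under a single lock/unlock pair. A userspace sketch of the before/after shape, with pthreads and invented names, only to show the structure:

#include <pthread.h>

static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;

static void touch_page(int idx) { (void)idx; /* per-page work */ }

/* Before: one lock round-trip per iteration. */
static void unmap_per_page(int npages)
{
	for (int i = 0; i < npages; i++) {
		pthread_mutex_lock(&m);
		touch_page(i);
		pthread_mutex_unlock(&m);
	}
}

/* After: the loop body is unchanged, the lock is hoisted around it. */
static void unmap_hoisted(int npages)
{
	pthread_mutex_lock(&m);
	for (int i = 0; i < npages; i++)
		touch_page(i);
	pthread_mutex_unlock(&m);
}

int main(void)
{
	unmap_per_page(4);
	unmap_hoisted(4);
	return 0;
}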

+ 71 - 16
drivers/infiniband/hw/cxgb4/cm.c

@@ -583,6 +583,22 @@ static void c4iw_record_pm_msg(struct c4iw_ep *ep,
 		sizeof(ep->com.mapped_remote_addr));
 }
 
+static int get_remote_addr(struct c4iw_ep *ep)
+{
+	int ret;
+
+	print_addr(&ep->com, __func__, "get_remote_addr");
+
+	ret = iwpm_get_remote_info(&ep->com.mapped_local_addr,
+				   &ep->com.mapped_remote_addr,
+				   &ep->com.remote_addr, RDMA_NL_C4IW);
+	if (ret)
+		pr_info(MOD "Unable to find remote peer addr info - err %d\n",
+			ret);
+
+	return ret;
+}
+
 static void best_mtu(const unsigned short *mtus, unsigned short mtu,
 		     unsigned int *idx, int use_ts, int ipv6)
 {
@@ -675,7 +691,7 @@ static int send_connect(struct c4iw_ep *ep)
 	if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) {
 		opt2 |= T5_OPT_2_VALID_F;
 		opt2 |= CONG_CNTRL_V(CONG_ALG_TAHOE);
-		opt2 |= CONG_CNTRL_VALID; /* OPT_2_ISS for T5 */
+		opt2 |= T5_ISS_F;
 	}
 	t4_set_arp_err_handler(skb, ep, act_open_req_arp_failure);
 
@@ -2042,9 +2058,12 @@ static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
 	     status, status2errno(status));
 
 	if (is_neg_adv(status)) {
-		dev_warn(&dev->rdev.lldi.pdev->dev,
-			 "Connection problems for atid %u status %u (%s)\n",
-			 atid, status, neg_adv_str(status));
+		PDBG("%s Connection problems for atid %u status %u (%s)\n",
+		     __func__, atid, status, neg_adv_str(status));
+		ep->stats.connect_neg_adv++;
+		mutex_lock(&dev->rdev.stats.lock);
+		dev->rdev.stats.neg_adv++;
+		mutex_unlock(&dev->rdev.stats.lock);
 		return 0;
 	}
 
@@ -2214,7 +2233,7 @@ static void accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
 		u32 isn = (prandom_u32() & ~7UL) - 1;
 		opt2 |= T5_OPT_2_VALID_F;
 		opt2 |= CONG_CNTRL_V(CONG_ALG_TAHOE);
-		opt2 |= CONG_CNTRL_VALID; /* OPT_2_ISS for T5 */
+		opt2 |= T5_ISS_F;
 		rpl5 = (void *)rpl;
 		memset(&rpl5->iss, 0, roundup(sizeof(*rpl5)-sizeof(*rpl), 16));
 		if (peer2peer)
@@ -2352,27 +2371,57 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
 	state_set(&child_ep->com, CONNECTING);
 	child_ep->com.dev = dev;
 	child_ep->com.cm_id = NULL;
+
+	/*
+	 * The mapped_local and mapped_remote addresses get setup with
+	 * the actual 4-tuple.  The local address will be based on the
+	 * actual local address of the connection, but on the port number
+	 * of the parent listening endpoint.  The remote address is
+	 * setup based on a query to the IWPM since we don't know what it
+	 * originally was before mapping.  If no mapping was done, then
+	 * mapped_remote == remote, and mapped_local == local.
+	 */
 	if (iptype == 4) {
 		struct sockaddr_in *sin = (struct sockaddr_in *)
-			&child_ep->com.local_addr;
+			&child_ep->com.mapped_local_addr;
+
 		sin->sin_family = PF_INET;
 		sin->sin_port = local_port;
 		sin->sin_addr.s_addr = *(__be32 *)local_ip;
-		sin = (struct sockaddr_in *)&child_ep->com.remote_addr;
+
+		sin = (struct sockaddr_in *)&child_ep->com.local_addr;
+		sin->sin_family = PF_INET;
+		sin->sin_port = ((struct sockaddr_in *)
+				 &parent_ep->com.local_addr)->sin_port;
+		sin->sin_addr.s_addr = *(__be32 *)local_ip;
+
+		sin = (struct sockaddr_in *)&child_ep->com.mapped_remote_addr;
 		sin->sin_family = PF_INET;
 		sin->sin_port = peer_port;
 		sin->sin_addr.s_addr = *(__be32 *)peer_ip;
 	} else {
 		struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)
-			&child_ep->com.local_addr;
+			&child_ep->com.mapped_local_addr;
+
 		sin6->sin6_family = PF_INET6;
 		sin6->sin6_port = local_port;
 		memcpy(sin6->sin6_addr.s6_addr, local_ip, 16);
-		sin6 = (struct sockaddr_in6 *)&child_ep->com.remote_addr;
+
+		sin6 = (struct sockaddr_in6 *)&child_ep->com.local_addr;
+		sin6->sin6_family = PF_INET6;
+		sin6->sin6_port = ((struct sockaddr_in6 *)
+				   &parent_ep->com.local_addr)->sin6_port;
+		memcpy(sin6->sin6_addr.s6_addr, local_ip, 16);
+
+		sin6 = (struct sockaddr_in6 *)&child_ep->com.mapped_remote_addr;
 		sin6->sin6_family = PF_INET6;
 		sin6->sin6_port = peer_port;
 		memcpy(sin6->sin6_addr.s6_addr, peer_ip, 16);
 	}
+	memcpy(&child_ep->com.remote_addr, &child_ep->com.mapped_remote_addr,
+	       sizeof(child_ep->com.remote_addr));
+	get_remote_addr(child_ep);
+
 	c4iw_get_ep(&parent_ep->com);
 	child_ep->parent_ep = parent_ep;
 	child_ep->tos = PASS_OPEN_TOS_G(ntohl(req->tos_stid));
@@ -2520,9 +2569,13 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
 
 	ep = lookup_tid(t, tid);
 	if (is_neg_adv(req->status)) {
-		dev_warn(&dev->rdev.lldi.pdev->dev,
-			 "Negative advice on abort - tid %u status %d (%s)\n",
-			 ep->hwtid, req->status, neg_adv_str(req->status));
+		PDBG("%s Negative advice on abort- tid %u status %d (%s)\n",
+		     __func__, ep->hwtid, req->status,
+		     neg_adv_str(req->status));
+		ep->stats.abort_neg_adv++;
+		mutex_lock(&dev->rdev.stats.lock);
+		dev->rdev.stats.neg_adv++;
+		mutex_unlock(&dev->rdev.stats.lock);
 		return 0;
 	}
 	PDBG("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid,
@@ -3571,7 +3624,7 @@ static void send_fw_pass_open_req(struct c4iw_dev *dev, struct sk_buff *skb,
 	 * TP will ignore any value > 0 for MSS index.
 	 */
 	req->tcb.opt0 = cpu_to_be64(MSS_IDX_V(0xF));
-	req->cookie = (unsigned long)skb;
+	req->cookie = (uintptr_t)skb;
 
 	set_wr_txq(req_skb, CPL_PRIORITY_CONTROL, port_id);
 	ret = cxgb4_ofld_send(dev->rdev.lldi.ports[0], req_skb);
@@ -3931,9 +3984,11 @@ static int peer_abort_intr(struct c4iw_dev *dev, struct sk_buff *skb)
 		return 0;
 	}
 	if (is_neg_adv(req->status)) {
-		dev_warn(&dev->rdev.lldi.pdev->dev,
-			 "Negative advice on abort - tid %u status %d (%s)\n",
-			 ep->hwtid, req->status, neg_adv_str(req->status));
+		PDBG("%s Negative advice on abort- tid %u status %d (%s)\n",
+		     __func__, ep->hwtid, req->status,
+		     neg_adv_str(req->status));
+		ep->stats.abort_neg_adv++;
+		dev->rdev.stats.neg_adv++;
 		kfree_skb(skb);
 		return 0;
 	}

+ 14 - 8
drivers/infiniband/hw/cxgb4/cq.c

@@ -55,7 +55,7 @@ static int destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
 			FW_RI_RES_WR_NRES_V(1) |
 			FW_WR_COMPL_F);
 	res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
-	res_wr->cookie = (unsigned long) &wr_wait;
+	res_wr->cookie = (uintptr_t)&wr_wait;
 	res = res_wr->res;
 	res->u.cq.restype = FW_RI_RES_TYPE_CQ;
 	res->u.cq.op = FW_RI_RES_OP_RESET;
@@ -125,7 +125,7 @@ static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
 			FW_RI_RES_WR_NRES_V(1) |
 			FW_WR_COMPL_F);
 	res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
-	res_wr->cookie = (unsigned long) &wr_wait;
+	res_wr->cookie = (uintptr_t)&wr_wait;
 	res = res_wr->res;
 	res->u.cq.restype = FW_RI_RES_TYPE_CQ;
 	res->u.cq.op = FW_RI_RES_OP_WRITE;
@@ -156,12 +156,19 @@ static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
 		goto err4;
 
 	cq->gen = 1;
-	cq->gts = rdev->lldi.gts_reg;
 	cq->rdev = rdev;
 	if (user) {
-		cq->ugts = (u64)pci_resource_start(rdev->lldi.pdev, 2) +
-					(cq->cqid << rdev->cqshift);
-		cq->ugts &= PAGE_MASK;
+		u32 off = (cq->cqid << rdev->cqshift) & PAGE_MASK;
+
+		cq->ugts = (u64)rdev->bar2_pa + off;
+	} else if (is_t4(rdev->lldi.adapter_type)) {
+		cq->gts = rdev->lldi.gts_reg;
+		cq->qid_mask = -1U;
+	} else {
+		u32 off = ((cq->cqid << rdev->cqshift) & PAGE_MASK) + 12;
+
+		cq->gts = rdev->bar2_kva + off;
+		cq->qid_mask = rdev->qpmask;
 	}
 	return 0;
 err4:
@@ -970,8 +977,7 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries,
 	}
 	PDBG("%s cqid 0x%0x chp %p size %u memsize %zu, dma_addr 0x%0llx\n",
 	     __func__, chp->cq.cqid, chp, chp->cq.size,
-	     chp->cq.memsize,
-	     (unsigned long long) chp->cq.dma_addr);
+	     chp->cq.memsize, (unsigned long long) chp->cq.dma_addr);
 	return &chp->ibcq;
 err5:
 	kfree(mm2);
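Several hunks in this series replace (unsigned long) casts on cookie fields with (uintptr_t). Both are pointer-sized on the Linux targets this driver supports, but uintptr_t states the intent, round-tripping a pointer through an integer, and survives data models where long is narrower than a pointer. A minimal round-trip, as an illustration:

#include <assert.h>
#include <stdint.h>

struct work { int payload; };

int main(void)
{
	struct work w = { .payload = 42 };

	/* Stash the pointer in a 64-bit cookie, as the WR structs do. */
	uint64_t cookie = (uintptr_t)&w;

	/* Recover it later and use it. */
	struct work *back = (struct work *)(uintptr_t)cookie;

	assert(back->payload == 42);
	return 0;
}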

+ 34 - 3
drivers/infiniband/hw/cxgb4/device.c

@@ -93,6 +93,7 @@ static struct ibnl_client_cbs c4iw_nl_cb_table[] = {
 	[RDMA_NL_IWPM_ADD_MAPPING] = {.dump = iwpm_add_mapping_cb},
 	[RDMA_NL_IWPM_QUERY_MAPPING] = {.dump = iwpm_add_and_query_mapping_cb},
 	[RDMA_NL_IWPM_HANDLE_ERR] = {.dump = iwpm_mapping_error_cb},
+	[RDMA_NL_IWPM_REMOTE_INFO] = {.dump = iwpm_remote_info_cb},
 	[RDMA_NL_IWPM_MAPINFO] = {.dump = iwpm_mapping_info_cb},
 	[RDMA_NL_IWPM_MAPINFO_NUM] = {.dump = iwpm_ack_mapping_info_cb}
 };
@@ -151,7 +152,7 @@ static int wr_log_show(struct seq_file *seq, void *v)
 	int prev_ts_set = 0;
 	int idx, end;
 
-#define ts2ns(ts) div64_ul((ts) * dev->rdev.lldi.cclk_ps, 1000)
+#define ts2ns(ts) div64_u64((ts) * dev->rdev.lldi.cclk_ps, 1000)
 
 	idx = atomic_read(&dev->rdev.wr_log_idx) &
 		(dev->rdev.wr_log_size - 1);
@@ -489,6 +490,7 @@ static int stats_show(struct seq_file *seq, void *v)
 		   dev->rdev.stats.act_ofld_conn_fails);
 	seq_printf(seq, "PAS_OFLD_CONN_FAILS: %10llu\n",
 		   dev->rdev.stats.pas_ofld_conn_fails);
+	seq_printf(seq, "NEG_ADV_RCVD: %10llu\n", dev->rdev.stats.neg_adv);
 	seq_printf(seq, "AVAILABLE IRD: %10u\n", dev->avail_ird);
 	return 0;
 }
@@ -560,10 +562,13 @@ static int dump_ep(int id, void *p, void *data)
 		cc = snprintf(epd->buf + epd->pos, space,
 			      "ep %p cm_id %p qp %p state %d flags 0x%lx "
 			      "history 0x%lx hwtid %d atid %d "
+			      "conn_na %u abort_na %u "
 			      "%pI4:%d/%d <-> %pI4:%d/%d\n",
 			      ep, ep->com.cm_id, ep->com.qp,
 			      (int)ep->com.state, ep->com.flags,
 			      ep->com.history, ep->hwtid, ep->atid,
+			      ep->stats.connect_neg_adv,
+			      ep->stats.abort_neg_adv,
 			      &lsin->sin_addr, ntohs(lsin->sin_port),
 			      ntohs(mapped_lsin->sin_port),
 			      &rsin->sin_addr, ntohs(rsin->sin_port),
@@ -581,10 +586,13 @@ static int dump_ep(int id, void *p, void *data)
 		cc = snprintf(epd->buf + epd->pos, space,
 			      "ep %p cm_id %p qp %p state %d flags 0x%lx "
 			      "history 0x%lx hwtid %d atid %d "
+			      "conn_na %u abort_na %u "
 			      "%pI6:%d/%d <-> %pI6:%d/%d\n",
 			      ep, ep->com.cm_id, ep->com.qp,
 			      (int)ep->com.state, ep->com.flags,
 			      ep->com.history, ep->hwtid, ep->atid,
+			      ep->stats.connect_neg_adv,
+			      ep->stats.abort_neg_adv,
 			      &lsin6->sin6_addr, ntohs(lsin6->sin6_port),
 			      ntohs(mapped_lsin6->sin6_port),
 			      &rsin6->sin6_addr, ntohs(rsin6->sin6_port),
@@ -764,6 +772,29 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev)
 
 	c4iw_init_dev_ucontext(rdev, &rdev->uctx);
 
+	/*
+	 * This implementation assumes udb_density == ucq_density!  Eventually
+	 * we might need to support this but for now fail the open. Also the
+	 * cqid and qpid range must match for now.
+	 */
+	if (rdev->lldi.udb_density != rdev->lldi.ucq_density) {
+		pr_err(MOD "%s: unsupported udb/ucq densities %u/%u\n",
+		       pci_name(rdev->lldi.pdev), rdev->lldi.udb_density,
+		       rdev->lldi.ucq_density);
+		err = -EINVAL;
+		goto err1;
+	}
+	if (rdev->lldi.vr->qp.start != rdev->lldi.vr->cq.start ||
+	    rdev->lldi.vr->qp.size != rdev->lldi.vr->cq.size) {
+		pr_err(MOD "%s: unsupported qp and cq id ranges "
+		       "qp start %u size %u cq start %u size %u\n",
+		       pci_name(rdev->lldi.pdev), rdev->lldi.vr->qp.start,
+		       rdev->lldi.vr->qp.size, rdev->lldi.vr->cq.size,
+		       rdev->lldi.vr->cq.size);
+		err = -EINVAL;
+		goto err1;
+	}
+
 	/*
 	 * qpshift is the number of bits to shift the qpid left in order
 	 * to get the correct address of the doorbell for that qp.
@@ -784,10 +815,10 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev)
 	     rdev->lldi.vr->qp.size,
 	     rdev->lldi.vr->cq.start,
 	     rdev->lldi.vr->cq.size);
-	PDBG("udb len 0x%x udb base %llx db_reg %p gts_reg %p qpshift %lu "
+	PDBG("udb len 0x%x udb base %p db_reg %p gts_reg %p qpshift %lu "
 	     "qpmask 0x%x cqshift %lu cqmask 0x%x\n",
 	     (unsigned)pci_resource_len(rdev->lldi.pdev, 2),
-	     (u64)pci_resource_start(rdev->lldi.pdev, 2),
+	     (void *)pci_resource_start(rdev->lldi.pdev, 2),
 	     rdev->lldi.db_reg,
 	     rdev->lldi.gts_reg,
 	     rdev->qpshift, rdev->qpmask,

+ 7 - 0
drivers/infiniband/hw/cxgb4/iw_cxgb4.h

@@ -137,6 +137,7 @@ struct c4iw_stats {
 	u64  tcam_full;
 	u64  act_ofld_conn_fails;
 	u64  pas_ofld_conn_fails;
+	u64  neg_adv;
 };
 
 struct c4iw_hw_queue {
@@ -814,6 +815,11 @@ struct c4iw_listen_ep {
 	int backlog;
 };
 
+struct c4iw_ep_stats {
+	unsigned connect_neg_adv;
+	unsigned abort_neg_adv;
+};
+
 struct c4iw_ep {
 	struct c4iw_ep_common com;
 	struct c4iw_ep *parent_ep;
@@ -846,6 +852,7 @@ struct c4iw_ep {
 	unsigned int retry_count;
 	int snd_win;
 	int rcv_win;
+	struct c4iw_ep_stats stats;
 };
 
 static inline void print_addr(struct c4iw_ep_common *epc, const char *func,

+ 3 - 3
drivers/infiniband/hw/cxgb4/mem.c

@@ -144,7 +144,7 @@ static int _c4iw_write_mem_inline(struct c4iw_rdev *rdev, u32 addr, u32 len,
 		if (i == (num_wqe-1)) {
 			req->wr.wr_hi = cpu_to_be32(FW_WR_OP_V(FW_ULPTX_WR) |
 						    FW_WR_COMPL_F);
-			req->wr.wr_lo = (__force __be64)(unsigned long) &wr_wait;
+			req->wr.wr_lo = (__force __be64)&wr_wait;
 		} else
 			req->wr.wr_hi = cpu_to_be32(FW_WR_OP_V(FW_ULPTX_WR));
 		req->wr.wr_mid = cpu_to_be32(
@@ -676,12 +676,12 @@ struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc)
 	mhp->attr.zbva = 0;
 	mhp->attr.va_fbo = 0;
 	mhp->attr.page_size = 0;
-	mhp->attr.len = ~0UL;
+	mhp->attr.len = ~0ULL;
 	mhp->attr.pbl_size = 0;
 
 	ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, php->pdid,
 			      FW_RI_STAG_NSMR, mhp->attr.perms,
-			      mhp->attr.mw_bind_enable, 0, 0, ~0UL, 0, 0, 0);
+			      mhp->attr.mw_bind_enable, 0, 0, ~0ULL, 0, 0, 0);
 	if (ret)
 		goto err1;
 

+ 5 - 5
drivers/infiniband/hw/cxgb4/qp.c

@@ -275,7 +275,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
 			FW_RI_RES_WR_NRES_V(2) |
 			FW_WR_COMPL_F);
 	res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
-	res_wr->cookie = (unsigned long) &wr_wait;
+	res_wr->cookie = (uintptr_t)&wr_wait;
 	res = res_wr->res;
 	res->u.sqrq.restype = FW_RI_RES_TYPE_SQ;
 	res->u.sqrq.op = FW_RI_RES_OP_WRITE;
@@ -1209,7 +1209,7 @@ static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
 	wqe->flowid_len16 = cpu_to_be32(
 		FW_WR_FLOWID_V(ep->hwtid) |
 		FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*wqe), 16)));
-	wqe->cookie = (unsigned long) &ep->com.wr_wait;
+	wqe->cookie = (uintptr_t)&ep->com.wr_wait;
 
 	wqe->u.fini.type = FW_RI_TYPE_FINI;
 	ret = c4iw_ofld_send(&rhp->rdev, skb);
@@ -1279,7 +1279,7 @@ static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
 		FW_WR_FLOWID_V(qhp->ep->hwtid) |
 		FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*wqe), 16)));
 
-	wqe->cookie = (unsigned long) &qhp->ep->com.wr_wait;
+	wqe->cookie = (uintptr_t)&qhp->ep->com.wr_wait;
 
 	wqe->u.init.type = FW_RI_TYPE_INIT;
 	wqe->u.init.mpareqbit_p2ptype =
@@ -1766,11 +1766,11 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
 		mm2->len = PAGE_ALIGN(qhp->wq.rq.memsize);
 		insert_mmap(ucontext, mm2);
 		mm3->key = uresp.sq_db_gts_key;
-		mm3->addr = (__force unsigned long) qhp->wq.sq.udb;
+		mm3->addr = (__force unsigned long)qhp->wq.sq.udb;
 		mm3->len = PAGE_SIZE;
 		insert_mmap(ucontext, mm3);
 		mm4->key = uresp.rq_db_gts_key;
-		mm4->addr = (__force unsigned long) qhp->wq.rq.udb;
+		mm4->addr = (__force unsigned long)qhp->wq.rq.udb;
 		mm4->len = PAGE_SIZE;
 		insert_mmap(ucontext, mm4);
 		if (mm5) {

+ 4 - 3
drivers/infiniband/hw/cxgb4/t4.h

@@ -539,6 +539,7 @@ struct t4_cq {
 	size_t memsize;
 	__be64 bits_type_ts;
 	u32 cqid;
+	u32 qid_mask;
 	int vector;
 	u16 size; /* including status page */
 	u16 cidx;
@@ -563,12 +564,12 @@ static inline int t4_arm_cq(struct t4_cq *cq, int se)
 	set_bit(CQ_ARMED, &cq->flags);
 	while (cq->cidx_inc > CIDXINC_M) {
 		val = SEINTARM_V(0) | CIDXINC_V(CIDXINC_M) | TIMERREG_V(7) |
-		      INGRESSQID_V(cq->cqid);
+		      INGRESSQID_V(cq->cqid & cq->qid_mask);
 		writel(val, cq->gts);
 		cq->cidx_inc -= CIDXINC_M;
 	}
 	val = SEINTARM_V(se) | CIDXINC_V(cq->cidx_inc) | TIMERREG_V(6) |
-	      INGRESSQID_V(cq->cqid);
+	      INGRESSQID_V(cq->cqid & cq->qid_mask);
 	writel(val, cq->gts);
 	cq->cidx_inc = 0;
 	return 0;
@@ -601,7 +602,7 @@ static inline void t4_hwcq_consume(struct t4_cq *cq)
 		u32 val;
 
 		val = SEINTARM_V(0) | CIDXINC_V(cq->cidx_inc) | TIMERREG_V(7) |
-		      INGRESSQID_V(cq->cqid);
+		      INGRESSQID_V(cq->cqid & cq->qid_mask);
 		writel(val, cq->gts);
 		cq->cidx_inc = 0;
 	}

+ 3 - 1
drivers/infiniband/hw/cxgb4/t4fw_ri_api.h

@@ -848,6 +848,8 @@ enum {                     /* TCP congestion control algorithms */
 #define CONG_CNTRL_V(x) ((x) << CONG_CNTRL_S)
 #define CONG_CNTRL_G(x) (((x) >> CONG_CNTRL_S) & CONG_CNTRL_M)
 
-#define CONG_CNTRL_VALID   (1 << 18)
+#define T5_ISS_S    18
+#define T5_ISS_V(x) ((x) << T5_ISS_S)
+#define T5_ISS_F    T5_ISS_V(1U)
 
 #endif /* _T4FW_RI_API_H_ */
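
A worked expansion, for reference: T5_ISS_F == T5_ISS_V(1U) == (1U << T5_ISS_S) == (1U << 18), so the renamed flag occupies the same bit position as the removed CONG_CNTRL_VALID (1 << 18).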

+ 1 - 0
drivers/infiniband/hw/nes/nes.c

@@ -116,6 +116,7 @@ static struct ibnl_client_cbs nes_nl_cb_table[] = {
 	[RDMA_NL_IWPM_REG_PID] = {.dump = iwpm_register_pid_cb},
 	[RDMA_NL_IWPM_ADD_MAPPING] = {.dump = iwpm_add_mapping_cb},
 	[RDMA_NL_IWPM_QUERY_MAPPING] = {.dump = iwpm_add_and_query_mapping_cb},
+	[RDMA_NL_IWPM_REMOTE_INFO] = {.dump = iwpm_remote_info_cb},
 	[RDMA_NL_IWPM_HANDLE_ERR] = {.dump = iwpm_mapping_error_cb},
 	[RDMA_NL_IWPM_MAPINFO] = {.dump = iwpm_mapping_info_cb},
 	[RDMA_NL_IWPM_MAPINFO_NUM] = {.dump = iwpm_ack_mapping_info_cb}

+ 48 - 17
drivers/infiniband/hw/nes/nes_cm.c

@@ -596,27 +596,52 @@ static void nes_form_reg_msg(struct nes_vnic *nesvnic,
 	memcpy(pm_msg->if_name, nesvnic->netdev->name, IWPM_IFNAME_SIZE);
 }
 
+static void record_sockaddr_info(struct sockaddr_storage *addr_info,
+					nes_addr_t *ip_addr, u16 *port_num)
+{
+	struct sockaddr_in *in_addr = (struct sockaddr_in *)addr_info;
+
+	if (in_addr->sin_family == AF_INET) {
+		*ip_addr = ntohl(in_addr->sin_addr.s_addr);
+		*port_num = ntohs(in_addr->sin_port);
+	}
+}
+
 /*
  * nes_record_pm_msg - Save the received mapping info
  */
 static void nes_record_pm_msg(struct nes_cm_info *cm_info,
 			struct iwpm_sa_data *pm_msg)
 {
-	struct sockaddr_in *mapped_loc_addr =
-			(struct sockaddr_in *)&pm_msg->mapped_loc_addr;
-	struct sockaddr_in *mapped_rem_addr =
-			(struct sockaddr_in *)&pm_msg->mapped_rem_addr;
-
-	if (mapped_loc_addr->sin_family == AF_INET) {
-		cm_info->mapped_loc_addr =
-			ntohl(mapped_loc_addr->sin_addr.s_addr);
-		cm_info->mapped_loc_port = ntohs(mapped_loc_addr->sin_port);
-	}
-	if (mapped_rem_addr->sin_family == AF_INET) {
-		cm_info->mapped_rem_addr =
-			ntohl(mapped_rem_addr->sin_addr.s_addr);
-		cm_info->mapped_rem_port = ntohs(mapped_rem_addr->sin_port);
-	}
+	record_sockaddr_info(&pm_msg->mapped_loc_addr,
+		&cm_info->mapped_loc_addr, &cm_info->mapped_loc_port);
+
+	record_sockaddr_info(&pm_msg->mapped_rem_addr,
+		&cm_info->mapped_rem_addr, &cm_info->mapped_rem_port);
+}
+
+/*
+ * nes_get_remote_addr - Get the address info of the remote connecting peer
+ */
+static int nes_get_remote_addr(struct nes_cm_node *cm_node)
+{
+	struct sockaddr_storage mapped_loc_addr, mapped_rem_addr;
+	struct sockaddr_storage remote_addr;
+	int ret;
+
+	nes_create_sockaddr(htonl(cm_node->mapped_loc_addr),
+			htons(cm_node->mapped_loc_port), &mapped_loc_addr);
+	nes_create_sockaddr(htonl(cm_node->mapped_rem_addr),
+			htons(cm_node->mapped_rem_port), &mapped_rem_addr);
+
+	ret = iwpm_get_remote_info(&mapped_loc_addr, &mapped_rem_addr,
+				&remote_addr, RDMA_NL_NES);
+	if (ret)
+		nes_debug(NES_DBG_CM, "Unable to find remote peer address info\n");
+	else
+		record_sockaddr_info(&remote_addr, &cm_node->rem_addr,
+				&cm_node->rem_port);
+	return ret;
 }
 
 /**
@@ -1566,9 +1591,14 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
 		return NULL;
 
 	/* set our node specific transport info */
-	cm_node->loc_addr = cm_info->loc_addr;
+	if (listener) {
+		cm_node->loc_addr = listener->loc_addr;
+		cm_node->loc_port = listener->loc_port;
+	} else {
+		cm_node->loc_addr = cm_info->loc_addr;
+		cm_node->loc_port = cm_info->loc_port;
+	}
 	cm_node->rem_addr = cm_info->rem_addr;
-	cm_node->loc_port = cm_info->loc_port;
 	cm_node->rem_port = cm_info->rem_port;
 
 	cm_node->mapped_loc_addr = cm_info->mapped_loc_addr;
@@ -2151,6 +2181,7 @@ static int handle_ack_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
 		cm_node->state = NES_CM_STATE_ESTABLISHED;
 		if (datasize) {
 			cm_node->tcp_cntxt.rcv_nxt = inc_sequence + datasize;
+			nes_get_remote_addr(cm_node);
 			handle_rcv_mpa(cm_node, skb);
 		} else { /* rcvd ACK only */
 			dev_kfree_skb_any(skb);

+ 0 - 1
drivers/infiniband/hw/qib/qib.h

@@ -1136,7 +1136,6 @@ extern struct qib_devdata *qib_lookup(int unit);
 extern u32 qib_cpulist_count;
 extern unsigned long *qib_cpulist;
 
-extern unsigned qib_wc_pat;
 extern unsigned qib_cc_table_size;
 int qib_init(struct qib_devdata *, int);
 int init_chip_wc_pat(struct qib_devdata *dd, u32);

+ 2 - 1
drivers/infiniband/hw/qib/qib_file_ops.c

@@ -835,7 +835,8 @@ static int mmap_piobufs(struct vm_area_struct *vma,
 	vma->vm_flags &= ~VM_MAYREAD;
 	vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
 
-	if (qib_wc_pat)
+	/* We used PAT if wc_cookie == 0 */
+	if (!dd->wc_cookie)
 		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
 
 	ret = io_remap_pfn_range(vma, vma->vm_start, phys >> PAGE_SHIFT,

+ 3 - 5
drivers/infiniband/hw/qib/qib_iba6120.c

@@ -3315,11 +3315,9 @@ static int init_6120_variables(struct qib_devdata *dd)
 	qib_6120_config_ctxts(dd);
 	qib_set_ctxtcnt(dd);
 
-	if (qib_wc_pat) {
-		ret = init_chip_wc_pat(dd, 0);
-		if (ret)
-			goto bail;
-	}
+	ret = init_chip_wc_pat(dd, 0);
+	if (ret)
+		goto bail;
 	set_6120_baseaddrs(dd); /* set chip access pointers now */
 
 	ret = 0;

+ 3 - 5
drivers/infiniband/hw/qib/qib_iba7220.c

@@ -4126,11 +4126,9 @@ static int qib_init_7220_variables(struct qib_devdata *dd)
 	qib_7220_config_ctxts(dd);
 	qib_set_ctxtcnt(dd);  /* needed for PAT setup */
 
-	if (qib_wc_pat) {
-		ret = init_chip_wc_pat(dd, 0);
-		if (ret)
-			goto bail;
-	}
+	ret = init_chip_wc_pat(dd, 0);
+	if (ret)
+		goto bail;
 	set_7220_baseaddrs(dd); /* set chip access pointers now */
 
 	ret = 0;

+ 20 - 21
drivers/infiniband/hw/qib/qib_iba7322.c

@@ -6429,6 +6429,7 @@ static int qib_init_7322_variables(struct qib_devdata *dd)
 	unsigned features, pidx, sbufcnt;
 	int ret, mtu;
 	u32 sbufs, updthresh;
+	resource_size_t vl15off;
 
 	/* pport structs are contiguous, allocated after devdata */
 	ppd = (struct qib_pportdata *)(dd + 1);
@@ -6677,29 +6678,27 @@ static int qib_init_7322_variables(struct qib_devdata *dd)
 	qib_7322_config_ctxts(dd);
 	qib_set_ctxtcnt(dd);
 
-	if (qib_wc_pat) {
-		resource_size_t vl15off;
-		/*
-		 * We do not set WC on the VL15 buffers to avoid
-		 * a rare problem with unaligned writes from
-		 * interrupt-flushed store buffers, so we need
-		 * to map those separately here.  We can't solve
-		 * this for the rarely used mtrr case.
-		 */
-		ret = init_chip_wc_pat(dd, 0);
-		if (ret)
-			goto bail;
+	/*
+	 * We do not set WC on the VL15 buffers to avoid
+	 * a rare problem with unaligned writes from
+	 * interrupt-flushed store buffers, so we need
+	 * to map those separately here.  We can't solve
+	 * this for the rarely used mtrr case.
+	 */
+	ret = init_chip_wc_pat(dd, 0);
+	if (ret)
+		goto bail;
 
-		/* vl15 buffers start just after the 4k buffers */
-		vl15off = dd->physaddr + (dd->piobufbase >> 32) +
-			dd->piobcnt4k * dd->align4k;
-		dd->piovl15base	= ioremap_nocache(vl15off,
-						  NUM_VL15_BUFS * dd->align4k);
-		if (!dd->piovl15base) {
-			ret = -ENOMEM;
-			goto bail;
-		}
+	/* vl15 buffers start just after the 4k buffers */
+	vl15off = dd->physaddr + (dd->piobufbase >> 32) +
+		  dd->piobcnt4k * dd->align4k;
+	dd->piovl15base	= ioremap_nocache(vl15off,
+					  NUM_VL15_BUFS * dd->align4k);
+	if (!dd->piovl15base) {
+		ret = -ENOMEM;
+		goto bail;
 	}
+
 	qib_7322_set_baseaddrs(dd); /* set chip access pointers now */
 
 	ret = 0;

+ 7 - 19
drivers/infiniband/hw/qib/qib_init.c

@@ -91,15 +91,6 @@ MODULE_PARM_DESC(krcvqs, "number of kernel receive queues per IB port");
 unsigned qib_cc_table_size;
 module_param_named(cc_table_size, qib_cc_table_size, uint, S_IRUGO);
 MODULE_PARM_DESC(cc_table_size, "Congestion control table entries 0 (CCA disabled - default), min = 128, max = 1984");
-/*
- * qib_wc_pat parameter:
- *      0 is WC via MTRR
- *      1 is WC via PAT
- *      If PAT initialization fails, code reverts back to MTRR
- */
-unsigned qib_wc_pat = 1; /* default (1) is to use PAT, not MTRR */
-module_param_named(wc_pat, qib_wc_pat, uint, S_IRUGO);
-MODULE_PARM_DESC(wc_pat, "enable write-combining via PAT mechanism");
 
 static void verify_interrupt(unsigned long);
 
@@ -1377,8 +1368,7 @@ static void cleanup_device_data(struct qib_devdata *dd)
 		spin_unlock(&dd->pport[pidx].cc_shadow_lock);
 	}
 
-	if (!qib_wc_pat)
-		qib_disable_wc(dd);
+	qib_disable_wc(dd);
 
 	if (dd->pioavailregs_dma) {
 		dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
@@ -1547,14 +1537,12 @@ static int qib_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 		goto bail;
 	}
 
-	if (!qib_wc_pat) {
-		ret = qib_enable_wc(dd);
-		if (ret) {
-			qib_dev_err(dd,
-				"Write combining not enabled (err %d): performance may be poor\n",
-				-ret);
-			ret = 0;
-		}
+	ret = qib_enable_wc(dd);
+	if (ret) {
+		qib_dev_err(dd,
+			"Write combining not enabled (err %d): performance may be poor\n",
+			-ret);
+		ret = 0;
 	}
 
 	qib_verify_pioperf(dd);

+ 4 - 27
drivers/infiniband/hw/qib/qib_wc_x86_64.c

@@ -116,21 +116,9 @@ int qib_enable_wc(struct qib_devdata *dd)
 	}
 
 	if (!ret) {
-		int cookie;
-
-		cookie = mtrr_add(pioaddr, piolen, MTRR_TYPE_WRCOMB, 0);
-		if (cookie < 0) {
-			{
-				qib_devinfo(dd->pcidev,
-					 "mtrr_add()  WC for PIO bufs failed (%d)\n",
-					 cookie);
-				ret = -EINVAL;
-			}
-		} else {
-			dd->wc_cookie = cookie;
-			dd->wc_base = (unsigned long) pioaddr;
-			dd->wc_len = (unsigned long) piolen;
-		}
+		dd->wc_cookie = arch_phys_wc_add(pioaddr, piolen);
+		if (dd->wc_cookie < 0)
+			ret = -EINVAL;
 	}
 
 	return ret;
@@ -142,18 +130,7 @@ int qib_enable_wc(struct qib_devdata *dd)
  */
 void qib_disable_wc(struct qib_devdata *dd)
 {
-	if (dd->wc_cookie) {
-		int r;
-
-		r = mtrr_del(dd->wc_cookie, dd->wc_base,
-			     dd->wc_len);
-		if (r < 0)
-			qib_devinfo(dd->pcidev,
-				 "mtrr_del(%lx, %lx, %lx) failed: %d\n",
-				 dd->wc_cookie, dd->wc_base,
-				 dd->wc_len, r);
-		dd->wc_cookie = 0; /* even on failure */
-	}
+	arch_phys_wc_del(dd->wc_cookie);
 }
 
 /**

+ 2 - 2
drivers/infiniband/ulp/ipoib/ipoib_cm.c

@@ -386,8 +386,8 @@ static int ipoib_cm_nonsrq_init_rx(struct net_device *dev, struct ib_cm_id *cm_i
 					   rx->rx_ring[i].mapping,
 					   GFP_KERNEL)) {
 			ipoib_warn(priv, "failed to allocate receive buffer %d\n", i);
-				ret = -ENOMEM;
-				goto err_count;
+			ret = -ENOMEM;
+			goto err_count;
 		}
 		ret = ipoib_cm_post_receive_nonsrq(dev, rx, &t->wr, t->sge, i);
 		if (ret) {

+ 7 - 7
drivers/media/platform/marvell-ccic/mcam-core.c

@@ -116,8 +116,8 @@ static struct mcam_format_struct {
 		.planar		= false,
 	},
 	{
-		.desc		= "UYVY 4:2:2",
-		.pixelformat	= V4L2_PIX_FMT_UYVY,
+		.desc		= "YVYU 4:2:2",
+		.pixelformat	= V4L2_PIX_FMT_YVYU,
 		.mbus_code	= MEDIA_BUS_FMT_YUYV8_2X8,
 		.bpp		= 2,
 		.planar		= false,
@@ -748,7 +748,7 @@ static void mcam_ctlr_image(struct mcam_camera *cam)
 
 	switch (fmt->pixelformat) {
 	case V4L2_PIX_FMT_YUYV:
-	case V4L2_PIX_FMT_UYVY:
+	case V4L2_PIX_FMT_YVYU:
 		widthy = fmt->width * 2;
 		widthuv = 0;
 		break;
@@ -784,15 +784,15 @@ static void mcam_ctlr_image(struct mcam_camera *cam)
 	case V4L2_PIX_FMT_YUV420:
 	case V4L2_PIX_FMT_YVU420:
 		mcam_reg_write_mask(cam, REG_CTRL0,
-			C0_DF_YUV | C0_YUV_420PL | C0_YUVE_YVYU, C0_DF_MASK);
+			C0_DF_YUV | C0_YUV_420PL | C0_YUVE_VYUY, C0_DF_MASK);
 		break;
 	case V4L2_PIX_FMT_YUYV:
 		mcam_reg_write_mask(cam, REG_CTRL0,
-			C0_DF_YUV | C0_YUV_PACKED | C0_YUVE_UYVY, C0_DF_MASK);
+			C0_DF_YUV | C0_YUV_PACKED | C0_YUVE_NOSWAP, C0_DF_MASK);
 		break;
-	case V4L2_PIX_FMT_UYVY:
+	case V4L2_PIX_FMT_YVYU:
 		mcam_reg_write_mask(cam, REG_CTRL0,
-			C0_DF_YUV | C0_YUV_PACKED | C0_YUVE_YUYV, C0_DF_MASK);
+			C0_DF_YUV | C0_YUV_PACKED | C0_YUVE_SWAP24, C0_DF_MASK);
 		break;
 	case V4L2_PIX_FMT_JPEG:
 		mcam_reg_write_mask(cam, REG_CTRL0,

+ 4 - 4
drivers/media/platform/marvell-ccic/mcam-core.h

@@ -330,10 +330,10 @@ int mccic_resume(struct mcam_camera *cam);
 #define	  C0_YUVE_YVYU	  0x00010000	/* Y1CrY0Cb		*/
 #define	  C0_YUVE_VYUY	  0x00020000	/* CrY1CbY0		*/
 #define	  C0_YUVE_UYVY	  0x00030000	/* CbY1CrY0		*/
-#define	  C0_YUVE_XYUV	  0x00000000	/* 420: .YUV		*/
-#define	  C0_YUVE_XYVU	  0x00010000	/* 420: .YVU		*/
-#define	  C0_YUVE_XUVY	  0x00020000	/* 420: .UVY		*/
-#define	  C0_YUVE_XVUY	  0x00030000	/* 420: .VUY		*/
+#define	  C0_YUVE_NOSWAP  0x00000000	/* no byte swapping	*/
+#define	  C0_YUVE_SWAP13  0x00010000	/* swap bytes 1 and 3	*/
+#define	  C0_YUVE_SWAP24  0x00020000	/* swap bytes 2 and 4	*/
+#define	  C0_YUVE_SWAP1324 0x00030000	/* swap bytes 1&3 and 2&4 */
 /* Bayer bits 18,19 if needed */
 #define	  C0_EOF_VSYNC	  0x00400000	/* Generate EOF by VSYNC */
 #define	  C0_VEDGE_CTRL   0x00800000	/* Detect falling edge of VSYNC */

+ 6 - 1
drivers/media/platform/soc_camera/rcar_vin.c

@@ -135,6 +135,8 @@
 #define VIN_MAX_WIDTH		2048
 #define VIN_MAX_HEIGHT		2048
 
+#define TIMEOUT_MS		100
+
 enum chip_id {
 	RCAR_GEN2,
 	RCAR_H1,
@@ -820,7 +822,10 @@ static void rcar_vin_wait_stop_streaming(struct rcar_vin_priv *priv)
 		if (priv->state == STOPPING) {
 			priv->request_to_stop = true;
 			spin_unlock_irq(&priv->lock);
-			wait_for_completion(&priv->capture_stop);
+			if (!wait_for_completion_timeout(
+					&priv->capture_stop,
+					msecs_to_jiffies(TIMEOUT_MS)))
+				priv->state = STOPPED;
 			spin_lock_irq(&priv->lock);
 		}
 	}

+ 4 - 6
drivers/pinctrl/core.c

@@ -1110,7 +1110,7 @@ void devm_pinctrl_put(struct pinctrl *p)
 EXPORT_SYMBOL_GPL(devm_pinctrl_put);
 
 int pinctrl_register_map(struct pinctrl_map const *maps, unsigned num_maps,
-			 bool dup, bool locked)
+			 bool dup)
 {
 	int i, ret;
 	struct pinctrl_maps *maps_node;
@@ -1178,11 +1178,9 @@ int pinctrl_register_map(struct pinctrl_map const *maps, unsigned num_maps,
 		maps_node->maps = maps;
 	}
 
-	if (!locked)
-		mutex_lock(&pinctrl_maps_mutex);
+	mutex_lock(&pinctrl_maps_mutex);
 	list_add_tail(&maps_node->node, &pinctrl_maps);
-	if (!locked)
-		mutex_unlock(&pinctrl_maps_mutex);
+	mutex_unlock(&pinctrl_maps_mutex);
 
 	return 0;
 }
@@ -1197,7 +1195,7 @@ int pinctrl_register_map(struct pinctrl_map const *maps, unsigned num_maps,
 int pinctrl_register_mappings(struct pinctrl_map const *maps,
 			      unsigned num_maps)
 {
-	return pinctrl_register_map(maps, num_maps, true, false);
+	return pinctrl_register_map(maps, num_maps, true);
 }
 
 void pinctrl_unregister_map(struct pinctrl_map const *map)

+ 1 - 1
drivers/pinctrl/core.h

@@ -183,7 +183,7 @@ static inline struct pin_desc *pin_desc_get(struct pinctrl_dev *pctldev,
 }
 
 int pinctrl_register_map(struct pinctrl_map const *maps, unsigned num_maps,
-			 bool dup, bool locked);
+			 bool dup);
 void pinctrl_unregister_map(struct pinctrl_map const *map);
 
 extern int pinctrl_force_sleep(struct pinctrl_dev *pctldev);

+ 1 - 1
drivers/pinctrl/devicetree.c

@@ -92,7 +92,7 @@ static int dt_remember_or_free_map(struct pinctrl *p, const char *statename,
 	dt_map->num_maps = num_maps;
 	list_add_tail(&dt_map->node, &p->dt_maps);
 
-	return pinctrl_register_map(map, num_maps, false, true);
+	return pinctrl_register_map(map, num_maps, false);
 }
 
 struct pinctrl_dev *of_pinctrl_get(struct device_node *np)

+ 2 - 0
drivers/pinctrl/mediatek/pinctrl-mtk-common.c

@@ -881,6 +881,8 @@ static int mtk_gpio_set_debounce(struct gpio_chip *chip, unsigned offset,
 	if (!mtk_eint_get_mask(pctl, eint_num)) {
 		mtk_eint_mask(d);
 		unmask = 1;
+	} else {
+		unmask = 0;
 	}
 
 	clr_bit = 0xff << eint_offset;

+ 1 - 1
drivers/pinctrl/mvebu/pinctrl-armada-370.c

@@ -364,7 +364,7 @@ static struct mvebu_mpp_mode mv88f6710_mpp_modes[] = {
 	   MPP_FUNCTION(0x5, "audio", "mclk"),
 	   MPP_FUNCTION(0x6, "uart0", "cts")),
 	MPP_MODE(63,
-	   MPP_FUNCTION(0x0, "gpo", NULL),
+	   MPP_FUNCTION(0x0, "gpio", NULL),
 	   MPP_FUNCTION(0x1, "spi0", "sck"),
 	   MPP_FUNCTION(0x2, "tclk", NULL)),
 	MPP_MODE(64,

+ 1 - 0
drivers/pinctrl/qcom/pinctrl-spmi-gpio.c

@@ -260,6 +260,7 @@ static int pmic_gpio_set_mux(struct pinctrl_dev *pctldev, unsigned function,
 			val = 1;
 	}
 
+	val = val << PMIC_GPIO_REG_MODE_DIR_SHIFT;
 	val |= pad->function << PMIC_GPIO_REG_MODE_FUNCTION_SHIFT;
 	val |= pad->out_value & PMIC_GPIO_REG_MODE_VALUE_SHIFT;
 

+ 6 - 4
drivers/pinctrl/qcom/pinctrl-spmi-mpp.c

@@ -370,6 +370,7 @@ static int pmic_mpp_set_mux(struct pinctrl_dev *pctldev, unsigned function,
 		}
 	}
 
+	val = val << PMIC_MPP_REG_MODE_DIR_SHIFT;
 	val |= pad->function << PMIC_MPP_REG_MODE_FUNCTION_SHIFT;
 	val |= pad->out_value & PMIC_MPP_REG_MODE_VALUE_MASK;
 
@@ -576,10 +577,11 @@ static void pmic_mpp_config_dbg_show(struct pinctrl_dev *pctldev,
 
 		if (pad->input_enabled) {
 			ret = pmic_mpp_read(state, pad, PMIC_MPP_REG_RT_STS);
-			if (!ret) {
-				ret &= PMIC_MPP_REG_RT_STS_VAL_MASK;
-				pad->out_value = ret;
-			}
+			if (ret < 0)
+				return;
+
+			ret &= PMIC_MPP_REG_RT_STS_VAL_MASK;
+			pad->out_value = ret;
 		}
 
 		seq_printf(s, " %-4s", pad->output_enabled ? "out" : "in");

+ 7 - 0
drivers/platform/x86/ideapad-laptop.c

@@ -829,6 +829,13 @@ static void ideapad_acpi_notify(acpi_handle handle, u32 event, void *data)
  * report all radios as hardware-blocked.
  */
 static const struct dmi_system_id no_hw_rfkill_list[] = {
+	{
+		.ident = "Lenovo G40-30",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+			DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo G40-30"),
+		},
+	},
 	{
 		.ident = "Lenovo Yoga 2 11 / 13 / Pro",
 		.matches = {

+ 1 - 1
drivers/platform/x86/thinkpad_acpi.c

@@ -2115,7 +2115,7 @@ static int hotkey_mask_get(void)
 	return 0;
 }
 
-void static hotkey_mask_warn_incomplete_mask(void)
+static void hotkey_mask_warn_incomplete_mask(void)
 {
 	/* log only what the user can fix... */
 	const u32 wantedmask = hotkey_driver_mask &

+ 10 - 0
drivers/rtc/Kconfig

@@ -164,6 +164,16 @@ config RTC_DRV_ABB5ZES3
 	  This driver can also be built as a module. If so, the module
 	  will be called rtc-ab-b5ze-s3.
 
+config RTC_DRV_ABX80X
+	tristate "Abracon ABx80x"
+	help
+	  If you say yes here you get support for Abracon AB080X and AB180X
+	  families of ultra-low-power battery- and capacitor-backed real-time
+	  clock chips.
+
+	  This driver can also be built as a module. If so, the module
+	  will be called rtc-abx80x.
+
 config RTC_DRV_AS3722
 	tristate "ams AS3722 RTC driver"
 	depends on MFD_AS3722

+ 1 - 0
drivers/rtc/Makefile

@@ -25,6 +25,7 @@ obj-$(CONFIG_RTC_DRV_88PM80X)	+= rtc-88pm80x.o
 obj-$(CONFIG_RTC_DRV_AB3100)	+= rtc-ab3100.o
 obj-$(CONFIG_RTC_DRV_AB8500)	+= rtc-ab8500.o
 obj-$(CONFIG_RTC_DRV_ABB5ZES3)	+= rtc-ab-b5ze-s3.o
+obj-$(CONFIG_RTC_DRV_ABX80X)	+= rtc-abx80x.o
 obj-$(CONFIG_RTC_DRV_ARMADA38X)	+= rtc-armada38x.o
 obj-$(CONFIG_RTC_DRV_AS3722)	+= rtc-as3722.o
 obj-$(CONFIG_RTC_DRV_AT32AP700X)+= rtc-at32ap700x.o

+ 307 - 0
drivers/rtc/rtc-abx80x.c

@@ -0,0 +1,307 @@
+/*
+ * A driver for the I2C members of the Abracon AB x8xx RTC family,
+ * and compatible: AB 1805 and AB 0805
+ *
+ * Copyright 2014-2015 Macq S.A.
+ *
+ * Author: Philippe De Muyter <phdm@macqel.be>
+ * Author: Alexandre Belloni <alexandre.belloni@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/bcd.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/rtc.h>
+
+#define ABX8XX_REG_HTH		0x00
+#define ABX8XX_REG_SC		0x01
+#define ABX8XX_REG_MN		0x02
+#define ABX8XX_REG_HR		0x03
+#define ABX8XX_REG_DA		0x04
+#define ABX8XX_REG_MO		0x05
+#define ABX8XX_REG_YR		0x06
+#define ABX8XX_REG_WD		0x07
+
+#define ABX8XX_REG_CTRL1	0x10
+#define ABX8XX_CTRL_WRITE	BIT(1)
+#define ABX8XX_CTRL_12_24	BIT(6)
+
+#define ABX8XX_REG_CFG_KEY	0x1f
+#define ABX8XX_CFG_KEY_MISC	0x9d
+
+#define ABX8XX_REG_ID0		0x28
+
+#define ABX8XX_REG_TRICKLE	0x20
+#define ABX8XX_TRICKLE_CHARGE_ENABLE	0xa0
+#define ABX8XX_TRICKLE_STANDARD_DIODE	0x8
+#define ABX8XX_TRICKLE_SCHOTTKY_DIODE	0x4
+
+static u8 trickle_resistors[] = {0, 3, 6, 11};
+
+enum abx80x_chip {AB0801, AB0803, AB0804, AB0805,
+	AB1801, AB1803, AB1804, AB1805, ABX80X};
+
+struct abx80x_cap {
+	u16 pn;
+	bool has_tc;
+};
+
+static struct abx80x_cap abx80x_caps[] = {
+	[AB0801] = {.pn = 0x0801},
+	[AB0803] = {.pn = 0x0803},
+	[AB0804] = {.pn = 0x0804, .has_tc = true},
+	[AB0805] = {.pn = 0x0805, .has_tc = true},
+	[AB1801] = {.pn = 0x1801},
+	[AB1803] = {.pn = 0x1803},
+	[AB1804] = {.pn = 0x1804, .has_tc = true},
+	[AB1805] = {.pn = 0x1805, .has_tc = true},
+	[ABX80X] = {.pn = 0}
+};
+
+static struct i2c_driver abx80x_driver;
+
+static int abx80x_enable_trickle_charger(struct i2c_client *client,
+					 u8 trickle_cfg)
+{
+	int err;
+
+	/*
+	 * Write the configuration key register to enable access to the Trickle
+	 * register
+	 */
+	err = i2c_smbus_write_byte_data(client, ABX8XX_REG_CFG_KEY,
+					ABX8XX_CFG_KEY_MISC);
+	if (err < 0) {
+		dev_err(&client->dev, "Unable to write configuration key\n");
+		return -EIO;
+	}
+
+	err = i2c_smbus_write_byte_data(client, ABX8XX_REG_TRICKLE,
+					ABX8XX_TRICKLE_CHARGE_ENABLE |
+					trickle_cfg);
+	if (err < 0) {
+		dev_err(&client->dev, "Unable to write trickle register\n");
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static int abx80x_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	unsigned char buf[8];
+	int err;
+
+	err = i2c_smbus_read_i2c_block_data(client, ABX8XX_REG_HTH,
+					    sizeof(buf), buf);
+	if (err < 0) {
+		dev_err(&client->dev, "Unable to read date\n");
+		return -EIO;
+	}
+
+	tm->tm_sec = bcd2bin(buf[ABX8XX_REG_SC] & 0x7F);
+	tm->tm_min = bcd2bin(buf[ABX8XX_REG_MN] & 0x7F);
+	tm->tm_hour = bcd2bin(buf[ABX8XX_REG_HR] & 0x3F);
+	tm->tm_wday = buf[ABX8XX_REG_WD] & 0x7;
+	tm->tm_mday = bcd2bin(buf[ABX8XX_REG_DA] & 0x3F);
+	tm->tm_mon = bcd2bin(buf[ABX8XX_REG_MO] & 0x1F) - 1;
+	tm->tm_year = bcd2bin(buf[ABX8XX_REG_YR]) + 100;
+
+	err = rtc_valid_tm(tm);
+	if (err < 0)
+		dev_err(&client->dev, "retrieved date/time is not valid.\n");
+
+	return err;
+}
+
+static int abx80x_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	unsigned char buf[8];
+	int err;
+
+	if (tm->tm_year < 100)
+		return -EINVAL;
+
+	buf[ABX8XX_REG_HTH] = 0;
+	buf[ABX8XX_REG_SC] = bin2bcd(tm->tm_sec);
+	buf[ABX8XX_REG_MN] = bin2bcd(tm->tm_min);
+	buf[ABX8XX_REG_HR] = bin2bcd(tm->tm_hour);
+	buf[ABX8XX_REG_DA] = bin2bcd(tm->tm_mday);
+	buf[ABX8XX_REG_MO] = bin2bcd(tm->tm_mon + 1);
+	buf[ABX8XX_REG_YR] = bin2bcd(tm->tm_year - 100);
+	buf[ABX8XX_REG_WD] = tm->tm_wday;
+
+	err = i2c_smbus_write_i2c_block_data(client, ABX8XX_REG_HTH,
+					     sizeof(buf), buf);
+	if (err < 0) {
+		dev_err(&client->dev, "Unable to write to date registers\n");
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static const struct rtc_class_ops abx80x_rtc_ops = {
+	.read_time	= abx80x_rtc_read_time,
+	.set_time	= abx80x_rtc_set_time,
+};
+
+static int abx80x_dt_trickle_cfg(struct device_node *np)
+{
+	const char *diode;
+	int trickle_cfg = 0;
+	int i, ret;
+	u32 tmp;
+
+	ret = of_property_read_string(np, "abracon,tc-diode", &diode);
+	if (ret)
+		return ret;
+
+	if (!strcmp(diode, "standard"))
+		trickle_cfg |= ABX8XX_TRICKLE_STANDARD_DIODE;
+	else if (!strcmp(diode, "schottky"))
+		trickle_cfg |= ABX8XX_TRICKLE_SCHOTTKY_DIODE;
+	else
+		return -EINVAL;
+
+	ret = of_property_read_u32(np, "abracon,tc-resistor", &tmp);
+	if (ret)
+		return ret;
+
+	for (i = 0; i < sizeof(trickle_resistors); i++)
+		if (trickle_resistors[i] == tmp)
+			break;
+
+	if (i == sizeof(trickle_resistors))
+		return -EINVAL;
+
+	return (trickle_cfg | i);
+}
+
+static int abx80x_probe(struct i2c_client *client,
+			const struct i2c_device_id *id)
+{
+	struct device_node *np = client->dev.of_node;
+	struct rtc_device *rtc;
+	int i, data, err, trickle_cfg = -EINVAL;
+	char buf[7];
+	unsigned int part = id->driver_data;
+	unsigned int partnumber;
+	unsigned int majrev, minrev;
+	unsigned int lot;
+	unsigned int wafer;
+	unsigned int uid;
+
+	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
+		return -ENODEV;
+
+	err = i2c_smbus_read_i2c_block_data(client, ABX8XX_REG_ID0,
+					    sizeof(buf), buf);
+	if (err < 0) {
+		dev_err(&client->dev, "Unable to read partnumber\n");
+		return -EIO;
+	}
+
+	partnumber = (buf[0] << 8) | buf[1];
+	majrev = buf[2] >> 3;
+	minrev = buf[2] & 0x7;
+	lot = ((buf[4] & 0x80) << 2) | ((buf[6] & 0x80) << 1) | buf[3];
+	uid = ((buf[4] & 0x7f) << 8) | buf[5];
+	wafer = (buf[6] & 0x7c) >> 2;
+	dev_info(&client->dev, "model %04x, revision %u.%u, lot %x, wafer %x, uid %x\n",
+		 partnumber, majrev, minrev, lot, wafer, uid);
+
+	data = i2c_smbus_read_byte_data(client, ABX8XX_REG_CTRL1);
+	if (data < 0) {
+		dev_err(&client->dev, "Unable to read control register\n");
+		return -EIO;
+	}
+
+	err = i2c_smbus_write_byte_data(client, ABX8XX_REG_CTRL1,
+					((data & ~ABX8XX_CTRL_12_24) |
+					 ABX8XX_CTRL_WRITE));
+	if (err < 0) {
+		dev_err(&client->dev, "Unable to write control register\n");
+		return -EIO;
+	}
+
+	/* part autodetection */
+	if (part == ABX80X) {
+		for (i = 0; abx80x_caps[i].pn; i++)
+			if (partnumber == abx80x_caps[i].pn)
+				break;
+		if (abx80x_caps[i].pn == 0) {
+			dev_err(&client->dev, "Unknown part: %04x\n",
+				partnumber);
+			return -EINVAL;
+		}
+		part = i;
+	}
+
+	if (partnumber != abx80x_caps[part].pn) {
+		dev_err(&client->dev, "partnumber mismatch %04x != %04x\n",
+			partnumber, abx80x_caps[part].pn);
+		return -EINVAL;
+	}
+
+	if (np && abx80x_caps[part].has_tc)
+		trickle_cfg = abx80x_dt_trickle_cfg(np);
+
+	if (trickle_cfg > 0) {
+		dev_info(&client->dev, "Enabling trickle charger: %02x\n",
+			 trickle_cfg);
+		abx80x_enable_trickle_charger(client, trickle_cfg);
+	}
+
+	rtc = devm_rtc_device_register(&client->dev, abx80x_driver.driver.name,
+				       &abx80x_rtc_ops, THIS_MODULE);
+
+	if (IS_ERR(rtc))
+		return PTR_ERR(rtc);
+
+	i2c_set_clientdata(client, rtc);
+
+	return 0;
+}
+
+static int abx80x_remove(struct i2c_client *client)
+{
+	return 0;
+}
+
+static const struct i2c_device_id abx80x_id[] = {
+	{ "abx80x", ABX80X },
+	{ "ab0801", AB0801 },
+	{ "ab0803", AB0803 },
+	{ "ab0804", AB0804 },
+	{ "ab0805", AB0805 },
+	{ "ab1801", AB1801 },
+	{ "ab1803", AB1803 },
+	{ "ab1804", AB1804 },
+	{ "ab1805", AB1805 },
+	{ }
+};
+MODULE_DEVICE_TABLE(i2c, abx80x_id);
+
+static struct i2c_driver abx80x_driver = {
+	.driver		= {
+		.name	= "rtc-abx80x",
+	},
+	.probe		= abx80x_probe,
+	.remove		= abx80x_remove,
+	.id_table	= abx80x_id,
+};
+
+module_i2c_driver(abx80x_driver);
+
+MODULE_AUTHOR("Philippe De Muyter <phdm@macqel.be>");
+MODULE_AUTHOR("Alexandre Belloni <alexandre.belloni@free-electrons.com>");
+MODULE_DESCRIPTION("Abracon ABX80X RTC driver");
+MODULE_LICENSE("GPL v2");

+ 12 - 12
drivers/rtc/rtc-armada38x.c

@@ -40,6 +40,13 @@ struct armada38x_rtc {
 	void __iomem	    *regs;
 	void __iomem	    *regs_soc;
 	spinlock_t	    lock;
+	/*
+	 * While setting the time, the RTC TIME register should not be
+	 * accessed. Setting the RTC time involves sleeping for
+	 * 100ms, so a mutex instead of a spinlock is used to protect
+	 * it.
+	 */
+	struct mutex	    mutex_time;
 	int		    irq;
 };
 
@@ -59,8 +66,7 @@ static int armada38x_rtc_read_time(struct device *dev, struct rtc_time *tm)
 	struct armada38x_rtc *rtc = dev_get_drvdata(dev);
 	unsigned long time, time_check, flags;
 
-	spin_lock_irqsave(&rtc->lock, flags);
-
+	mutex_lock(&rtc->mutex_time);
 	time = readl(rtc->regs + RTC_TIME);
 	/*
 	 * WA for failing time set attempts. As stated in HW ERRATA if
@@ -71,7 +77,7 @@ static int armada38x_rtc_read_time(struct device *dev, struct rtc_time *tm)
 	if ((time_check - time) > 1)
 		time_check = readl(rtc->regs + RTC_TIME);
 
-	spin_unlock_irqrestore(&rtc->lock, flags);
+	mutex_unlock(&rtc->mutex_time);
 
 	rtc_time_to_tm(time_check, tm);
 
@@ -94,19 +100,12 @@ static int armada38x_rtc_set_time(struct device *dev, struct rtc_time *tm)
 	 * then wait for 100ms before writing to the time register to be
 	 * sure that the data will be taken into account.
 	 */
-	spin_lock_irqsave(&rtc->lock, flags);
-
+	mutex_lock(&rtc->mutex_time);
 	rtc_delayed_write(0, rtc, RTC_STATUS);
-
-	spin_unlock_irqrestore(&rtc->lock, flags);
-
 	msleep(100);
-
-	spin_lock_irqsave(&rtc->lock, flags);
-
 	rtc_delayed_write(time, rtc, RTC_TIME);
+	mutex_unlock(&rtc->mutex_time);
 
-	spin_unlock_irqrestore(&rtc->lock, flags);
 out:
 	return ret;
 }
@@ -230,6 +229,7 @@ static __init int armada38x_rtc_probe(struct platform_device *pdev)
 		return -ENOMEM;
 
 	spin_lock_init(&rtc->lock);
+	mutex_init(&rtc->mutex_time);
 
 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rtc");
 	rtc->regs = devm_ioremap_resource(&pdev->dev, res);

+ 1 - 0
drivers/staging/media/omap4iss/Kconfig

@@ -2,6 +2,7 @@ config VIDEO_OMAP4
 	bool "OMAP 4 Camera support"
 	depends on VIDEO_V4L2=y && VIDEO_V4L2_SUBDEV_API && I2C=y && ARCH_OMAP4
 	depends on HAS_DMA
+	select MFD_SYSCON
 	select VIDEOBUF2_DMA_CONTIG
 	---help---
 	  Driver for an OMAP 4 ISS controller.

+ 11 - 0
drivers/staging/media/omap4iss/iss.c

@@ -17,6 +17,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/i2c.h>
 #include <linux/interrupt.h>
+#include <linux/mfd/syscon.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
@@ -1386,6 +1387,16 @@ static int iss_probe(struct platform_device *pdev)
 
 	platform_set_drvdata(pdev, iss);
 
+	/*
+	 * TODO: When implementing DT support switch to syscon regmap lookup by
+	 * phandle.
+	 */
+	iss->syscon = syscon_regmap_lookup_by_compatible("syscon");
+	if (IS_ERR(iss->syscon)) {
+		ret = PTR_ERR(iss->syscon);
+		goto error;
+	}
+
 	/* Clocks */
 	ret = iss_map_mem_resource(pdev, iss, OMAP4_ISS_MEM_TOP);
 	if (ret < 0)

+ 4 - 0
drivers/staging/media/omap4iss/iss.h

@@ -29,6 +29,8 @@
 #include "iss_ipipe.h"
 #include "iss_resizer.h"
 
+struct regmap;
+
 #define to_iss_device(ptr_module)				\
 	container_of(ptr_module, struct iss_device, ptr_module)
 #define to_device(ptr_module)						\
@@ -79,6 +81,7 @@ struct iss_reg {
 
 /*
  * struct iss_device - ISS device structure.
+ * @syscon: Regmap for the syscon register space
 * @crashed: Bitmask of crashed entities (indexed by entity ID)
 */
 struct iss_device {
@@ -93,6 +96,7 @@ struct iss_device {
 
 	struct resource *res[OMAP4_ISS_MEM_LAST];
 	void __iomem *regs[OMAP4_ISS_MEM_LAST];
+	struct regmap *syscon;
 
 	u64 raw_dmamask;
 

+ 7 - 5
drivers/staging/media/omap4iss/iss_csiphy.c

@@ -13,6 +13,7 @@
 
 #include <linux/delay.h>
 #include <linux/device.h>
+#include <linux/regmap.h>
 
 #include "../../../../arch/arm/mach-omap2/control.h"
 
@@ -140,9 +141,11 @@ int omap4iss_csiphy_config(struct iss_device *iss,
 	 * - bit [18] : CSIPHY1 CTRLCLK enable
 	 * - bit [17:16] : CSIPHY1 config: 00 d-phy, 01/10 ccp2
 	 */
-	cam_rx_ctrl = omap4_ctrl_pad_readl(
-			OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_CAMERA_RX);
-
+	/*
+	 * TODO: When implementing DT support specify the CONTROL_CAMERA_RX
+	 * register offset in the syscon property instead of hardcoding it.
+	 */
+	regmap_read(iss->syscon, 0x68, &cam_rx_ctrl);
 
 	if (subdevs->interface == ISS_INTERFACE_CSI2A_PHY1) {
 		cam_rx_ctrl &= ~(OMAP4_CAMERARX_CSI21_LANEENABLE_MASK |
@@ -166,8 +169,7 @@ int omap4iss_csiphy_config(struct iss_device *iss,
 		cam_rx_ctrl |= OMAP4_CAMERARX_CSI22_CTRLCLKEN_MASK;
 	}
 
-	omap4_ctrl_pad_writel(cam_rx_ctrl,
-		 OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_CAMERA_RX);
+	regmap_write(iss->syscon, 0x68, cam_rx_ctrl);
 
 	/* Reset used lane count */
 	csi2->phy->used_data_lanes = 0;

+ 17 - 1
drivers/tty/hvc/hvc_xen.c

@@ -299,11 +299,27 @@ static int xen_initial_domain_console_init(void)
 	return 0;
 }
 
+static void xen_console_update_evtchn(struct xencons_info *info)
+{
+	if (xen_hvm_domain()) {
+		uint64_t v;
+		int err;
+
+		err = hvm_get_parameter(HVM_PARAM_CONSOLE_EVTCHN, &v);
+		if (!err && v)
+			info->evtchn = v;
+	} else
+		info->evtchn = xen_start_info->console.domU.evtchn;
+}
+
 void xen_console_resume(void)
 {
 	struct xencons_info *info = vtermno_to_xencons(HVC_COOKIE);
-	if (info != NULL && info->irq)
+	if (info != NULL && info->irq) {
+		if (!xen_initial_domain())
+			xen_console_update_evtchn(info);
 		rebind_evtchn_irq(info->evtchn, info->irq);
+	}
 }
 
 static void xencons_disconnect_backend(struct xencons_info *info)

+ 7 - 1
drivers/vfio/pci/vfio_pci.c

@@ -907,8 +907,14 @@ static void vfio_pci_request(void *device_data, unsigned int count)
 	mutex_lock(&vdev->igate);
 
 	if (vdev->req_trigger) {
-		dev_dbg(&vdev->pdev->dev, "Requesting device from user\n");
+		if (!(count % 10))
+			dev_notice_ratelimited(&vdev->pdev->dev,
+				"Relaying device request to user (#%u)\n",
+				count);
 		eventfd_signal(vdev->req_trigger, 1);
+	} else if (count == 0) {
+		dev_warn(&vdev->pdev->dev,
+			"No device request channel registered, blocked until released by user\n");
 	}
 
 	mutex_unlock(&vdev->igate);

+ 18 - 3
drivers/vfio/vfio.c

@@ -710,6 +710,8 @@ void *vfio_del_group_dev(struct device *dev)
 	void *device_data = device->device_data;
 	struct vfio_unbound_dev *unbound;
 	unsigned int i = 0;
+	long ret;
+	bool interrupted = false;
 
 	/*
 	 * The group exists so long as we have a device reference.  Get
@@ -755,9 +757,22 @@ void *vfio_del_group_dev(struct device *dev)
 
 		vfio_device_put(device);
 
-	} while (wait_event_interruptible_timeout(vfio.release_q,
-						  !vfio_dev_present(group, dev),
-						  HZ * 10) <= 0);
+		if (interrupted) {
+			ret = wait_event_timeout(vfio.release_q,
+					!vfio_dev_present(group, dev), HZ * 10);
+		} else {
+			ret = wait_event_interruptible_timeout(vfio.release_q,
+					!vfio_dev_present(group, dev), HZ * 10);
+			if (ret == -ERESTARTSYS) {
+				interrupted = true;
+				dev_warn(dev,
+					 "Device is currently in use, task"
+					 " \"%s\" (%d) "
+					 "blocked until device is released",
+					 current->comm, task_pid_nr(current));
+			}
+		}
+	} while (ret <= 0);
 
 	vfio_group_put(group);
 

+ 10 - 0
drivers/xen/events/events_2l.c

@@ -345,6 +345,15 @@ irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
 	return IRQ_HANDLED;
 	return IRQ_HANDLED;
 }
 
+static void evtchn_2l_resume(void)
+{
+	int i;
+
+	for_each_online_cpu(i)
+		memset(per_cpu(cpu_evtchn_mask, i), 0, sizeof(xen_ulong_t) *
+				EVTCHN_2L_NR_CHANNELS/BITS_PER_EVTCHN_WORD);
+}
+
 static const struct evtchn_ops evtchn_ops_2l = {
 	.max_channels      = evtchn_2l_max_channels,
 	.nr_channels       = evtchn_2l_max_channels,
@@ -356,6 +365,7 @@ static const struct evtchn_ops evtchn_ops_2l = {
 	.mask              = evtchn_2l_mask,
 	.unmask            = evtchn_2l_unmask,
 	.handle_events     = evtchn_2l_handle_events,
+	.resume	           = evtchn_2l_resume,
 };
 
 void __init xen_evtchn_2l_init(void)

+ 4 - 3
drivers/xen/events/events_base.c

@@ -529,8 +529,8 @@ static unsigned int __startup_pirq(unsigned int irq)
 	if (rc)
 		goto err;
 
-	bind_evtchn_to_cpu(evtchn, 0);
 	info->evtchn = evtchn;
+	bind_evtchn_to_cpu(evtchn, 0);
 
 	rc = xen_evtchn_port_setup(info);
 	if (rc)
@@ -1279,8 +1279,9 @@ void rebind_evtchn_irq(int evtchn, int irq)
 
 	mutex_unlock(&irq_mapping_update_lock);
 
-	/* new event channels are always bound to cpu 0 */
-	irq_set_affinity(irq, cpumask_of(0));
+	bind_evtchn_to_cpu(evtchn, info->cpu);
+	/* This will be deferred until interrupt is processed */
+	irq_set_affinity(irq, cpumask_of(info->cpu));
 
 	/* Unmask the event channel. */
 	enable_irq(irq);

+ 3 - 25
drivers/xen/gntdev.c

@@ -327,30 +327,10 @@ static int map_grant_pages(struct grant_map *map)
 	return err;
 }
 
-struct unmap_grant_pages_callback_data
-{
-	struct completion completion;
-	int result;
-};
-
-static void unmap_grant_callback(int result,
-				 struct gntab_unmap_queue_data *data)
-{
-	struct unmap_grant_pages_callback_data* d = data->data;
-
-	d->result = result;
-	complete(&d->completion);
-}
-
 static int __unmap_grant_pages(struct grant_map *map, int offset, int pages)
 {
 	int i, err = 0;
 	struct gntab_unmap_queue_data unmap_data;
-	struct unmap_grant_pages_callback_data data;
-
-	init_completion(&data.completion);
-	unmap_data.data = &data;
-	unmap_data.done= &unmap_grant_callback;
 
 	if (map->notify.flags & UNMAP_NOTIFY_CLEAR_BYTE) {
 		int pgno = (map->notify.addr >> PAGE_SHIFT);
@@ -367,11 +347,9 @@ static int __unmap_grant_pages(struct grant_map *map, int offset, int pages)
 	unmap_data.pages = map->pages + offset;
 	unmap_data.count = pages;
 
-	gnttab_unmap_refs_async(&unmap_data);
-
-	wait_for_completion(&data.completion);
-	if (data.result)
-		return data.result;
+	err = gnttab_unmap_refs_sync(&unmap_data);
+	if (err)
+		return err;
 
 	for (i = 0; i < pages; i++) {
 		if (map->unmap_ops[offset+i].status)

+ 28 - 0
drivers/xen/grant-table.c

@@ -123,6 +123,11 @@ struct gnttab_ops {
 	int (*query_foreign_access)(grant_ref_t ref);
 };
 
+struct unmap_refs_callback_data {
+	struct completion completion;
+	int result;
+};
+
 static struct gnttab_ops *gnttab_interface;
 
 static int grant_table_version;
@@ -863,6 +868,29 @@ void gnttab_unmap_refs_async(struct gntab_unmap_queue_data* item)
 }
 EXPORT_SYMBOL_GPL(gnttab_unmap_refs_async);
 
+static void unmap_refs_callback(int result,
+		struct gntab_unmap_queue_data *data)
+{
+	struct unmap_refs_callback_data *d = data->data;
+
+	d->result = result;
+	complete(&d->completion);
+}
+
+int gnttab_unmap_refs_sync(struct gntab_unmap_queue_data *item)
+{
+	struct unmap_refs_callback_data data;
+
+	init_completion(&data.completion);
+	item->data = &data;
+	item->done = &unmap_refs_callback;
+	gnttab_unmap_refs_async(item);
+	wait_for_completion(&data.completion);
+
+	return data.result;
+}
+EXPORT_SYMBOL_GPL(gnttab_unmap_refs_sync);
+
 static int gnttab_map_frames_v1(xen_pfn_t *frames, unsigned int nr_gframes)
 {
 	int rc;
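
The new synchronous wrapper folds the completion plumbing that gntdev.c used to open-code into the core; a hedged caller sketch, mirroring the __unmap_grant_pages() hunk above (other queue-data fields elided):

	struct gntab_unmap_queue_data unmap_data;
	int err;

	unmap_data.pages = map->pages + offset;
	unmap_data.count = pages;

	err = gnttab_unmap_refs_sync(&unmap_data);	/* blocks until the async unmap completes */
	if (err)
		return err;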

+ 6 - 3
drivers/xen/manage.c

@@ -131,6 +131,8 @@ static void do_suspend(void)
 		goto out_resume;
 	}
 
+	xen_arch_suspend();
+
 	si.cancelled = 1;
 
 	err = stop_machine(xen_suspend, &si, cpumask_of(0));
@@ -148,11 +150,12 @@ static void do_suspend(void)
 		si.cancelled = 1;
 	}
 
+	xen_arch_resume();
+
 out_resume:
-	if (!si.cancelled) {
-		xen_arch_resume();
+	if (!si.cancelled)
 		xs_resume();
-	} else
+	else
 		xs_suspend_cancel();
 
 	dpm_resume_end(si.cancelled ? PMSG_THAW : PMSG_RESTORE);

+ 1 - 1
drivers/xen/swiotlb-xen.c

@@ -235,7 +235,7 @@ retry:
 #define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))
 #define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
 		while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
-			xen_io_tlb_start = (void *)__get_free_pages(__GFP_NOWARN, order);
+			xen_io_tlb_start = (void *)xen_get_swiotlb_free_pages(order);
 			if (xen_io_tlb_start)
 				break;
 			order--;

+ 3 - 3
drivers/xen/xen-pciback/conf_space.c

@@ -16,8 +16,8 @@
 #include "conf_space.h"
 #include "conf_space_quirks.h"
 
-bool permissive;
-module_param(permissive, bool, 0644);
+bool xen_pcibk_permissive;
+module_param_named(permissive, xen_pcibk_permissive, bool, 0644);
 
 /* This is where xen_pcibk_read_config_byte, xen_pcibk_read_config_word,
  * xen_pcibk_write_config_word, and xen_pcibk_write_config_byte are created. */
@@ -262,7 +262,7 @@ int xen_pcibk_config_write(struct pci_dev *dev, int offset, int size, u32 value)
 		 * This means that some fields may still be read-only because
 		 * they have entries in the config_field list that intercept
 		 * the write and do nothing. */
-		if (dev_data->permissive || permissive) {
+		if (dev_data->permissive || xen_pcibk_permissive) {
 			switch (size) {
 			case 1:
 				err = pci_write_config_byte(dev, offset,

+ 1 - 1
drivers/xen/xen-pciback/conf_space.h

@@ -64,7 +64,7 @@ struct config_field_entry {
 	void *data;
 };
 
-extern bool permissive;
+extern bool xen_pcibk_permissive;
 
 #define OFFSET(cfg_entry) ((cfg_entry)->base_offset+(cfg_entry)->field->offset)
 

+ 1 - 1
drivers/xen/xen-pciback/conf_space_header.c

@@ -118,7 +118,7 @@ static int command_write(struct pci_dev *dev, int offset, u16 value, void *data)
 
 	cmd->val = value;
 
-	if (!permissive && (!dev_data || !dev_data->permissive))
+	if (!xen_pcibk_permissive && (!dev_data || !dev_data->permissive))
 		return 0;
 
 	/* Only allow the guest to control certain bits. */

+ 29 - 0
drivers/xen/xenbus/xenbus_probe.c

@@ -57,6 +57,7 @@
 #include <xen/xen.h>
 #include <xen/xen.h>
 #include <xen/xenbus.h>
 #include <xen/xenbus.h>
 #include <xen/events.h>
 #include <xen/events.h>
+#include <xen/xen-ops.h>
 #include <xen/page.h>
 #include <xen/page.h>
 
 
 #include <xen/hvm.h>
 #include <xen/hvm.h>
@@ -735,6 +736,30 @@ static int __init xenstored_local_init(void)
 	return err;
 	return err;
 }
 }
 
 
+static int xenbus_resume_cb(struct notifier_block *nb,
+			    unsigned long action, void *data)
+{
+	int err = 0;
+
+	if (xen_hvm_domain()) {
+		uint64_t v;
+
+		err = hvm_get_parameter(HVM_PARAM_STORE_EVTCHN, &v);
+		if (!err && v)
+			xen_store_evtchn = v;
+		else
+			pr_warn("Cannot update xenstore event channel: %d\n",
+				err);
+	} else
+		xen_store_evtchn = xen_start_info->store_evtchn;
+
+	return err;
+}
+
+static struct notifier_block xenbus_resume_nb = {
+	.notifier_call = xenbus_resume_cb,
+};
+
 static int __init xenbus_init(void)
 {
 	int err = 0;
@@ -793,6 +818,10 @@ static int __init xenbus_init(void)
 		goto out_error;
 	}
 
+	if ((xen_store_domain_type != XS_LOCAL) &&
+	    (xen_store_domain_type != XS_UNKNOWN))
+		xen_resume_notifier_register(&xenbus_resume_nb);
+
 #ifdef CONFIG_XEN_COMPAT_XENFS
 	/*
 	 * Create xenfs mountpoint in /proc for compatibility with

+ 1 - 1
fs/configfs/mount.c

@@ -173,5 +173,5 @@ MODULE_LICENSE("GPL");
 MODULE_VERSION("0.0.2");
 MODULE_DESCRIPTION("Simple RAM filesystem for user driven kernel subsystem configuration.");
 
-module_init(configfs_init);
+core_initcall(configfs_init);
 module_exit(configfs_exit);

+ 1 - 1
fs/efivarfs/super.c

@@ -121,7 +121,7 @@ static int efivarfs_callback(efi_char16_t *name16, efi_guid_t vendor,
 	int len, i;
 	int err = -ENOMEM;
 
-	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
 	if (!entry)
 		return err;
 

+ 7 - 0
fs/f2fs/data.c

@@ -1513,6 +1513,7 @@ static int f2fs_write_data_pages(struct address_space *mapping,
 {
 	struct inode *inode = mapping->host;
 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+	bool locked = false;
 	int ret;
 	long diff;
 
@@ -1533,7 +1534,13 @@
 
 	diff = nr_pages_to_write(sbi, DATA, wbc);
 
+	if (!S_ISDIR(inode->i_mode)) {
+		mutex_lock(&sbi->writepages);
+		locked = true;
+	}
 	ret = write_cache_pages(mapping, wbc, __f2fs_writepage, mapping);
+	if (locked)
+		mutex_unlock(&sbi->writepages);
 
 	f2fs_submit_merged_bio(sbi, DATA, WRITE);
 

+ 1 - 0
fs/f2fs/f2fs.h

@@ -625,6 +625,7 @@ struct f2fs_sb_info {
 	struct mutex cp_mutex;			/* checkpoint procedure lock */
 	struct rw_semaphore cp_rwsem;		/* blocking FS operations */
 	struct rw_semaphore node_write;		/* locking node writes */
+	struct mutex writepages;		/* mutex for writepages() */
 	wait_queue_head_t cp_wait;
 
 	struct inode_management im[MAX_INO_ENTRY];      /* manage inode cache */

+ 3 - 5
fs/f2fs/namei.c

@@ -298,16 +298,14 @@ fail:
 
 static void *f2fs_follow_link(struct dentry *dentry, struct nameidata *nd)
 {
-	struct page *page;
+	struct page *page = page_follow_link_light(dentry, nd);
 
-	page = page_follow_link_light(dentry, nd);
-	if (IS_ERR(page))
+	if (IS_ERR_OR_NULL(page))
 		return page;
 
 	/* this is broken symlink case */
 	if (*nd_get_link(nd) == 0) {
-		kunmap(page);
-		page_cache_release(page);
+		page_put_link(dentry, nd, page);
 		return ERR_PTR(-ENOENT);
 	}
 	return page;

+ 1 - 0
fs/f2fs/super.c

@@ -1035,6 +1035,7 @@ try_onemore:
 	sbi->raw_super = raw_super;
 	sbi->raw_super_buf = raw_super_buf;
 	mutex_init(&sbi->gc_mutex);
+	mutex_init(&sbi->writepages);
 	mutex_init(&sbi->cp_mutex);
 	init_rwsem(&sbi->node_write);
 	clear_sbi_flag(sbi, SBI_POR_DOING);
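
Across the three f2fs hunks this adds one serialization point: data writeback for non-directory inodes now runs under sbi->writepages, with a bool recording whether this caller took the lock so that lock and unlock stay exactly paired on every path. The conditional-lock shape as a self-contained sketch (the demo names and do_writeback() stub are illustrative, not the f2fs code):

#include <linux/mutex.h>

static DEFINE_MUTEX(demo_writepages);

static int do_writeback(void)
{
	return 0;			/* stand-in for write_cache_pages() */
}

static int demo_write_pages(bool is_dir)
{
	bool locked = false;
	int ret;

	if (!is_dir) {			/* only some callers serialize */
		mutex_lock(&demo_writepages);
		locked = true;
	}
	ret = do_writeback();
	if (locked)
		mutex_unlock(&demo_writepages);
	return ret;
}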

+ 1 - 1
fs/nilfs2/btree.c

@@ -388,7 +388,7 @@ static int nilfs_btree_root_broken(const struct nilfs_btree_node *node,
 	nchildren = nilfs_btree_node_get_nchildren(node);
 
 	if (unlikely(level < NILFS_BTREE_LEVEL_NODE_MIN ||
-		     level > NILFS_BTREE_LEVEL_MAX ||
+		     level >= NILFS_BTREE_LEVEL_MAX ||
 		     nchildren < 0 ||
 		     nchildren > NILFS_BTREE_ROOT_NCHILDREN_MAX)) {
 		pr_crit("NILFS: bad btree root (inode number=%lu): level = %d, flags = 0x%x, nchildren = %d\n",
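
The one-character change matters because a *_MAX constant used as an array size is one past the largest valid index, so the sanity check must reject equality as well. Illustrative shape of the invariant (the names are hypothetical, not the nilfs2 definitions):

#define DEMO_LEVEL_MAX 14		/* array size, not a valid index */

static int demo_path[DEMO_LEVEL_MAX];	/* valid indices: 0 .. DEMO_LEVEL_MAX - 1 */

static int demo_level_valid(int level)
{
	/* 'level <= DEMO_LEVEL_MAX' would let demo_path[DEMO_LEVEL_MAX]
	 * be accessed out of bounds later. */
	return level >= 0 && level < DEMO_LEVEL_MAX;
}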

+ 13 - 0
fs/ocfs2/dlm/dlmmaster.c

@@ -757,6 +757,19 @@ lookup:
 	if (tmpres) {
 		spin_unlock(&dlm->spinlock);
 		spin_lock(&tmpres->spinlock);
+
+		/*
+		 * Right after dlm spinlock was released, dlm_thread could have
+		 * purged the lockres. Check if lockres got unhashed. If so
+		 * start over.
+		 */
+		if (hlist_unhashed(&tmpres->hash_node)) {
+			spin_unlock(&tmpres->spinlock);
+			dlm_lockres_put(tmpres);
+			tmpres = NULL;
+			goto lookup;
+		}
+
 		/* Wait on the thread that is mastering the resource */
 		if (tmpres->owner == DLM_LOCK_RES_OWNER_UNKNOWN) {
 			__dlm_wait_on_lockres(tmpres);
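
This follows the general lock-drop revalidation rule: anything found under one lock may be purged in the window before its own lock is taken, so the result must be rechecked and the lookup retried. A generic sketch of the pattern (table_lookup(), obj_put() and the table lock are assumed helpers for this sketch, not the ocfs2 API):

#include <linux/spinlock.h>
#include <linux/list.h>

struct demo_obj {
	spinlock_t lock;
	struct hlist_node hash_node;
};

static DEFINE_SPINLOCK(table_lock);

extern struct demo_obj *table_lookup(int key);	/* takes a reference */
extern void obj_put(struct demo_obj *obj);

static struct demo_obj *grab_live_object(int key)
{
	struct demo_obj *obj;

retry:
	spin_lock(&table_lock);
	obj = table_lookup(key);
	spin_unlock(&table_lock);
	if (!obj)
		return NULL;

	spin_lock(&obj->lock);
	if (hlist_unhashed(&obj->hash_node)) {	/* purged while unlocked */
		spin_unlock(&obj->lock);
		obj_put(obj);
		goto retry;
	}
	return obj;			/* still hashed: locked + referenced */
}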

+ 15 - 1
include/linux/compiler-gcc.h

@@ -9,10 +9,24 @@
 		   + __GNUC_MINOR__ * 100 \
 		   + __GNUC_PATCHLEVEL__)
 
-
 /* Optimization barrier */
+
 /* The "volatile" is due to gcc bugs */
 #define barrier() __asm__ __volatile__("": : :"memory")
+/*
+ * This version is here e.g. to prevent dead store elimination on @ptr,
+ * where gcc and llvm may behave differently when otherwise using
+ * normal barrier(): while gcc behavior gets along with a normal
+ * barrier(), llvm needs an explicit input variable to be assumed
+ * clobbered. The issue is as follows: while the inline asm might
+ * access any memory it wants, the compiler could have fit all of
+ * @ptr into registers instead, and since @ptr never escaped from
+ * there, it proved that the inline asm wasn't touching any of
+ * it. This version works well with both compilers, i.e. we're telling
+ * the compiler that the inline asm absolutely may see the contents
+ * of @ptr. See also: https://llvm.org/bugs/show_bug.cgi?id=15495
+ */
+#define barrier_data(ptr) __asm__ __volatile__("": :"r"(ptr) :"memory")
 
 /*
  * This macro obfuscates arithmetic on a variable address so that gcc
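
The practical payoff of barrier_data() is scrubbing secrets: a memset() of a buffer that is dead afterwards is exactly the store an optimizer may delete, and with llvm a plain barrier() does not prevent that. A compilable userspace sketch of the pattern (the kernel's memzero_explicit() works along these lines):

#include <string.h>

#define barrier_data(ptr) __asm__ __volatile__("" : : "r"(ptr) : "memory")

/* The "r"(ptr) input makes the compiler assume the asm may read *ptr,
 * so the preceding memset() cannot be proven dead and removed. */
static void explicit_bzero_sketch(void *s, size_t n)
{
	memset(s, 0, n);
	barrier_data(s);
}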

+ 3 - 0
include/linux/compiler-intel.h

@@ -13,9 +13,12 @@
 /* Intel ECC compiler doesn't support gcc specific asm stmts.
  * It uses intrinsics to do the equivalent things.
  */
+#undef barrier_data
 #undef RELOC_HIDE
 #undef OPTIMIZER_HIDE_VAR
 
+#define barrier_data(ptr) barrier()
+
 #define RELOC_HIDE(ptr, off)					\
   ({ unsigned long __ptr;					\
      __ptr = (unsigned long) (ptr);				\

Not all files are shown because too many files changed in this diff