
Merge tag 'kvm-s390-next-4.9-1' of git://git.kernel.org/pub/scm/linux/kernel/git/kvms390/linux into HEAD

KVM: s390: features and fixes for 4.9

- lazy enablement of runtime instrumentation
- up to 255 CPUs for nested guests
- rework of machine check delivery
- cleanups/fixes
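
For context on the first bullet: "lazy enablement" means the runtime-instrumentation (RI) facility is no longer switched on unconditionally at vcpu setup; instead, the first intercepted RI instruction enables interpretation and is then retried. A condensed C sketch, mirroring the new handle_ri() hunk in arch/s390/kvm/priv.c further down (not a complete implementation):

	static int handle_ri(struct kvm_vcpu *vcpu)
	{
		if (test_kvm_facility(vcpu->kvm, 64)) {
			/* guest has facility 64: turn on RI interpretation in the SIE block */
			vcpu->arch.sie_block->ecb3 |= 0x01;
			/* re-execute the intercepted RI instruction under interpretation */
			kvm_s390_retry_instr(vcpu);
			return 0;
		}
		/* facility not offered to the guest: reflect an operation exception */
		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);
	}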
Paolo Bonzini, 9 years ago
Parent
Commit 6f90f1d1d2
100 changed files with 2056 additions and 540 deletions
  1. Documentation/conf.py (+1 -1)
  2. Documentation/hwmon/ftsteutates (+2 -2)
  3. Documentation/kernel-documentation.rst (+0 -6)
  4. Documentation/power/basic-pm-debugging.txt (+26 -1)
  5. Documentation/power/interface.txt (+76 -75)
  6. Documentation/sphinx-static/theme_overrides.css (+2 -1)
  7. MAINTAINERS (+6 -0)
  8. Makefile (+1 -1)
  9. arch/arm/kernel/entry-armv.S (+1 -0)
  10. arch/arm/mach-imx/gpc.c (+6 -0)
  11. arch/arm/mm/mmu.c (+16 -5)
  12. arch/arm64/kernel/sleep.S (+9 -1)
  13. arch/arm64/mm/dump.c (+3 -3)
  14. arch/arm64/mm/numa.c (+2 -0)
  15. arch/parisc/include/uapi/asm/errno.h (+2 -2)
  16. arch/parisc/kernel/processor.c (+0 -8)
  17. arch/parisc/kernel/time.c (+0 -12)
  18. arch/s390/include/asm/facilities_src.h (+24 -0)
  19. arch/s390/include/asm/kvm_host.h (+1 -1)
  20. arch/s390/kernel/asm-offsets.c (+1 -0)
  21. arch/s390/kvm/gaccess.c (+18 -19)
  22. arch/s390/kvm/guestdbg.c (+30 -29)
  23. arch/s390/kvm/intercept.c (+1 -0)
  24. arch/s390/kvm/interrupt.c (+75 -23)
  25. arch/s390/kvm/kvm-s390.c (+31 -39)
  26. arch/s390/kvm/kvm-s390.h (+11 -3)
  27. arch/s390/kvm/priv.c (+21 -0)
  28. arch/x86/kernel/cpu/microcode/amd.c (+8 -1)
  29. arch/x86/kernel/smpboot.c (+16 -9)
  30. arch/x86/power/hibernate_64.c (+1 -1)
  31. drivers/clocksource/bcm_kona_timer.c (+10 -6)
  32. drivers/clocksource/mips-gic-timer.c (+1 -1)
  33. drivers/clocksource/time-armada-370-xp.c (+0 -1)
  34. drivers/edac/Kconfig (+8 -0)
  35. drivers/edac/Makefile (+1 -0)
  36. drivers/edac/skx_edac.c (+1121 -0)
  37. drivers/gpu/drm/amd/amdgpu/amdgpu.h (+2 -2)
  38. drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c (+0 -9)
  39. drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c (+2 -2)
  40. drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c (+2 -1)
  41. drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c (+4 -1)
  42. drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c (+1 -1)
  43. drivers/gpu/drm/drm_fb_helper.c (+1 -1)
  44. drivers/gpu/drm/etnaviv/etnaviv_gpu.c (+5 -5)
  45. drivers/gpu/drm/i915/i915_drv.h (+1 -0)
  46. drivers/gpu/drm/i915/i915_gem.c (+8 -2)
  47. drivers/gpu/drm/i915/i915_gem_gtt.c (+1 -0)
  48. drivers/gpu/drm/i915/i915_reg.h (+1 -0)
  49. drivers/gpu/drm/i915/intel_audio.c (+6 -0)
  50. drivers/gpu/drm/i915/intel_ddi.c (+65 -26)
  51. drivers/gpu/drm/i915/intel_display.c (+110 -60)
  52. drivers/gpu/drm/i915/intel_fbc.c (+20 -0)
  53. drivers/gpu/drm/i915/intel_pm.c (+3 -3)
  54. drivers/gpu/drm/i915/intel_ringbuffer.c (+4 -4)
  55. drivers/gpu/drm/mediatek/Kconfig (+3 -0)
  56. drivers/gpu/drm/radeon/radeon_atpx_handler.c (+0 -9)
  57. drivers/hwmon/it87.c (+1 -1)
  58. drivers/i2c/busses/i2c-at91.c (+14 -10)
  59. drivers/i2c/busses/i2c-bcm-iproc.c (+1 -1)
  60. drivers/i2c/busses/i2c-bcm-kona.c (+1 -1)
  61. drivers/i2c/busses/i2c-brcmstb.c (+1 -1)
  62. drivers/i2c/busses/i2c-cros-ec-tunnel.c (+1 -1)
  63. drivers/i2c/busses/i2c-meson.c (+3 -3)
  64. drivers/i2c/busses/i2c-ocores.c (+10 -4)
  65. drivers/i2c/muxes/i2c-demux-pinctrl.c (+3 -1)
  66. drivers/md/dm-crypt.c (+1 -1)
  67. drivers/md/dm-raid.c (+47 -35)
  68. drivers/md/dm-round-robin.c (+5 -2)
  69. drivers/of/base.c (+3 -11)
  70. drivers/of/fdt.c (+1 -1)
  71. drivers/of/irq.c (+3 -2)
  72. drivers/of/platform.c (+2 -0)
  73. drivers/scsi/aacraid/commctrl.c (+11 -2)
  74. drivers/scsi/fcoe/fcoe_ctlr.c (+1 -1)
  75. drivers/scsi/megaraid/megaraid_sas_base.c (+3 -3)
  76. drivers/scsi/megaraid/megaraid_sas_fusion.c (+1 -1)
  77. drivers/scsi/mpt3sas/mpt3sas_base.c (+11 -11)
  78. drivers/scsi/ses.c (+2 -1)
  79. drivers/usb/class/cdc-acm.c (+2 -3)
  80. drivers/usb/class/cdc-acm.h (+0 -1)
  81. drivers/usb/core/config.c (+63 -3)
  82. drivers/usb/core/devio.c (+5 -2)
  83. drivers/usb/core/hub.c (+9 -14)
  84. drivers/usb/dwc3/dwc3-of-simple.c (+1 -0)
  85. drivers/usb/dwc3/dwc3-pci.c (+2 -0)
  86. drivers/usb/dwc3/gadget.c (+33 -22)
  87. drivers/usb/gadget/composite.c (+4 -2)
  88. drivers/usb/gadget/configfs.c (+2 -0)
  89. drivers/usb/gadget/function/rndis.c (+6 -0)
  90. drivers/usb/gadget/function/u_ether.c (+2 -1)
  91. drivers/usb/gadget/function/uvc_configfs.c (+1 -1)
  92. drivers/usb/gadget/legacy/inode.c (+2 -2)
  93. drivers/usb/gadget/udc/core.c (+4 -1)
  94. drivers/usb/gadget/udc/fsl_qe_udc.c (+1 -1)
  95. drivers/usb/host/ehci-hcd.c (+2 -2)
  96. drivers/usb/host/max3421-hcd.c (+1 -1)
  97. drivers/usb/host/xhci-hub.c (+3 -0)
  98. drivers/usb/host/xhci-pci.c (+2 -1)
  99. drivers/usb/host/xhci-ring.c (+9 -7)
  100. drivers/usb/misc/ftdi-elan.c (+5 -5)

+ 1 - 1
Documentation/conf.py

@@ -131,7 +131,7 @@ pygments_style = 'sphinx'
 todo_include_todos = False
 
 primary_domain = 'C'
-highlight_language = 'C'
+highlight_language = 'guess'
 
 # -- Options for HTML output ----------------------------------------------
 

+ 2 - 2
Documentation/hwmon/ftsteutates

@@ -19,5 +19,5 @@ enhancements. It can monitor up to 4 voltages, 16 temperatures and
 implemented in this driver.
 
 Specification of the chip can be found here:
-ftp:///pub/Mainboard-OEM-Sales/Services/Software&Tools/Linux_SystemMonitoring&Watchdog&GPIO/BMC-Teutates_Specification_V1.21.pdf
-ftp:///pub/Mainboard-OEM-Sales/Services/Software&Tools/Linux_SystemMonitoring&Watchdog&GPIO/Fujitsu_mainboards-1-Sensors_HowTo-en-US.pdf
+ftp://ftp.ts.fujitsu.com/pub/Mainboard-OEM-Sales/Services/Software&Tools/Linux_SystemMonitoring&Watchdog&GPIO/BMC-Teutates_Specification_V1.21.pdf
+ftp://ftp.ts.fujitsu.com/pub/Mainboard-OEM-Sales/Services/Software&Tools/Linux_SystemMonitoring&Watchdog&GPIO/Fujitsu_mainboards-1-Sensors_HowTo-en-US.pdf

+ 0 - 6
Documentation/kernel-documentation.rst

@@ -366,8 +366,6 @@ Domain`_ references.
 Cross-referencing from reStructuredText
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
-.. highlight:: none
-
 To cross-reference the functions and types defined in the kernel-doc comments
 from reStructuredText documents, please use the `Sphinx C Domain`_
 references. For example::
@@ -390,8 +388,6 @@ For further details, please refer to the `Sphinx C Domain`_ documentation.
 Function documentation
 ----------------------
 
-.. highlight:: c
-
 The general format of a function and function-like macro kernel-doc comment is::
 
   /**
@@ -572,8 +568,6 @@ DocBook XML [DEPRECATED]
 Converting DocBook to Sphinx
 ----------------------------
 
-.. highlight:: none
-
 Over time, we expect all of the documents under ``Documentation/DocBook`` to be
 converted to Sphinx and reStructuredText. For most DocBook XML documents, a good
 enough solution is to use the simple ``Documentation/sphinx/tmplcvt`` script,

+ 26 - 1
Documentation/power/basic-pm-debugging.txt

@@ -164,7 +164,32 @@ load n/2 modules more and try again.
 Again, if you find the offending module(s), it(they) must be unloaded every time
 before hibernation, and please report the problem with it(them).
 
-c) Advanced debugging
+c) Using the "test_resume" hibernation option
+
+/sys/power/disk generally tells the kernel what to do after creating a
+hibernation image.  One of the available options is "test_resume" which
+causes the just created image to be used for immediate restoration.  Namely,
+after doing:
+
+# echo test_resume > /sys/power/disk
+# echo disk > /sys/power/state
+
+a hibernation image will be created and a resume from it will be triggered
+immediately without involving the platform firmware in any way.
+
+That test can be used to check if failures to resume from hibernation are
+related to bad interactions with the platform firmware.  That is, if the above
+works every time, but resume from actual hibernation does not work or is
+unreliable, the platform firmware may be responsible for the failures.
+
+On architectures and platforms that support using different kernels to restore
+hibernation images (that is, the kernel used to read the image from storage and
+load it into memory is different from the one included in the image) or support
+kernel address space randomization, it also can be used to check if failures
+to resume may be related to the differences between the restore and image
+kernels.
+
+d) Advanced debugging
 
 In case that hibernation does not work on your system even in the minimal
 configuration and compiling more drivers as modules is not practical or some

+ 76 - 75
Documentation/power/interface.txt

@@ -1,75 +1,76 @@
-Power Management Interface
-
-
-The power management subsystem provides a unified sysfs interface to 
-userspace, regardless of what architecture or platform one is
-running. The interface exists in /sys/power/ directory (assuming sysfs
-is mounted at /sys). 
-
-/sys/power/state controls system power state. Reading from this file
-returns what states are supported, which is hard-coded to 'freeze',
-'standby' (Power-On Suspend), 'mem' (Suspend-to-RAM), and 'disk'
-(Suspend-to-Disk). 
-
-Writing to this file one of those strings causes the system to
-transition into that state. Please see the file
-Documentation/power/states.txt for a description of each of those
-states.
-
-
-/sys/power/disk controls the operating mode of the suspend-to-disk
-mechanism. Suspend-to-disk can be handled in several ways. We have a
-few options for putting the system to sleep - using the platform driver
-(e.g. ACPI or other suspend_ops), powering off the system or rebooting the
-system (for testing).
-
-Additionally, /sys/power/disk can be used to turn on one of the two testing
-modes of the suspend-to-disk mechanism: 'testproc' or 'test'.  If the
-suspend-to-disk mechanism is in the 'testproc' mode, writing 'disk' to
-/sys/power/state will cause the kernel to disable nonboot CPUs and freeze
-tasks, wait for 5 seconds, unfreeze tasks and enable nonboot CPUs.  If it is
-in the 'test' mode, writing 'disk' to /sys/power/state will cause the kernel
-to disable nonboot CPUs and freeze tasks, shrink memory, suspend devices, wait
-for 5 seconds, resume devices, unfreeze tasks and enable nonboot CPUs.  Then,
-we are able to look in the log messages and work out, for example, which code
-is being slow and which device drivers are misbehaving.
-
-Reading from this file will display all supported modes and the currently
-selected one in brackets, for example
-
-	[shutdown] reboot test testproc
-
-Writing to this file will accept one of
-
-       'platform' (only if the platform supports it)
-       'shutdown'
-       'reboot'
-       'testproc'
-       'test'
-
-/sys/power/image_size controls the size of the image created by
-the suspend-to-disk mechanism.  It can be written a string
-representing a non-negative integer that will be used as an upper
-limit of the image size, in bytes.  The suspend-to-disk mechanism will
-do its best to ensure the image size will not exceed that number.  However,
-if this turns out to be impossible, it will try to suspend anyway using the
-smallest image possible.  In particular, if "0" is written to this file, the
-suspend image will be as small as possible.
-
-Reading from this file will display the current image size limit, which
-is set to 2/5 of available RAM by default.
-
-/sys/power/pm_trace controls the code which saves the last PM event point in
-the RTC across reboots, so that you can debug a machine that just hangs
-during suspend (or more commonly, during resume).  Namely, the RTC is only
-used to save the last PM event point if this file contains '1'.  Initially it
-contains '0' which may be changed to '1' by writing a string representing a
-nonzero integer into it.
-
-To use this debugging feature you should attempt to suspend the machine, then
-reboot it and run
-
-	dmesg -s 1000000 | grep 'hash matches'
-
-CAUTION: Using it will cause your machine's real-time (CMOS) clock to be
-set to a random invalid time after a resume.
+Power Management Interface for System Sleep
+
+Copyright (c) 2016 Intel Corp., Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+
+The power management subsystem provides userspace with a unified sysfs interface
+for system sleep regardless of the underlying system architecture or platform.
+The interface is located in the /sys/power/ directory (assuming that sysfs is
+mounted at /sys).
+
+/sys/power/state is the system sleep state control file.
+
+Reading from it returns a list of supported sleep states, encoded as:
+
+'freeze' (Suspend-to-Idle)
+'standby' (Power-On Suspend)
+'mem' (Suspend-to-RAM)
+'disk' (Suspend-to-Disk)
+
+Suspend-to-Idle is always supported.  Suspend-to-Disk is always supported
+too as long the kernel has been configured to support hibernation at all
+(ie. CONFIG_HIBERNATION is set in the kernel configuration file).  Support
+for Suspend-to-RAM and Power-On Suspend depends on the capabilities of the
+platform.
+
+If one of the strings listed in /sys/power/state is written to it, the system
+will attempt to transition into the corresponding sleep state.  Refer to
+Documentation/power/states.txt for a description of each of those states.
+
+/sys/power/disk controls the operating mode of hibernation (Suspend-to-Disk).
+Specifically, it tells the kernel what to do after creating a hibernation image.
+
+Reading from it returns a list of supported options encoded as:
+
+'platform' (put the system into sleep using a platform-provided method)
+'shutdown' (shut the system down)
+'reboot' (reboot the system)
+'suspend' (trigger a Suspend-to-RAM transition)
+'test_resume' (resume-after-hibernation test mode)
+
+The currently selected option is printed in square brackets.
+
+The 'platform' option is only available if the platform provides a special
+mechanism to put the system to sleep after creating a hibernation image (ACPI
+does that, for example).  The 'suspend' option is available if Suspend-to-RAM
+is supported.  Refer to Documentation/power/basic_pm_debugging.txt for the
+description of the 'test_resume' option.
+
+To select an option, write the string representing it to /sys/power/disk.
+
+/sys/power/image_size controls the size of hibernation images.
+
+It can be written a string representing a non-negative integer that will be
+used as a best-effort upper limit of the image size, in bytes.  The hibernation
+core will do its best to ensure that the image size will not exceed that number.
+However, if that turns out to be impossible to achieve, a hibernation image will
+still be created and its size will be as small as possible.  In particular,
+writing '0' to this file will enforce hibernation images to be as small as
+possible.
+
+Reading from this file returns the current image size limit, which is set to
+around 2/5 of available RAM by default.
+
+/sys/power/pm_trace controls the PM trace mechanism saving the last suspend
+or resume event point in the RTC across reboots.
+
+It helps to debug hard lockups or reboots due to device driver failures that
+occur during system suspend or resume (which is more common) more effectively.
+
+If /sys/power/pm_trace contains '1', the fingerprint of each suspend/resume
+event point in turn will be stored in the RTC memory (overwriting the actual
+RTC information), so it will survive a system crash if one occurs right after
+storing it and it can be used later to identify the driver that caused the crash
+to happen (see Documentation/power/s2ram.txt for more information).
+
+Initially it contains '0' which may be changed to '1' by writing a string
+representing a nonzero integer into it.

+ 2 - 1
Documentation/sphinx-static/theme_overrides.css

@@ -42,11 +42,12 @@
     caption a.headerlink { opacity: 0; }
     caption a.headerlink:hover { opacity: 1; }
 
-    /* inline literal: drop the borderbox and red color */
+    /* inline literal: drop the borderbox, padding and red color */
 
     code, .rst-content tt, .rst-content code {
         color: inherit;
         border: none;
+        padding: unset;
         background: inherit;
         font-size: 85%;
     }

+ 6 - 0
MAINTAINERS

@@ -4525,6 +4525,12 @@ L:	linux-edac@vger.kernel.org
 S:	Maintained
 F:	drivers/edac/sb_edac.c
 
+EDAC-SKYLAKE
+M:	Tony Luck <tony.luck@intel.com>
+L:	linux-edac@vger.kernel.org
+S:	Maintained
+F:	drivers/edac/skx_edac.c
+
 EDAC-XGENE
 APPLIED MICRO (APM) X-GENE SOC EDAC
 M:     Loc Ho <lho@apm.com>

+ 1 - 1
Makefile

@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 8
 SUBLEVEL = 0
-EXTRAVERSION = -rc2
+EXTRAVERSION = -rc3
 NAME = Psychotic Stoned Sheep
 
 # *DOCUMENTATION*

+ 1 - 0
arch/arm/kernel/entry-armv.S

@@ -295,6 +295,7 @@ __und_svc_fault:
 	bl	__und_fault
 
 __und_svc_finish:
+	get_thread_info tsk
 	ldr	r5, [sp, #S_PSR]		@ Get SVC cpsr
 	svc_exit r5				@ return from exception
  UNWIND(.fnend		)

+ 6 - 0
arch/arm/mach-imx/gpc.c

@@ -271,6 +271,12 @@ static int __init imx_gpc_init(struct device_node *node,
 	for (i = 0; i < IMR_NUM; i++)
 		writel_relaxed(~0, gpc_base + GPC_IMR1 + i * 4);
 
+	/*
+	 * Clear the OF_POPULATED flag set in of_irq_init so that
+	 * later the GPC power domain driver will not be skipped.
+	 */
+	of_node_clear_flag(node, OF_POPULATED);
+
 	return 0;
 }
 IRQCHIP_DECLARE(imx_gpc, "fsl,imx6q-gpc", imx_gpc_init);

+ 16 - 5
arch/arm/mm/mmu.c

@@ -728,7 +728,8 @@ static void *__init late_alloc(unsigned long sz)
 {
 	void *ptr = (void *)__get_free_pages(PGALLOC_GFP, get_order(sz));
 
-	BUG_ON(!ptr);
+	if (!ptr || !pgtable_page_ctor(virt_to_page(ptr)))
+		BUG();
 	return ptr;
 }
 
@@ -1155,10 +1156,19 @@ void __init sanity_check_meminfo(void)
 {
 	phys_addr_t memblock_limit = 0;
 	int highmem = 0;
-	phys_addr_t vmalloc_limit = __pa(vmalloc_min - 1) + 1;
+	u64 vmalloc_limit;
 	struct memblock_region *reg;
 	bool should_use_highmem = false;
 
+	/*
+	 * Let's use our own (unoptimized) equivalent of __pa() that is
+	 * not affected by wrap-arounds when sizeof(phys_addr_t) == 4.
+	 * The result is used as the upper bound on physical memory address
+	 * and may itself be outside the valid range for which phys_addr_t
+	 * and therefore __pa() is defined.
+	 */
+	vmalloc_limit = (u64)(uintptr_t)vmalloc_min - PAGE_OFFSET + PHYS_OFFSET;
+
 	for_each_memblock(memory, reg) {
 		phys_addr_t block_start = reg->base;
 		phys_addr_t block_end = reg->base + reg->size;
@@ -1183,10 +1193,11 @@ void __init sanity_check_meminfo(void)
 			if (reg->size > size_limit) {
 				phys_addr_t overlap_size = reg->size - size_limit;
 
-				pr_notice("Truncating RAM at %pa-%pa to -%pa",
-					  &block_start, &block_end, &vmalloc_limit);
-				memblock_remove(vmalloc_limit, overlap_size);
+				pr_notice("Truncating RAM at %pa-%pa",
+					  &block_start, &block_end);
 				block_end = vmalloc_limit;
+				pr_cont(" to -%pa", &block_end);
+				memblock_remove(vmalloc_limit, overlap_size);
 				should_use_highmem = true;
 			}
 		}

+ 9 - 1
arch/arm64/kernel/sleep.S

@@ -101,12 +101,20 @@ ENTRY(cpu_resume)
 	bl	el2_setup		// if in EL2 drop to EL1 cleanly
 	/* enable the MMU early - so we can access sleep_save_stash by va */
 	adr_l	lr, __enable_mmu	/* __cpu_setup will return here */
-	ldr	x27, =_cpu_resume	/* __enable_mmu will branch here */
+	adr_l	x27, _resume_switched	/* __enable_mmu will branch here */
 	adrp	x25, idmap_pg_dir
 	adrp	x26, swapper_pg_dir
 	b	__cpu_setup
 ENDPROC(cpu_resume)
 
+	.pushsection	".idmap.text", "ax"
+_resume_switched:
+	ldr	x8, =_cpu_resume
+	br	x8
+ENDPROC(_resume_switched)
+	.ltorg
+	.popsection
+
 ENTRY(_cpu_resume)
 	mrs	x1, mpidr_el1
 	adrp	x8, mpidr_hash

+ 3 - 3
arch/arm64/mm/dump.c

@@ -242,7 +242,7 @@ static void note_page(struct pg_state *st, unsigned long addr, unsigned level,
 
 static void walk_pte(struct pg_state *st, pmd_t *pmd, unsigned long start)
 {
-	pte_t *pte = pte_offset_kernel(pmd, 0);
+	pte_t *pte = pte_offset_kernel(pmd, 0UL);
 	unsigned long addr;
 	unsigned i;
 
@@ -254,7 +254,7 @@ static void walk_pte(struct pg_state *st, pmd_t *pmd, unsigned long start)
 
 static void walk_pmd(struct pg_state *st, pud_t *pud, unsigned long start)
 {
-	pmd_t *pmd = pmd_offset(pud, 0);
+	pmd_t *pmd = pmd_offset(pud, 0UL);
 	unsigned long addr;
 	unsigned i;
 
@@ -271,7 +271,7 @@ static void walk_pmd(struct pg_state *st, pud_t *pud, unsigned long start)
 
 static void walk_pud(struct pg_state *st, pgd_t *pgd, unsigned long start)
 {
-	pud_t *pud = pud_offset(pgd, 0);
+	pud_t *pud = pud_offset(pgd, 0UL);
 	unsigned long addr;
 	unsigned i;
 

+ 2 - 0
arch/arm64/mm/numa.c

@@ -23,6 +23,8 @@
 #include <linux/module.h>
 #include <linux/of.h>
 
+#include <asm/acpi.h>
+
 struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
 EXPORT_SYMBOL(node_data);
 nodemask_t numa_nodes_parsed __initdata;

+ 2 - 2
arch/parisc/include/uapi/asm/errno.h

@@ -97,10 +97,10 @@
 #define	ENOTCONN	235	/* Transport endpoint is not connected */
 #define	ESHUTDOWN	236	/* Cannot send after transport endpoint shutdown */
 #define	ETOOMANYREFS	237	/* Too many references: cannot splice */
-#define EREFUSED	ECONNREFUSED	/* for HP's NFS apparently */
 #define	ETIMEDOUT	238	/* Connection timed out */
 #define	ECONNREFUSED	239	/* Connection refused */
-#define EREMOTERELEASE	240	/* Remote peer released connection */
+#define	EREFUSED	ECONNREFUSED	/* for HP's NFS apparently */
+#define	EREMOTERELEASE	240	/* Remote peer released connection */
 #define	EHOSTDOWN	241	/* Host is down */
 #define	EHOSTUNREACH	242	/* No route to host */
 

+ 0 - 8
arch/parisc/kernel/processor.c

@@ -51,8 +51,6 @@ EXPORT_SYMBOL(_parisc_requires_coherency);
 
 DEFINE_PER_CPU(struct cpuinfo_parisc, cpu_data);
 
-extern int update_cr16_clocksource(void);	/* from time.c */
-
 /*
 **  	PARISC CPU driver - claim "device" and initialize CPU data structures.
 **
@@ -228,12 +226,6 @@ static int processor_probe(struct parisc_device *dev)
 	}
 #endif
 
-	/* If we've registered more than one cpu,
-	 * we'll use the jiffies clocksource since cr16
-	 * is not synchronized between CPUs.
-	 */
-	update_cr16_clocksource();
-
 	return 0;
 }
 

+ 0 - 12
arch/parisc/kernel/time.c

@@ -221,18 +221,6 @@ static struct clocksource clocksource_cr16 = {
 	.flags			= CLOCK_SOURCE_IS_CONTINUOUS,
 };
 
-int update_cr16_clocksource(void)
-{
-	/* since the cr16 cycle counters are not synchronized across CPUs,
-	   we'll check if we should switch to a safe clocksource: */
-	if (clocksource_cr16.rating != 0 && num_online_cpus() > 1) {
-		clocksource_change_rating(&clocksource_cr16, 0);
-		return 1;
-	}
-
-	return 0;
-}
-
 void __init start_cpu_itimer(void)
 {
 	unsigned int cpu = smp_processor_id();

+ 24 - 0
arch/s390/include/asm/facilities_src.h

@@ -55,4 +55,28 @@ static struct facility_def facility_defs[] = {
 			-1 /* END */
 		}
 	},
+	{
+		.name = "FACILITIES_KVM",
+		.bits = (int[]){
+			0,  /* N3 instructions */
+			1,  /* z/Arch mode installed */
+			2,  /* z/Arch mode active */
+			3,  /* DAT-enhancement */
+			4,  /* idte segment table */
+			5,  /* idte region table */
+			6,  /* ASN-and-LX reuse */
+			7,  /* stfle */
+			8,  /* enhanced-DAT 1 */
+			9,  /* sense-running-status */
+			10, /* conditional sske */
+			13, /* ipte-range */
+			14, /* nonquiescing key-setting */
+			73, /* transactional execution */
+			75, /* access-exception-fetch/store indication */
+			76, /* msa extension 3 */
+			77, /* msa extension 4 */
+			78, /* enhanced-DAT 2 */
+			-1  /* END */
+		}
+	},
 };

+ 1 - 1
arch/s390/include/asm/kvm_host.h

@@ -28,7 +28,7 @@
 
 #define KVM_S390_BSCA_CPU_SLOTS 64
 #define KVM_S390_ESCA_CPU_SLOTS 248
-#define KVM_MAX_VCPUS KVM_S390_ESCA_CPU_SLOTS
+#define KVM_MAX_VCPUS 255
 #define KVM_USER_MEM_SLOTS 32
 
 /*

+ 1 - 0
arch/s390/kernel/asm-offsets.c

@@ -125,6 +125,7 @@ int main(void)
 	OFFSET(__LC_STFL_FAC_LIST, lowcore, stfl_fac_list);
 	OFFSET(__LC_STFLE_FAC_LIST, lowcore, stfle_fac_list);
 	OFFSET(__LC_MCCK_CODE, lowcore, mcck_interruption_code);
+	OFFSET(__LC_EXT_DAMAGE_CODE, lowcore, external_damage_code);
 	OFFSET(__LC_MCCK_FAIL_STOR_ADDR, lowcore, failing_storage_address);
 	OFFSET(__LC_LAST_BREAK, lowcore, breaking_event_addr);
 	OFFSET(__LC_RST_OLD_PSW, lowcore, restart_old_psw);

+ 18 - 19
arch/s390/kvm/gaccess.c

@@ -495,6 +495,18 @@ static int trans_exc(struct kvm_vcpu *vcpu, int code, unsigned long gva,
 	tec = (struct trans_exc_code_bits *)&pgm->trans_exc_code;
 
 	switch (code) {
+	case PGM_PROTECTION:
+		switch (prot) {
+		case PROT_TYPE_ALC:
+			tec->b60 = 1;
+			/* FALL THROUGH */
+		case PROT_TYPE_DAT:
+			tec->b61 = 1;
+			break;
+		default: /* LA and KEYC set b61 to 0, other params undefined */
+			return code;
+		}
+		/* FALL THROUGH */
 	case PGM_ASCE_TYPE:
 	case PGM_PAGE_TRANSLATION:
 	case PGM_REGION_FIRST_TRANS:
@@ -504,8 +516,7 @@ static int trans_exc(struct kvm_vcpu *vcpu, int code, unsigned long gva,
 		/*
 		 * op_access_id only applies to MOVE_PAGE -> set bit 61
 		 * exc_access_id has to be set to 0 for some instructions. Both
-		 * cases have to be handled by the caller. We can always store
-		 * exc_access_id, as it is undefined for non-ar cases.
+		 * cases have to be handled by the caller.
 		 */
 		tec->addr = gva >> PAGE_SHIFT;
 		tec->fsi = mode == GACC_STORE ? FSI_STORE : FSI_FETCH;
@@ -516,25 +527,13 @@ static int trans_exc(struct kvm_vcpu *vcpu, int code, unsigned long gva,
 	case PGM_ASTE_VALIDITY:
 	case PGM_ASTE_SEQUENCE:
 	case PGM_EXTENDED_AUTHORITY:
+		/*
+		 * We can always store exc_access_id, as it is
+		 * undefined for non-ar cases. It is undefined for
+		 * most DAT protection exceptions.
+		 */
 		pgm->exc_access_id = ar;
 		break;
-	case PGM_PROTECTION:
-		switch (prot) {
-		case PROT_TYPE_ALC:
-			tec->b60 = 1;
-			/* FALL THROUGH */
-		case PROT_TYPE_DAT:
-			tec->b61 = 1;
-			tec->addr = gva >> PAGE_SHIFT;
-			tec->fsi = mode == GACC_STORE ? FSI_STORE : FSI_FETCH;
-			tec->as = psw_bits(vcpu->arch.sie_block->gpsw).as;
-			/* exc_access_id is undefined for most cases */
-			pgm->exc_access_id = ar;
-			break;
-		default: /* LA and KEYC set b61 to 0, other params undefined */
-			break;
-		}
-		break;
 	}
 	return code;
 }

+ 30 - 29
arch/s390/kvm/guestdbg.c

@@ -206,7 +206,7 @@ static int __import_wp_info(struct kvm_vcpu *vcpu,
 int kvm_s390_import_bp_data(struct kvm_vcpu *vcpu,
 			    struct kvm_guest_debug *dbg)
 {
-	int ret = 0, nr_wp = 0, nr_bp = 0, i, size;
+	int ret = 0, nr_wp = 0, nr_bp = 0, i;
 	struct kvm_hw_breakpoint *bp_data = NULL;
 	struct kvm_hw_wp_info_arch *wp_info = NULL;
 	struct kvm_hw_bp_info_arch *bp_info = NULL;
@@ -216,17 +216,10 @@ int kvm_s390_import_bp_data(struct kvm_vcpu *vcpu,
 	else if (dbg->arch.nr_hw_bp > MAX_BP_COUNT)
 		return -EINVAL;
 
-	size = dbg->arch.nr_hw_bp * sizeof(struct kvm_hw_breakpoint);
-	bp_data = kmalloc(size, GFP_KERNEL);
-	if (!bp_data) {
-		ret = -ENOMEM;
-		goto error;
-	}
-
-	if (copy_from_user(bp_data, dbg->arch.hw_bp, size)) {
-		ret = -EFAULT;
-		goto error;
-	}
+	bp_data = memdup_user(dbg->arch.hw_bp,
+			      sizeof(*bp_data) * dbg->arch.nr_hw_bp);
+	if (IS_ERR(bp_data))
+		return PTR_ERR(bp_data);
 
 	for (i = 0; i < dbg->arch.nr_hw_bp; i++) {
 		switch (bp_data[i].type) {
@@ -241,17 +234,19 @@ int kvm_s390_import_bp_data(struct kvm_vcpu *vcpu,
 		}
 	}
 
-	size = nr_wp * sizeof(struct kvm_hw_wp_info_arch);
-	if (size > 0) {
-		wp_info = kmalloc(size, GFP_KERNEL);
+	if (nr_wp > 0) {
+		wp_info = kmalloc_array(nr_wp,
+					sizeof(*wp_info),
+					GFP_KERNEL);
 		if (!wp_info) {
 			ret = -ENOMEM;
 			goto error;
 		}
 	}
-	size = nr_bp * sizeof(struct kvm_hw_bp_info_arch);
-	if (size > 0) {
-		bp_info = kmalloc(size, GFP_KERNEL);
+	if (nr_bp > 0) {
+		bp_info = kmalloc_array(nr_bp,
+					sizeof(*bp_info),
+					GFP_KERNEL);
 		if (!bp_info) {
 			ret = -ENOMEM;
 			goto error;
@@ -382,14 +377,20 @@ void kvm_s390_prepare_debug_exit(struct kvm_vcpu *vcpu)
 	vcpu->guest_debug &= ~KVM_GUESTDBG_EXIT_PENDING;
 }
 
+#define PER_CODE_MASK		(PER_EVENT_MASK >> 24)
+#define PER_CODE_BRANCH		(PER_EVENT_BRANCH >> 24)
+#define PER_CODE_IFETCH		(PER_EVENT_IFETCH >> 24)
+#define PER_CODE_STORE		(PER_EVENT_STORE >> 24)
+#define PER_CODE_STORE_REAL	(PER_EVENT_STORE_REAL >> 24)
+
 #define per_bp_event(code) \
-			(code & (PER_EVENT_IFETCH | PER_EVENT_BRANCH))
+			(code & (PER_CODE_IFETCH | PER_CODE_BRANCH))
 #define per_write_wp_event(code) \
-			(code & (PER_EVENT_STORE | PER_EVENT_STORE_REAL))
+			(code & (PER_CODE_STORE | PER_CODE_STORE_REAL))
 
 static int debug_exit_required(struct kvm_vcpu *vcpu)
 {
-	u32 perc = (vcpu->arch.sie_block->perc << 24);
+	u8 perc = vcpu->arch.sie_block->perc;
 	struct kvm_debug_exit_arch *debug_exit = &vcpu->run->debug.arch;
 	struct kvm_hw_wp_info_arch *wp_info = NULL;
 	struct kvm_hw_bp_info_arch *bp_info = NULL;
@@ -444,7 +445,7 @@ int kvm_s390_handle_per_ifetch_icpt(struct kvm_vcpu *vcpu)
 	const u8 ilen = kvm_s390_get_ilen(vcpu);
 	struct kvm_s390_pgm_info pgm_info = {
 		.code = PGM_PER,
-		.per_code = PER_EVENT_IFETCH >> 24,
+		.per_code = PER_CODE_IFETCH,
 		.per_address = __rewind_psw(vcpu->arch.sie_block->gpsw, ilen),
 	};
 
@@ -458,33 +459,33 @@ int kvm_s390_handle_per_ifetch_icpt(struct kvm_vcpu *vcpu)
 
 static void filter_guest_per_event(struct kvm_vcpu *vcpu)
 {
-	u32 perc = vcpu->arch.sie_block->perc << 24;
+	const u8 perc = vcpu->arch.sie_block->perc;
 	u64 peraddr = vcpu->arch.sie_block->peraddr;
 	u64 addr = vcpu->arch.sie_block->gpsw.addr;
 	u64 cr9 = vcpu->arch.sie_block->gcr[9];
 	u64 cr10 = vcpu->arch.sie_block->gcr[10];
 	u64 cr11 = vcpu->arch.sie_block->gcr[11];
 	/* filter all events, demanded by the guest */
-	u32 guest_perc = perc & cr9 & PER_EVENT_MASK;
+	u8 guest_perc = perc & (cr9 >> 24) & PER_CODE_MASK;
 
 	if (!guest_per_enabled(vcpu))
 		guest_perc = 0;
 
 	/* filter "successful-branching" events */
-	if (guest_perc & PER_EVENT_BRANCH &&
+	if (guest_perc & PER_CODE_BRANCH &&
 	    cr9 & PER_CONTROL_BRANCH_ADDRESS &&
 	    !in_addr_range(addr, cr10, cr11))
-		guest_perc &= ~PER_EVENT_BRANCH;
+		guest_perc &= ~PER_CODE_BRANCH;
 
 	/* filter "instruction-fetching" events */
-	if (guest_perc & PER_EVENT_IFETCH &&
+	if (guest_perc & PER_CODE_IFETCH &&
 	    !in_addr_range(peraddr, cr10, cr11))
-		guest_perc &= ~PER_EVENT_IFETCH;
+		guest_perc &= ~PER_CODE_IFETCH;
 
 	/* All other PER events will be given to the guest */
 	/* TODO: Check altered address/address space */
 
-	vcpu->arch.sie_block->perc = guest_perc >> 24;
+	vcpu->arch.sie_block->perc = guest_perc;
 
 	if (!guest_perc)
 		vcpu->arch.sie_block->iprcc &= ~PGM_PER;

+ 1 - 0
arch/s390/kvm/intercept.c

@@ -29,6 +29,7 @@ static const intercept_handler_t instruction_handlers[256] = {
 	[0x01] = kvm_s390_handle_01,
 	[0x82] = kvm_s390_handle_lpsw,
 	[0x83] = kvm_s390_handle_diag,
+	[0xaa] = kvm_s390_handle_aa,
 	[0xae] = kvm_s390_handle_sigp,
 	[0xb2] = kvm_s390_handle_b2,
 	[0xb6] = kvm_s390_handle_stctl,

+ 75 - 23
arch/s390/kvm/interrupt.c

@@ -24,6 +24,8 @@
 #include <asm/sclp.h>
 #include <asm/isc.h>
 #include <asm/gmap.h>
+#include <asm/switch_to.h>
+#include <asm/nmi.h>
 #include "kvm-s390.h"
 #include "gaccess.h"
 #include "trace-s390.h"
@@ -40,6 +42,7 @@ static int sca_ext_call_pending(struct kvm_vcpu *vcpu, int *src_id)
 	if (!(atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_ECALL_PEND))
 		return 0;
 
+	BUG_ON(!kvm_s390_use_sca_entries());
 	read_lock(&vcpu->kvm->arch.sca_lock);
 	if (vcpu->kvm->arch.use_esca) {
 		struct esca_block *sca = vcpu->kvm->arch.sca;
@@ -68,6 +71,7 @@ static int sca_inject_ext_call(struct kvm_vcpu *vcpu, int src_id)
 {
 	int expect, rc;
 
+	BUG_ON(!kvm_s390_use_sca_entries());
 	read_lock(&vcpu->kvm->arch.sca_lock);
 	if (vcpu->kvm->arch.use_esca) {
 		struct esca_block *sca = vcpu->kvm->arch.sca;
@@ -109,6 +113,8 @@ static void sca_clear_ext_call(struct kvm_vcpu *vcpu)
 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
 	int rc, expect;
 
+	if (!kvm_s390_use_sca_entries())
+		return;
 	atomic_andnot(CPUSTAT_ECALL_PEND, li->cpuflags);
 	read_lock(&vcpu->kvm->arch.sca_lock);
 	if (vcpu->kvm->arch.use_esca) {
@@ -400,12 +406,78 @@ static int __must_check __deliver_pfault_init(struct kvm_vcpu *vcpu)
 	return rc ? -EFAULT : 0;
 }
 
+static int __write_machine_check(struct kvm_vcpu *vcpu,
+				 struct kvm_s390_mchk_info *mchk)
+{
+	unsigned long ext_sa_addr;
+	freg_t fprs[NUM_FPRS];
+	union mci mci;
+	int rc;
+
+	mci.val = mchk->mcic;
+	/* take care of lazy register loading via vcpu load/put */
+	save_fpu_regs();
+	save_access_regs(vcpu->run->s.regs.acrs);
+
+	/* Extended save area */
+	rc = read_guest_lc(vcpu, __LC_VX_SAVE_AREA_ADDR, &ext_sa_addr,
+			    sizeof(unsigned long));
+	/* Only bits 0-53 are used for address formation */
+	ext_sa_addr &= ~0x3ffUL;
+	if (!rc && mci.vr && ext_sa_addr && test_kvm_facility(vcpu->kvm, 129)) {
+		if (write_guest_abs(vcpu, ext_sa_addr, vcpu->run->s.regs.vrs,
+				    512))
+			mci.vr = 0;
+	} else {
+		mci.vr = 0;
+	}
+
+	/* General interruption information */
+	rc |= put_guest_lc(vcpu, 1, (u8 __user *) __LC_AR_MODE_ID);
+	rc |= write_guest_lc(vcpu, __LC_MCK_OLD_PSW,
+			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+	rc |= read_guest_lc(vcpu, __LC_MCK_NEW_PSW,
+			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+	rc |= put_guest_lc(vcpu, mci.val, (u64 __user *) __LC_MCCK_CODE);
+
+	/* Register-save areas */
+	if (MACHINE_HAS_VX) {
+		convert_vx_to_fp(fprs, (__vector128 *) vcpu->run->s.regs.vrs);
+		rc |= write_guest_lc(vcpu, __LC_FPREGS_SAVE_AREA, fprs, 128);
+	} else {
+		rc |= write_guest_lc(vcpu, __LC_FPREGS_SAVE_AREA,
+				     vcpu->run->s.regs.fprs, 128);
+	}
+	rc |= write_guest_lc(vcpu, __LC_GPREGS_SAVE_AREA,
+			     vcpu->run->s.regs.gprs, 128);
+	rc |= put_guest_lc(vcpu, current->thread.fpu.fpc,
+			   (u32 __user *) __LC_FP_CREG_SAVE_AREA);
+	rc |= put_guest_lc(vcpu, vcpu->arch.sie_block->todpr,
+			   (u32 __user *) __LC_TOD_PROGREG_SAVE_AREA);
+	rc |= put_guest_lc(vcpu, kvm_s390_get_cpu_timer(vcpu),
+			   (u64 __user *) __LC_CPU_TIMER_SAVE_AREA);
+	rc |= put_guest_lc(vcpu, vcpu->arch.sie_block->ckc >> 8,
+			   (u64 __user *) __LC_CLOCK_COMP_SAVE_AREA);
+	rc |= write_guest_lc(vcpu, __LC_AREGS_SAVE_AREA,
+			     &vcpu->run->s.regs.acrs, 64);
+	rc |= write_guest_lc(vcpu, __LC_CREGS_SAVE_AREA,
+			     &vcpu->arch.sie_block->gcr, 128);
+
+	/* Extended interruption information */
+	rc |= put_guest_lc(vcpu, mchk->ext_damage_code,
+			   (u32 __user *) __LC_EXT_DAMAGE_CODE);
+	rc |= put_guest_lc(vcpu, mchk->failing_storage_address,
+			   (u64 __user *) __LC_MCCK_FAIL_STOR_ADDR);
+	rc |= write_guest_lc(vcpu, __LC_PSW_SAVE_AREA, &mchk->fixed_logout,
+			     sizeof(mchk->fixed_logout));
+	return rc ? -EFAULT : 0;
+}
+
 static int __must_check __deliver_machine_check(struct kvm_vcpu *vcpu)
 {
 	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
 	struct kvm_s390_mchk_info mchk = {};
-	unsigned long adtl_status_addr;
 	int deliver = 0;
 	int rc = 0;
 
@@ -446,29 +518,9 @@ static int __must_check __deliver_machine_check(struct kvm_vcpu *vcpu)
 		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
 						 KVM_S390_MCHK,
 						 mchk.cr14, mchk.mcic);
-
-		rc  = kvm_s390_vcpu_store_status(vcpu,
-						 KVM_S390_STORE_STATUS_PREFIXED);
-		rc |= read_guest_lc(vcpu, __LC_VX_SAVE_AREA_ADDR,
-				    &adtl_status_addr,
-				    sizeof(unsigned long));
-		rc |= kvm_s390_vcpu_store_adtl_status(vcpu,
-						      adtl_status_addr);
-		rc |= put_guest_lc(vcpu, mchk.mcic,
-				   (u64 __user *) __LC_MCCK_CODE);
-		rc |= put_guest_lc(vcpu, mchk.failing_storage_address,
-				   (u64 __user *) __LC_MCCK_FAIL_STOR_ADDR);
-		rc |= write_guest_lc(vcpu, __LC_PSW_SAVE_AREA,
-				     &mchk.fixed_logout,
-				     sizeof(mchk.fixed_logout));
-		rc |= write_guest_lc(vcpu, __LC_MCK_OLD_PSW,
-				     &vcpu->arch.sie_block->gpsw,
-				     sizeof(psw_t));
-		rc |= read_guest_lc(vcpu, __LC_MCK_NEW_PSW,
-				    &vcpu->arch.sie_block->gpsw,
-				    sizeof(psw_t));
+		rc = __write_machine_check(vcpu, &mchk);
 	}
-	return rc ? -EFAULT : 0;
+	return rc;
 }
 
 static int __must_check __deliver_restart(struct kvm_vcpu *vcpu)

+ 31 - 39
arch/s390/kvm/kvm-s390.c

@@ -132,10 +132,7 @@ module_param(nested, int, S_IRUGO);
 MODULE_PARM_DESC(nested, "Nested virtualization support");
 
 /* upper facilities limit for kvm */
-unsigned long kvm_s390_fac_list_mask[16] = {
-	0xffe6000000000000UL,
-	0x005e000000000000UL,
-};
+unsigned long kvm_s390_fac_list_mask[16] = { FACILITIES_KVM };
 
 unsigned long kvm_s390_fac_list_mask_size(void)
 {
@@ -376,7 +373,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
 	case KVM_CAP_NR_VCPUS:
 	case KVM_CAP_MAX_VCPUS:
 		r = KVM_S390_BSCA_CPU_SLOTS;
-		if (sclp.has_esca && sclp.has_64bscao)
+		if (!kvm_s390_use_sca_entries())
+			r = KVM_MAX_VCPUS;
+		else if (sclp.has_esca && sclp.has_64bscao)
 			r = KVM_S390_ESCA_CPU_SLOTS;
 		break;
 	case KVM_CAP_NR_MEMSLOTS:
@@ -1553,6 +1552,8 @@ static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
 
 static void sca_del_vcpu(struct kvm_vcpu *vcpu)
 {
+	if (!kvm_s390_use_sca_entries())
+		return;
 	read_lock(&vcpu->kvm->arch.sca_lock);
 	if (vcpu->kvm->arch.use_esca) {
 		struct esca_block *sca = vcpu->kvm->arch.sca;
@@ -1570,6 +1571,13 @@ static void sca_del_vcpu(struct kvm_vcpu *vcpu)
 
 static void sca_add_vcpu(struct kvm_vcpu *vcpu)
 {
+	if (!kvm_s390_use_sca_entries()) {
+		struct bsca_block *sca = vcpu->kvm->arch.sca;
+
+		/* we still need the basic sca for the ipte control */
+		vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
+		vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
+	}
 	read_lock(&vcpu->kvm->arch.sca_lock);
 	if (vcpu->kvm->arch.use_esca) {
 		struct esca_block *sca = vcpu->kvm->arch.sca;
@@ -1650,6 +1658,11 @@ static int sca_can_add_vcpu(struct kvm *kvm, unsigned int id)
 {
 	int rc;
 
+	if (!kvm_s390_use_sca_entries()) {
+		if (id < KVM_MAX_VCPUS)
+			return true;
+		return false;
+	}
 	if (id < KVM_S390_BSCA_CPU_SLOTS)
 		return true;
 	if (!sclp.has_esca || !sclp.has_64bscao)
@@ -1938,8 +1951,6 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 		vcpu->arch.sie_block->eca |= 1;
 	if (sclp.has_sigpif)
 		vcpu->arch.sie_block->eca |= 0x10000000U;
-	if (test_kvm_facility(vcpu->kvm, 64))
-		vcpu->arch.sie_block->ecb3 |= 0x01;
 	if (test_kvm_facility(vcpu->kvm, 129)) {
 		vcpu->arch.sie_block->eca |= 0x00020000;
 		vcpu->arch.sie_block->ecd |= 0x20000000;
@@ -2694,6 +2705,19 @@ static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
 			kvm_clear_async_pf_completion_queue(vcpu);
 	}
+	/*
+	 * If userspace sets the riccb (e.g. after migration) to a valid state,
+	 * we should enable RI here instead of doing the lazy enablement.
+	 */
+	if ((kvm_run->kvm_dirty_regs & KVM_SYNC_RICCB) &&
+	    test_kvm_facility(vcpu->kvm, 64)) {
+		struct runtime_instr_cb *riccb =
+			(struct runtime_instr_cb *) &kvm_run->s.regs.riccb;
+
+		if (riccb->valid)
+			vcpu->arch.sie_block->ecb3 |= 0x01;
+	}
+
 	kvm_run->kvm_dirty_regs = 0;
 }
 
@@ -2837,38 +2861,6 @@ int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
 	return kvm_s390_store_status_unloaded(vcpu, addr);
 }
 
-/*
- * store additional status at address
- */
-int kvm_s390_store_adtl_status_unloaded(struct kvm_vcpu *vcpu,
-					unsigned long gpa)
-{
-	/* Only bits 0-53 are used for address formation */
-	if (!(gpa & ~0x3ff))
-		return 0;
-
-	return write_guest_abs(vcpu, gpa & ~0x3ff,
-			       (void *)&vcpu->run->s.regs.vrs, 512);
-}
-
-int kvm_s390_vcpu_store_adtl_status(struct kvm_vcpu *vcpu, unsigned long addr)
-{
-	if (!test_kvm_facility(vcpu->kvm, 129))
-		return 0;
-
-	/*
-	 * The guest VXRS are in the host VXRs due to the lazy
-	 * copying in vcpu load/put. We can simply call save_fpu_regs()
-	 * to save the current register state because we are in the
-	 * middle of a load/put cycle.
-	 *
-	 * Let's update our copies before we save it into the save area.
-	 */
-	save_fpu_regs();
-
-	return kvm_s390_store_adtl_status_unloaded(vcpu, addr);
-}
-
 static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
 {
 	kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);

+ 11 - 3
arch/s390/kvm/kvm-s390.h

@@ -20,6 +20,7 @@
 #include <linux/kvm_host.h>
 #include <asm/facility.h>
 #include <asm/processor.h>
+#include <asm/sclp.h>
 
 typedef int (*intercept_handler_t)(struct kvm_vcpu *vcpu);
 
@@ -245,6 +246,7 @@ static inline void kvm_s390_retry_instr(struct kvm_vcpu *vcpu)
 
 /* implemented in priv.c */
 int is_valid_psw(psw_t *psw);
+int kvm_s390_handle_aa(struct kvm_vcpu *vcpu);
 int kvm_s390_handle_b2(struct kvm_vcpu *vcpu);
 int kvm_s390_handle_e5(struct kvm_vcpu *vcpu);
 int kvm_s390_handle_01(struct kvm_vcpu *vcpu);
@@ -273,10 +275,7 @@ int handle_sthyi(struct kvm_vcpu *vcpu);
 void kvm_s390_set_tod_clock(struct kvm *kvm, u64 tod);
 long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable);
 int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long addr);
-int kvm_s390_store_adtl_status_unloaded(struct kvm_vcpu *vcpu,
-					unsigned long addr);
 int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr);
-int kvm_s390_vcpu_store_adtl_status(struct kvm_vcpu *vcpu, unsigned long addr);
 void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu);
 void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu);
 void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu);
@@ -389,4 +388,13 @@ static inline union ipte_control *kvm_s390_get_ipte_control(struct kvm *kvm)
 
 	return &sca->ipte_control;
 }
+static inline int kvm_s390_use_sca_entries(void)
+{
+	/*
+	 * Without SIGP interpretation, only SRS interpretation (if available)
+	 * might use the entries. By not setting the entries and keeping them
+	 * invalid, hardware will not access them but intercept.
+	 */
+	return sclp.has_sigpif;
+}
 #endif

+ 21 - 0
arch/s390/kvm/priv.c

@@ -32,6 +32,24 @@
 #include "kvm-s390.h"
 #include "trace.h"
 
+static int handle_ri(struct kvm_vcpu *vcpu)
+{
+	if (test_kvm_facility(vcpu->kvm, 64)) {
+		vcpu->arch.sie_block->ecb3 |= 0x01;
+		kvm_s390_retry_instr(vcpu);
+		return 0;
+	} else
+		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);
+}
+
+int kvm_s390_handle_aa(struct kvm_vcpu *vcpu)
+{
+	if ((vcpu->arch.sie_block->ipa & 0xf) <= 4)
+		return handle_ri(vcpu);
+	else
+		return -EOPNOTSUPP;
+}
+
 /* Handle SCK (SET CLOCK) interception */
 static int handle_set_clock(struct kvm_vcpu *vcpu)
 {
@@ -1093,6 +1111,9 @@ static int handle_stctg(struct kvm_vcpu *vcpu)
 static const intercept_handler_t eb_handlers[256] = {
 	[0x2f] = handle_lctlg,
 	[0x25] = handle_stctg,
+	[0x60] = handle_ri,
+	[0x61] = handle_ri,
+	[0x62] = handle_ri,
 };
 
 int kvm_s390_handle_eb(struct kvm_vcpu *vcpu)

+ 8 - 1
arch/x86/kernel/cpu/microcode/amd.c

@@ -355,6 +355,7 @@ void load_ucode_amd_ap(void)
 	unsigned int cpu = smp_processor_id();
 	struct equiv_cpu_entry *eq;
 	struct microcode_amd *mc;
+	u8 *cont = container;
 	u32 rev, eax;
 	u16 eq_id;
 
@@ -371,8 +372,11 @@ void load_ucode_amd_ap(void)
 	if (check_current_patch_level(&rev, false))
 		return;
 
+	/* Add CONFIG_RANDOMIZE_MEMORY offset. */
+	cont += PAGE_OFFSET - __PAGE_OFFSET_BASE;
+
 	eax = cpuid_eax(0x00000001);
-	eq  = (struct equiv_cpu_entry *)(container + CONTAINER_HDR_SZ);
+	eq  = (struct equiv_cpu_entry *)(cont + CONTAINER_HDR_SZ);
 
 	eq_id = find_equiv_id(eq, eax);
 	if (!eq_id)
@@ -434,6 +438,9 @@ int __init save_microcode_in_initrd_amd(void)
 	else
 		container = cont_va;
 
+	/* Add CONFIG_RANDOMIZE_MEMORY offset. */
+	container += PAGE_OFFSET - __PAGE_OFFSET_BASE;
+
 	eax   = cpuid_eax(0x00000001);
 	eax   = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff);
 

+ 16 - 9
arch/x86/kernel/smpboot.c

@@ -100,10 +100,11 @@ EXPORT_PER_CPU_SYMBOL(cpu_info);
 /* Logical package management. We might want to allocate that dynamically */
 static int *physical_to_logical_pkg __read_mostly;
 static unsigned long *physical_package_map __read_mostly;;
-static unsigned long *logical_package_map  __read_mostly;
 static unsigned int max_physical_pkg_id __read_mostly;
 unsigned int __max_logical_packages __read_mostly;
 EXPORT_SYMBOL(__max_logical_packages);
+static unsigned int logical_packages __read_mostly;
+static bool logical_packages_frozen __read_mostly;
 
 /* Maximum number of SMT threads on any online core */
 int __max_smt_threads __read_mostly;
@@ -277,14 +278,14 @@ int topology_update_package_map(unsigned int apicid, unsigned int cpu)
 	if (test_and_set_bit(pkg, physical_package_map))
 		goto found;
 
-	new = find_first_zero_bit(logical_package_map, __max_logical_packages);
-	if (new >= __max_logical_packages) {
+	if (logical_packages_frozen) {
 		physical_to_logical_pkg[pkg] = -1;
-		pr_warn("APIC(%x) Package %u exceeds logical package map\n",
+		pr_warn("APIC(%x) Package %u exceeds logical package max\n",
 			apicid, pkg);
 		return -ENOSPC;
 	}
-	set_bit(new, logical_package_map);
+
+	new = logical_packages++;
 	pr_info("APIC(%x) Converting physical %u to logical package %u\n",
 		apicid, pkg, new);
 	physical_to_logical_pkg[pkg] = new;
@@ -341,6 +342,7 @@ static void __init smp_init_package_map(void)
 	}
 
 	__max_logical_packages = DIV_ROUND_UP(total_cpus, ncpus);
+	logical_packages = 0;
 
 	/*
 	 * Possibly larger than what we need as the number of apic ids per
@@ -352,10 +354,6 @@ static void __init smp_init_package_map(void)
 	memset(physical_to_logical_pkg, 0xff, size);
 	size = BITS_TO_LONGS(max_physical_pkg_id) * sizeof(unsigned long);
 	physical_package_map = kzalloc(size, GFP_KERNEL);
-	size = BITS_TO_LONGS(__max_logical_packages) * sizeof(unsigned long);
-	logical_package_map = kzalloc(size, GFP_KERNEL);
-
-	pr_info("Max logical packages: %u\n", __max_logical_packages);
 
 	for_each_present_cpu(cpu) {
 		unsigned int apicid = apic->cpu_present_to_apicid(cpu);
@@ -369,6 +367,15 @@ static void __init smp_init_package_map(void)
 		set_cpu_possible(cpu, false);
 		set_cpu_present(cpu, false);
 	}
+
+	if (logical_packages > __max_logical_packages) {
+		pr_warn("Detected more packages (%u), then computed by BIOS data (%u).\n",
+			logical_packages, __max_logical_packages);
+		logical_packages_frozen = true;
+		__max_logical_packages  = logical_packages;
+	}
+
+	pr_info("Max logical packages: %u\n", __max_logical_packages);
 }
 
 void __init smp_store_boot_cpu_info(void)

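The smpboot change above drops the logical-package bitmap in favour of a plain counter: logical ids are handed out in discovery order, and once more packages show up than the BIOS-derived estimate allowed, the limit is widened once and then frozen so later hotplug of an unexpected package fails cleanly. A rough userspace model of that policy (the numbers and names are illustrative, not the kernel's):

#include <stdbool.h>
#include <stdio.h>

static unsigned int logical_packages;
static unsigned int max_logical_packages = 2;   /* assumed BIOS-derived estimate */
static bool logical_packages_frozen;

static int assign_logical_pkg(unsigned int phys_pkg)
{
	if (logical_packages_frozen) {
		printf("package %u exceeds logical package max\n", phys_pkg);
		return -1;
	}
	return (int)logical_packages++;          /* next id, in discovery order */
}

int main(void)
{
	for (unsigned int pkg = 0; pkg < 4; pkg++)
		printf("physical %u -> logical %d\n", pkg, assign_logical_pkg(pkg));

	if (logical_packages > max_logical_packages) {
		/* more packages than predicted: widen once, then stop growing */
		max_logical_packages = logical_packages;
		logical_packages_frozen = true;
	}
	printf("max logical packages: %u\n", max_logical_packages);
	return 0;
}
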
+ 1 - 1
arch/x86/power/hibernate_64.c

@@ -113,7 +113,7 @@ static int set_up_temporary_mappings(void)
 			return result;
 	}
 
-	temp_level4_pgt = (unsigned long)pgd - __PAGE_OFFSET;
+	temp_level4_pgt = __pa(pgd);
 	return 0;
 }
 

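The one-line hibernate fix matters because open-coding "virt - __PAGE_OFFSET" breaks once the direct-map base is randomized; __pa() uses the base that is actually in effect at runtime. A tiny illustrative helper (the base value is invented, and the real __pa() does more than this):

#include <stdint.h>
#include <stdio.h>

static uint64_t direct_map_base = 0xffff9a0000000000ULL;  /* assumed runtime (randomized) base */

static uint64_t virt_to_phys_sketch(uint64_t vaddr)
{
	/* subtract the base actually in use, not a compile-time constant */
	return vaddr - direct_map_base;
}

int main(void)
{
	uint64_t pgd = direct_map_base + 0x42000;   /* pretend page-table page in the direct map */
	printf("phys = %#llx\n", (unsigned long long)virt_to_phys_sketch(pgd));
	return 0;
}
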
+ 10 - 6
drivers/clocksource/bcm_kona_timer.c

@@ -66,10 +66,10 @@ static void kona_timer_disable_and_clear(void __iomem *base)
 
 }
 
-static void
+static int
 kona_timer_get_counter(void __iomem *timer_base, uint32_t *msw, uint32_t *lsw)
 {
-	int loop_limit = 4;
+	int loop_limit = 3;
 
 	/*
 	 * Read 64-bit free running counter
@@ -83,18 +83,19 @@ kona_timer_get_counter(void __iomem *timer_base, uint32_t *msw, uint32_t *lsw)
 	 *      if new hi-word is equal to previously read hi-word then stop.
 	 */
 
-	while (--loop_limit) {
+	do {
 		*msw = readl(timer_base + KONA_GPTIMER_STCHI_OFFSET);
 		*lsw = readl(timer_base + KONA_GPTIMER_STCLO_OFFSET);
 		if (*msw == readl(timer_base + KONA_GPTIMER_STCHI_OFFSET))
 			break;
-	}
+	} while (--loop_limit);
 	if (!loop_limit) {
 		pr_err("bcm_kona_timer: getting counter failed.\n");
 		pr_err(" Timer will be impacted\n");
+		return -ETIMEDOUT;
 	}
 
-	return;
+	return 0;
 }
 
 static int kona_timer_set_next_event(unsigned long clc,
@@ -112,8 +113,11 @@ static int kona_timer_set_next_event(unsigned long clc,
 
 	uint32_t lsw, msw;
 	uint32_t reg;
+	int ret;
 
-	kona_timer_get_counter(timers.tmr_regs, &msw, &lsw);
+	ret = kona_timer_get_counter(timers.tmr_regs, &msw, &lsw);
+	if (ret)
+		return ret;
 
 	/* Load the "next" event tick value */
 	writel(lsw + clc, timers.tmr_regs + KONA_GPTIMER_STCM0_OFFSET);

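The bcm_kona_timer hunks implement the classic hi/lo/hi read for a free-running 64-bit counter exposed as two 32-bit registers, now with a bounded retry and an error return instead of silently handing back a possibly torn value. A standalone sketch of the same pattern (the fake counter and register reads are stand-ins, not the SoC registers):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for a 64-bit hardware counter exposed as two 32-bit halves. */
static uint64_t fake_counter = 0x00000001fffffffeULL;

static uint32_t read_hi(void) { return (uint32_t)(fake_counter >> 32); }
static uint32_t read_lo(void) { fake_counter += 3; return (uint32_t)fake_counter; }

/* hi/lo/hi read: retry while the high word changed underneath us,
 * and give up after three attempts instead of looping forever. */
static int get_counter(uint32_t *msw, uint32_t *lsw)
{
	int loop_limit = 3;

	do {
		*msw = read_hi();
		*lsw = read_lo();
		if (*msw == read_hi())
			break;                   /* no rollover between the two reads */
	} while (--loop_limit);

	if (!loop_limit)
		return -ETIMEDOUT;               /* caller must not trust msw/lsw */
	return 0;
}

int main(void)
{
	uint32_t hi, lo;

	if (!get_counter(&hi, &lo))
		printf("counter = %#llx\n", (((unsigned long long)hi << 32) | lo));
	return 0;
}
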
+ 1 - 1
drivers/clocksource/mips-gic-timer.c

@@ -164,7 +164,7 @@ void __init gic_clocksource_init(unsigned int frequency)
 	gic_start_count();
 }
 
-static void __init gic_clocksource_of_init(struct device_node *node)
+static int __init gic_clocksource_of_init(struct device_node *node)
 {
 	struct clk *clk;
 	int ret;

+ 0 - 1
drivers/clocksource/time-armada-370-xp.c

@@ -338,7 +338,6 @@ static int __init armada_xp_timer_init(struct device_node *np)
 	struct clk *clk = of_clk_get_by_name(np, "fixed");
 	int ret;
 
-	clk = of_clk_get(np, 0);
 	if (IS_ERR(clk)) {
 		pr_err("Failed to get clock");
 		return PTR_ERR(clk);

+ 8 - 0
drivers/edac/Kconfig

@@ -251,6 +251,14 @@ config EDAC_SBRIDGE
 	  Support for error detection and correction the Intel
 	  Sandy Bridge, Ivy Bridge and Haswell Integrated Memory Controllers.
 
+config EDAC_SKX
+	tristate "Intel Skylake server Integrated MC"
+	depends on EDAC_MM_EDAC && PCI && X86_64 && X86_MCE_INTEL
+	depends on PCI_MMCONFIG
+	help
+	  Support for error detection and correction the Intel
+	  Skylake server Integrated Memory Controllers.
+
 config EDAC_MPC85XX
 	tristate "Freescale MPC83xx / MPC85xx"
 	depends on EDAC_MM_EDAC && FSL_SOC

+ 1 - 0
drivers/edac/Makefile

@@ -31,6 +31,7 @@ obj-$(CONFIG_EDAC_I5400)		+= i5400_edac.o
 obj-$(CONFIG_EDAC_I7300)		+= i7300_edac.o
 obj-$(CONFIG_EDAC_I7CORE)		+= i7core_edac.o
 obj-$(CONFIG_EDAC_SBRIDGE)		+= sb_edac.o
+obj-$(CONFIG_EDAC_SKX)			+= skx_edac.o
 obj-$(CONFIG_EDAC_E7XXX)		+= e7xxx_edac.o
 obj-$(CONFIG_EDAC_E752X)		+= e752x_edac.o
 obj-$(CONFIG_EDAC_I82443BXGX)		+= i82443bxgx_edac.o

+ 1121 - 0
drivers/edac/skx_edac.c

@@ -0,0 +1,1121 @@
+/*
+ * EDAC driver for Intel(R) Xeon(R) Skylake processors
+ * Copyright (c) 2016, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/edac.h>
+#include <linux/mmzone.h>
+#include <linux/smp.h>
+#include <linux/bitmap.h>
+#include <linux/math64.h>
+#include <linux/mod_devicetable.h>
+#include <asm/cpu_device_id.h>
+#include <asm/processor.h>
+#include <asm/mce.h>
+
+#include "edac_core.h"
+
+#define SKX_REVISION    " Ver: 1.0 "
+
+/*
+ * Debug macros
+ */
+#define skx_printk(level, fmt, arg...)			\
+	edac_printk(level, "skx", fmt, ##arg)
+
+#define skx_mc_printk(mci, level, fmt, arg...)		\
+	edac_mc_chipset_printk(mci, level, "skx", fmt, ##arg)
+
+/*
+ * Get a bit field at register value <v>, from bit <lo> to bit <hi>
+ */
+#define GET_BITFIELD(v, lo, hi) \
+	(((v) & GENMASK_ULL((hi), (lo))) >> (lo))
+
+static LIST_HEAD(skx_edac_list);
+
+static u64 skx_tolm, skx_tohm;
+
+#define NUM_IMC			2	/* memory controllers per socket */
+#define NUM_CHANNELS		3	/* channels per memory controller */
+#define NUM_DIMMS		2	/* Max DIMMS per channel */
+
+#define	MASK26	0x3FFFFFF		/* Mask for 2^26 */
+#define MASK29	0x1FFFFFFF		/* Mask for 2^29 */
+
+/*
+ * Each cpu socket contains some pci devices that provide global
+ * information, and also some that are local to each of the two
+ * memory controllers on the die.
+ */
+struct skx_dev {
+	struct list_head	list;
+	u8			bus[4];
+	struct pci_dev	*sad_all;
+	struct pci_dev	*util_all;
+	u32	mcroute;
+	struct skx_imc {
+		struct mem_ctl_info *mci;
+		u8	mc;	/* system wide mc# */
+		u8	lmc;	/* socket relative mc# */
+		u8	src_id, node_id;
+		struct skx_channel {
+			struct pci_dev *cdev;
+			struct skx_dimm {
+				u8	close_pg;
+				u8	bank_xor_enable;
+				u8	fine_grain_bank;
+				u8	rowbits;
+				u8	colbits;
+			} dimms[NUM_DIMMS];
+		} chan[NUM_CHANNELS];
+	} imc[NUM_IMC];
+};
+static int skx_num_sockets;
+
+struct skx_pvt {
+	struct skx_imc	*imc;
+};
+
+struct decoded_addr {
+	struct skx_dev *dev;
+	u64	addr;
+	int	socket;
+	int	imc;
+	int	channel;
+	u64	chan_addr;
+	int	sktways;
+	int	chanways;
+	int	dimm;
+	int	rank;
+	int	channel_rank;
+	u64	rank_address;
+	int	row;
+	int	column;
+	int	bank_address;
+	int	bank_group;
+};
+
+static struct skx_dev *get_skx_dev(u8 bus, u8 idx)
+{
+	struct skx_dev *d;
+
+	list_for_each_entry(d, &skx_edac_list, list) {
+		if (d->bus[idx] == bus)
+			return d;
+	}
+
+	return NULL;
+}
+
+enum munittype {
+	CHAN0, CHAN1, CHAN2, SAD_ALL, UTIL_ALL, SAD
+};
+
+struct munit {
+	u16	did;
+	u16	devfn[NUM_IMC];
+	u8	busidx;
+	u8	per_socket;
+	enum munittype mtype;
+};
+
+/*
+ * List of PCI device ids that we need together with some device
+ * number and function numbers to tell which memory controller the
+ * device belongs to.
+ */
+static const struct munit skx_all_munits[] = {
+	{ 0x2054, { }, 1, 1, SAD_ALL },
+	{ 0x2055, { }, 1, 1, UTIL_ALL },
+	{ 0x2040, { PCI_DEVFN(10, 0), PCI_DEVFN(12, 0) }, 2, 2, CHAN0 },
+	{ 0x2044, { PCI_DEVFN(10, 4), PCI_DEVFN(12, 4) }, 2, 2, CHAN1 },
+	{ 0x2048, { PCI_DEVFN(11, 0), PCI_DEVFN(13, 0) }, 2, 2, CHAN2 },
+	{ 0x208e, { }, 1, 0, SAD },
+	{ }
+};
+
+/*
+ * We use the per-socket device 0x2016 to count how many sockets are present,
+ * and to determine which PCI buses are associated with each socket. Allocate
+ * and build the full list of all the skx_dev structures that we need here.
+ */
+static int get_all_bus_mappings(void)
+{
+	struct pci_dev *pdev, *prev;
+	struct skx_dev *d;
+	u32 reg;
+	int ndev = 0;
+
+	prev = NULL;
+	for (;;) {
+		pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x2016, prev);
+		if (!pdev)
+			break;
+		ndev++;
+		d = kzalloc(sizeof(*d), GFP_KERNEL);
+		if (!d) {
+			pci_dev_put(pdev);
+			return -ENOMEM;
+		}
+		pci_read_config_dword(pdev, 0xCC, &reg);
+		d->bus[0] =  GET_BITFIELD(reg, 0, 7);
+		d->bus[1] =  GET_BITFIELD(reg, 8, 15);
+		d->bus[2] =  GET_BITFIELD(reg, 16, 23);
+		d->bus[3] =  GET_BITFIELD(reg, 24, 31);
+		edac_dbg(2, "busses: %x, %x, %x, %x\n",
+			 d->bus[0], d->bus[1], d->bus[2], d->bus[3]);
+		list_add_tail(&d->list, &skx_edac_list);
+		skx_num_sockets++;
+		prev = pdev;
+	}
+
+	return ndev;
+}
+
+static int get_all_munits(const struct munit *m)
+{
+	struct pci_dev *pdev, *prev;
+	struct skx_dev *d;
+	u32 reg;
+	int i = 0, ndev = 0;
+
+	prev = NULL;
+	for (;;) {
+		pdev = pci_get_device(PCI_VENDOR_ID_INTEL, m->did, prev);
+		if (!pdev)
+			break;
+		ndev++;
+		if (m->per_socket == NUM_IMC) {
+			for (i = 0; i < NUM_IMC; i++)
+				if (m->devfn[i] == pdev->devfn)
+					break;
+			if (i == NUM_IMC)
+				goto fail;
+		}
+		d = get_skx_dev(pdev->bus->number, m->busidx);
+		if (!d)
+			goto fail;
+
+		/* Be sure that the device is enabled */
+		if (unlikely(pci_enable_device(pdev) < 0)) {
+			skx_printk(KERN_ERR,
+				"Couldn't enable %04x:%04x\n", PCI_VENDOR_ID_INTEL, m->did);
+			goto fail;
+		}
+
+		switch (m->mtype) {
+		case CHAN0: case CHAN1: case CHAN2:
+			pci_dev_get(pdev);
+			d->imc[i].chan[m->mtype].cdev = pdev;
+			break;
+		case SAD_ALL:
+			pci_dev_get(pdev);
+			d->sad_all = pdev;
+			break;
+		case UTIL_ALL:
+			pci_dev_get(pdev);
+			d->util_all = pdev;
+			break;
+		case SAD:
+			/*
+			 * one of these devices per core, including cores
+			 * that don't exist on this SKU. Ignore any that
+			 * read a route table of zero, make sure all the
+			 * non-zero values match.
+			 */
+			pci_read_config_dword(pdev, 0xB4, &reg);
+			if (reg != 0) {
+				if (d->mcroute == 0)
+					d->mcroute = reg;
+				else if (d->mcroute != reg) {
+					skx_printk(KERN_ERR,
+						"mcroute mismatch\n");
+					goto fail;
+				}
+			}
+			ndev--;
+			break;
+		}
+
+		prev = pdev;
+	}
+
+	return ndev;
+fail:
+	pci_dev_put(pdev);
+	return -ENODEV;
+}
+
+const struct x86_cpu_id skx_cpuids[] = {
+	{ X86_VENDOR_INTEL, 6, 0x55, 0, 0 },	/* Skylake */
+	{ }
+};
+MODULE_DEVICE_TABLE(x86cpu, skx_cpuids);
+
+static u8 get_src_id(struct skx_dev *d)
+{
+	u32 reg;
+
+	pci_read_config_dword(d->util_all, 0xF0, &reg);
+
+	return GET_BITFIELD(reg, 12, 14);
+}
+
+static u8 skx_get_node_id(struct skx_dev *d)
+{
+	u32 reg;
+
+	pci_read_config_dword(d->util_all, 0xF4, &reg);
+
+	return GET_BITFIELD(reg, 0, 2);
+}
+
+static int get_dimm_attr(u32 reg, int lobit, int hibit, int add, int minval,
+			 int maxval, char *name)
+{
+	u32 val = GET_BITFIELD(reg, lobit, hibit);
+
+	if (val < minval || val > maxval) {
+		edac_dbg(2, "bad %s = %d (raw=%x)\n", name, val, reg);
+		return -EINVAL;
+	}
+	return val + add;
+}
+
+#define IS_DIMM_PRESENT(mtr)		GET_BITFIELD((mtr), 15, 15)
+
+#define numrank(reg) get_dimm_attr((reg), 12, 13, 0, 1, 2, "ranks")
+#define numrow(reg) get_dimm_attr((reg), 2, 4, 12, 1, 6, "rows")
+#define numcol(reg) get_dimm_attr((reg), 0, 1, 10, 0, 2, "cols")
+
+static int get_width(u32 mtr)
+{
+	switch (GET_BITFIELD(mtr, 8, 9)) {
+	case 0:
+		return DEV_X4;
+	case 1:
+		return DEV_X8;
+	case 2:
+		return DEV_X16;
+	}
+	return DEV_UNKNOWN;
+}
+
+static int skx_get_hi_lo(void)
+{
+	struct pci_dev *pdev;
+	u32 reg;
+
+	pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x2034, NULL);
+	if (!pdev) {
+		edac_dbg(0, "Can't get tolm/tohm\n");
+		return -ENODEV;
+	}
+
+	pci_read_config_dword(pdev, 0xD0, &reg);
+	skx_tolm = reg;
+	pci_read_config_dword(pdev, 0xD4, &reg);
+	skx_tohm = reg;
+	pci_read_config_dword(pdev, 0xD8, &reg);
+	skx_tohm |= (u64)reg << 32;
+
+	pci_dev_put(pdev);
+	edac_dbg(2, "tolm=%llx tohm=%llx\n", skx_tolm, skx_tohm);
+
+	return 0;
+}
+
+static int get_dimm_info(u32 mtr, u32 amap, struct dimm_info *dimm,
+			 struct skx_imc *imc, int chan, int dimmno)
+{
+	int  banks = 16, ranks, rows, cols, npages;
+	u64 size;
+
+	if (!IS_DIMM_PRESENT(mtr))
+		return 0;
+	ranks = numrank(mtr);
+	rows = numrow(mtr);
+	cols = numcol(mtr);
+
+	/*
+	 * Compute size in 8-byte (2^3) words, then shift to MiB (2^20)
+	 */
+	size = ((1ull << (rows + cols + ranks)) * banks) >> (20 - 3);
+	npages = MiB_TO_PAGES(size);
+
+	edac_dbg(0, "mc#%d: channel %d, dimm %d, %lld Mb (%d pages) bank: %d, rank: %d, row: %#x, col: %#x\n",
+		 imc->mc, chan, dimmno, size, npages,
+		 banks, ranks, rows, cols);
+
+	imc->chan[chan].dimms[dimmno].close_pg = GET_BITFIELD(mtr, 0, 0);
+	imc->chan[chan].dimms[dimmno].bank_xor_enable = GET_BITFIELD(mtr, 9, 9);
+	imc->chan[chan].dimms[dimmno].fine_grain_bank = GET_BITFIELD(amap, 0, 0);
+	imc->chan[chan].dimms[dimmno].rowbits = rows;
+	imc->chan[chan].dimms[dimmno].colbits = cols;
+
+	dimm->nr_pages = npages;
+	dimm->grain = 32;
+	dimm->dtype = get_width(mtr);
+	dimm->mtype = MEM_DDR4;
+	dimm->edac_mode = EDAC_SECDED; /* likely better than this */
+	snprintf(dimm->label, sizeof(dimm->label), "CPU_SrcID#%u_MC#%u_Chan#%u_DIMM#%u",
+		 imc->src_id, imc->lmc, chan, dimmno);
+
+	return 1;
+}
+
+#define SKX_GET_MTMTR(dev, reg) \
+	pci_read_config_dword((dev), 0x87c, &reg)
+
+static bool skx_check_ecc(struct pci_dev *pdev)
+{
+	u32 mtmtr;
+
+	SKX_GET_MTMTR(pdev, mtmtr);
+
+	return !!GET_BITFIELD(mtmtr, 2, 2);
+}
+
+static int skx_get_dimm_config(struct mem_ctl_info *mci)
+{
+	struct skx_pvt *pvt = mci->pvt_info;
+	struct skx_imc *imc = pvt->imc;
+	struct dimm_info *dimm;
+	int i, j;
+	u32 mtr, amap;
+	int ndimms;
+
+	for (i = 0; i < NUM_CHANNELS; i++) {
+		ndimms = 0;
+		pci_read_config_dword(imc->chan[i].cdev, 0x8C, &amap);
+		for (j = 0; j < NUM_DIMMS; j++) {
+			dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms,
+					     mci->n_layers, i, j, 0);
+			pci_read_config_dword(imc->chan[i].cdev,
+					0x80 + 4*j, &mtr);
+			ndimms += get_dimm_info(mtr, amap, dimm, imc, i, j);
+		}
+		if (ndimms && !skx_check_ecc(imc->chan[0].cdev)) {
+			skx_printk(KERN_ERR, "ECC is disabled on imc %d\n", imc->mc);
+			return -ENODEV;
+		}
+	}
+
+	return 0;
+}
+
+static void skx_unregister_mci(struct skx_imc *imc)
+{
+	struct mem_ctl_info *mci = imc->mci;
+
+	if (!mci)
+		return;
+
+	edac_dbg(0, "MC%d: mci = %p\n", imc->mc, mci);
+
+	/* Remove MC sysfs nodes */
+	edac_mc_del_mc(mci->pdev);
+
+	edac_dbg(1, "%s: free mci struct\n", mci->ctl_name);
+	kfree(mci->ctl_name);
+	edac_mc_free(mci);
+}
+
+static int skx_register_mci(struct skx_imc *imc)
+{
+	struct mem_ctl_info *mci;
+	struct edac_mc_layer layers[2];
+	struct pci_dev *pdev = imc->chan[0].cdev;
+	struct skx_pvt *pvt;
+	int rc;
+
+	/* allocate a new MC control structure */
+	layers[0].type = EDAC_MC_LAYER_CHANNEL;
+	layers[0].size = NUM_CHANNELS;
+	layers[0].is_virt_csrow = false;
+	layers[1].type = EDAC_MC_LAYER_SLOT;
+	layers[1].size = NUM_DIMMS;
+	layers[1].is_virt_csrow = true;
+	mci = edac_mc_alloc(imc->mc, ARRAY_SIZE(layers), layers,
+			    sizeof(struct skx_pvt));
+
+	if (unlikely(!mci))
+		return -ENOMEM;
+
+	edac_dbg(0, "MC#%d: mci = %p\n", imc->mc, mci);
+
+	/* Associate skx_dev and mci for future usage */
+	imc->mci = mci;
+	pvt = mci->pvt_info;
+	pvt->imc = imc;
+
+	mci->ctl_name = kasprintf(GFP_KERNEL, "Skylake Socket#%d IMC#%d",
+				  imc->node_id, imc->lmc);
+	mci->mtype_cap = MEM_FLAG_DDR4;
+	mci->edac_ctl_cap = EDAC_FLAG_NONE;
+	mci->edac_cap = EDAC_FLAG_NONE;
+	mci->mod_name = "skx_edac.c";
+	mci->dev_name = pci_name(imc->chan[0].cdev);
+	mci->mod_ver = SKX_REVISION;
+	mci->ctl_page_to_phys = NULL;
+
+	rc = skx_get_dimm_config(mci);
+	if (rc < 0)
+		goto fail;
+
+	/* record ptr to the generic device */
+	mci->pdev = &pdev->dev;
+
+	/* add this new MC control structure to EDAC's list of MCs */
+	if (unlikely(edac_mc_add_mc(mci))) {
+		edac_dbg(0, "MC: failed edac_mc_add_mc()\n");
+		rc = -EINVAL;
+		goto fail;
+	}
+
+	return 0;
+
+fail:
+	kfree(mci->ctl_name);
+	edac_mc_free(mci);
+	imc->mci = NULL;
+	return rc;
+}
+
+#define	SKX_MAX_SAD 24
+
+#define SKX_GET_SAD(d, i, reg)	\
+	pci_read_config_dword((d)->sad_all, 0x60 + 8 * (i), &reg)
+#define SKX_GET_ILV(d, i, reg)	\
+	pci_read_config_dword((d)->sad_all, 0x64 + 8 * (i), &reg)
+
+#define	SKX_SAD_MOD3MODE(sad)	GET_BITFIELD((sad), 30, 31)
+#define	SKX_SAD_MOD3(sad)	GET_BITFIELD((sad), 27, 27)
+#define SKX_SAD_LIMIT(sad)	(((u64)GET_BITFIELD((sad), 7, 26) << 26) | MASK26)
+#define	SKX_SAD_MOD3ASMOD2(sad)	GET_BITFIELD((sad), 5, 6)
+#define	SKX_SAD_ATTR(sad)	GET_BITFIELD((sad), 3, 4)
+#define	SKX_SAD_INTERLEAVE(sad)	GET_BITFIELD((sad), 1, 2)
+#define SKX_SAD_ENABLE(sad)	GET_BITFIELD((sad), 0, 0)
+
+#define SKX_ILV_REMOTE(tgt)	(((tgt) & 8) == 0)
+#define SKX_ILV_TARGET(tgt)	((tgt) & 7)
+
+static bool skx_sad_decode(struct decoded_addr *res)
+{
+	struct skx_dev *d = list_first_entry(&skx_edac_list, typeof(*d), list);
+	u64 addr = res->addr;
+	int i, idx, tgt, lchan, shift;
+	u32 sad, ilv;
+	u64 limit, prev_limit;
+	int remote = 0;
+
+	/* Simple sanity check for I/O space or out of range */
+	if (addr >= skx_tohm || (addr >= skx_tolm && addr < BIT_ULL(32))) {
+		edac_dbg(0, "Address %llx out of range\n", addr);
+		return false;
+	}
+
+restart:
+	prev_limit = 0;
+	for (i = 0; i < SKX_MAX_SAD; i++) {
+		SKX_GET_SAD(d, i, sad);
+		limit = SKX_SAD_LIMIT(sad);
+		if (SKX_SAD_ENABLE(sad)) {
+			if (addr >= prev_limit && addr <= limit)
+				goto sad_found;
+		}
+		prev_limit = limit + 1;
+	}
+	edac_dbg(0, "No SAD entry for %llx\n", addr);
+	return false;
+
+sad_found:
+	SKX_GET_ILV(d, i, ilv);
+
+	switch (SKX_SAD_INTERLEAVE(sad)) {
+	case 0:
+		idx = GET_BITFIELD(addr, 6, 8);
+		break;
+	case 1:
+		idx = GET_BITFIELD(addr, 8, 10);
+		break;
+	case 2:
+		idx = GET_BITFIELD(addr, 12, 14);
+		break;
+	case 3:
+		idx = GET_BITFIELD(addr, 30, 32);
+		break;
+	}
+
+	tgt = GET_BITFIELD(ilv, 4 * idx, 4 * idx + 3);
+
+	/* If point to another node, find it and start over */
+	if (SKX_ILV_REMOTE(tgt)) {
+		if (remote) {
+			edac_dbg(0, "Double remote!\n");
+			return false;
+		}
+		remote = 1;
+		list_for_each_entry(d, &skx_edac_list, list) {
+			if (d->imc[0].src_id == SKX_ILV_TARGET(tgt))
+				goto restart;
+		}
+		edac_dbg(0, "Can't find node %d\n", SKX_ILV_TARGET(tgt));
+		return false;
+	}
+
+	if (SKX_SAD_MOD3(sad) == 0)
+		lchan = SKX_ILV_TARGET(tgt);
+	else {
+		switch (SKX_SAD_MOD3MODE(sad)) {
+		case 0:
+			shift = 6;
+			break;
+		case 1:
+			shift = 8;
+			break;
+		case 2:
+			shift = 12;
+			break;
+		default:
+			edac_dbg(0, "illegal mod3mode\n");
+			return false;
+		}
+		switch (SKX_SAD_MOD3ASMOD2(sad)) {
+		case 0:
+			lchan = (addr >> shift) % 3;
+			break;
+		case 1:
+			lchan = (addr >> shift) % 2;
+			break;
+		case 2:
+			lchan = (addr >> shift) % 2;
+			lchan = (lchan << 1) | ~lchan;
+			break;
+		case 3:
+			lchan = ((addr >> shift) % 2) << 1;
+			break;
+		}
+		lchan = (lchan << 1) | (SKX_ILV_TARGET(tgt) & 1);
+	}
+
+	res->dev = d;
+	res->socket = d->imc[0].src_id;
+	res->imc = GET_BITFIELD(d->mcroute, lchan * 3, lchan * 3 + 2);
+	res->channel = GET_BITFIELD(d->mcroute, lchan * 2 + 18, lchan * 2 + 19);
+
+	edac_dbg(2, "%llx: socket=%d imc=%d channel=%d\n",
+		 res->addr, res->socket, res->imc, res->channel);
+	return true;
+}
+
+#define	SKX_MAX_TAD 8
+
+#define SKX_GET_TADBASE(d, mc, i, reg)			\
+	pci_read_config_dword((d)->imc[mc].chan[0].cdev, 0x850 + 4 * (i), &reg)
+#define SKX_GET_TADWAYNESS(d, mc, i, reg)		\
+	pci_read_config_dword((d)->imc[mc].chan[0].cdev, 0x880 + 4 * (i), &reg)
+#define SKX_GET_TADCHNILVOFFSET(d, mc, ch, i, reg)	\
+	pci_read_config_dword((d)->imc[mc].chan[ch].cdev, 0x90 + 4 * (i), &reg)
+
+#define	SKX_TAD_BASE(b)		((u64)GET_BITFIELD((b), 12, 31) << 26)
+#define SKX_TAD_SKT_GRAN(b)	GET_BITFIELD((b), 4, 5)
+#define SKX_TAD_CHN_GRAN(b)	GET_BITFIELD((b), 6, 7)
+#define	SKX_TAD_LIMIT(b)	(((u64)GET_BITFIELD((b), 12, 31) << 26) | MASK26)
+#define	SKX_TAD_OFFSET(b)	((u64)GET_BITFIELD((b), 4, 23) << 26)
+#define	SKX_TAD_SKTWAYS(b)	(1 << GET_BITFIELD((b), 10, 11))
+#define	SKX_TAD_CHNWAYS(b)	(GET_BITFIELD((b), 8, 9) + 1)
+
+/* which bit used for both socket and channel interleave */
+static int skx_granularity[] = { 6, 8, 12, 30 };
+
+static u64 skx_do_interleave(u64 addr, int shift, int ways, u64 lowbits)
+{
+	addr >>= shift;
+	addr /= ways;
+	addr <<= shift;
+
+	return addr | (lowbits & ((1ull << shift) - 1));
+}
+
+static bool skx_tad_decode(struct decoded_addr *res)
+{
+	int i;
+	u32 base, wayness, chnilvoffset;
+	int skt_interleave_bit, chn_interleave_bit;
+	u64 channel_addr;
+
+	for (i = 0; i < SKX_MAX_TAD; i++) {
+		SKX_GET_TADBASE(res->dev, res->imc, i, base);
+		SKX_GET_TADWAYNESS(res->dev, res->imc, i, wayness);
+		if (SKX_TAD_BASE(base) <= res->addr && res->addr <= SKX_TAD_LIMIT(wayness))
+			goto tad_found;
+	}
+	edac_dbg(0, "No TAD entry for %llx\n", res->addr);
+	return false;
+
+tad_found:
+	res->sktways = SKX_TAD_SKTWAYS(wayness);
+	res->chanways = SKX_TAD_CHNWAYS(wayness);
+	skt_interleave_bit = skx_granularity[SKX_TAD_SKT_GRAN(base)];
+	chn_interleave_bit = skx_granularity[SKX_TAD_CHN_GRAN(base)];
+
+	SKX_GET_TADCHNILVOFFSET(res->dev, res->imc, res->channel, i, chnilvoffset);
+	channel_addr = res->addr - SKX_TAD_OFFSET(chnilvoffset);
+
+	if (res->chanways == 3 && skt_interleave_bit > chn_interleave_bit) {
+		/* Must handle channel first, then socket */
+		channel_addr = skx_do_interleave(channel_addr, chn_interleave_bit,
+						 res->chanways, channel_addr);
+		channel_addr = skx_do_interleave(channel_addr, skt_interleave_bit,
+						 res->sktways, channel_addr);
+	} else {
+		/* Handle socket then channel. Preserve low bits from original address */
+		channel_addr = skx_do_interleave(channel_addr, skt_interleave_bit,
+						 res->sktways, res->addr);
+		channel_addr = skx_do_interleave(channel_addr, chn_interleave_bit,
+						 res->chanways, res->addr);
+	}
+
+	res->chan_addr = channel_addr;
+
+	edac_dbg(2, "%llx: chan_addr=%llx sktways=%d chanways=%d\n",
+		 res->addr, res->chan_addr, res->sktways, res->chanways);
+	return true;
+}
+
+#define SKX_MAX_RIR 4
+
+#define SKX_GET_RIRWAYNESS(d, mc, ch, i, reg)		\
+	pci_read_config_dword((d)->imc[mc].chan[ch].cdev,	\
+			      0x108 + 4 * (i), &reg)
+#define SKX_GET_RIRILV(d, mc, ch, idx, i, reg)		\
+	pci_read_config_dword((d)->imc[mc].chan[ch].cdev,	\
+			      0x120 + 16 * idx + 4 * (i), &reg)
+
+#define	SKX_RIR_VALID(b) GET_BITFIELD((b), 31, 31)
+#define	SKX_RIR_LIMIT(b) (((u64)GET_BITFIELD((b), 1, 11) << 29) | MASK29)
+#define	SKX_RIR_WAYS(b) (1 << GET_BITFIELD((b), 28, 29))
+#define	SKX_RIR_CHAN_RANK(b) GET_BITFIELD((b), 16, 19)
+#define	SKX_RIR_OFFSET(b) ((u64)(GET_BITFIELD((b), 2, 15) << 26))
+
+static bool skx_rir_decode(struct decoded_addr *res)
+{
+	int i, idx, chan_rank;
+	int shift;
+	u32 rirway, rirlv;
+	u64 rank_addr, prev_limit = 0, limit;
+
+	if (res->dev->imc[res->imc].chan[res->channel].dimms[0].close_pg)
+		shift = 6;
+	else
+		shift = 13;
+
+	for (i = 0; i < SKX_MAX_RIR; i++) {
+		SKX_GET_RIRWAYNESS(res->dev, res->imc, res->channel, i, rirway);
+		limit = SKX_RIR_LIMIT(rirway);
+		if (SKX_RIR_VALID(rirway)) {
+			if (prev_limit <= res->chan_addr &&
+			    res->chan_addr <= limit)
+				goto rir_found;
+		}
+		prev_limit = limit;
+	}
+	edac_dbg(0, "No RIR entry for %llx\n", res->addr);
+	return false;
+
+rir_found:
+	rank_addr = res->chan_addr >> shift;
+	rank_addr /= SKX_RIR_WAYS(rirway);
+	rank_addr <<= shift;
+	rank_addr |= res->chan_addr & GENMASK_ULL(shift - 1, 0);
+
+	res->rank_address = rank_addr;
+	idx = (res->chan_addr >> shift) % SKX_RIR_WAYS(rirway);
+
+	SKX_GET_RIRILV(res->dev, res->imc, res->channel, idx, i, rirlv);
+	res->rank_address = rank_addr - SKX_RIR_OFFSET(rirlv);
+	chan_rank = SKX_RIR_CHAN_RANK(rirlv);
+	res->channel_rank = chan_rank;
+	res->dimm = chan_rank / 4;
+	res->rank = chan_rank % 4;
+
+	edac_dbg(2, "%llx: dimm=%d rank=%d chan_rank=%d rank_addr=%llx\n",
+		 res->addr, res->dimm, res->rank,
+		 res->channel_rank, res->rank_address);
+	return true;
+}
+
+static u8 skx_close_row[] = {
+	15, 16, 17, 18, 20, 21, 22, 28, 10, 11, 12, 13, 29, 30, 31, 32, 33
+};
+static u8 skx_close_column[] = {
+	3, 4, 5, 14, 19, 23, 24, 25, 26, 27
+};
+static u8 skx_open_row[] = {
+	14, 15, 16, 20, 28, 21, 22, 23, 24, 25, 26, 27, 29, 30, 31, 32, 33
+};
+static u8 skx_open_column[] = {
+	3, 4, 5, 6, 7, 8, 9, 10, 11, 12
+};
+static u8 skx_open_fine_column[] = {
+	3, 4, 5, 7, 8, 9, 10, 11, 12, 13
+};
+
+static int skx_bits(u64 addr, int nbits, u8 *bits)
+{
+	int i, res = 0;
+
+	for (i = 0; i < nbits; i++)
+		res |= ((addr >> bits[i]) & 1) << i;
+	return res;
+}
+
+static int skx_bank_bits(u64 addr, int b0, int b1, int do_xor, int x0, int x1)
+{
+	int ret = GET_BITFIELD(addr, b0, b0) | (GET_BITFIELD(addr, b1, b1) << 1);
+
+	if (do_xor)
+		ret ^= GET_BITFIELD(addr, x0, x0) | (GET_BITFIELD(addr, x1, x1) << 1);
+
+	return ret;
+}
+
+static bool skx_mad_decode(struct decoded_addr *r)
+{
+	struct skx_dimm *dimm = &r->dev->imc[r->imc].chan[r->channel].dimms[r->dimm];
+	int bg0 = dimm->fine_grain_bank ? 6 : 13;
+
+	if (dimm->close_pg) {
+		r->row = skx_bits(r->rank_address, dimm->rowbits, skx_close_row);
+		r->column = skx_bits(r->rank_address, dimm->colbits, skx_close_column);
+		r->column |= 0x400; /* C10 is autoprecharge, always set */
+		r->bank_address = skx_bank_bits(r->rank_address, 8, 9, dimm->bank_xor_enable, 22, 28);
+		r->bank_group = skx_bank_bits(r->rank_address, 6, 7, dimm->bank_xor_enable, 20, 21);
+	} else {
+		r->row = skx_bits(r->rank_address, dimm->rowbits, skx_open_row);
+		if (dimm->fine_grain_bank)
+			r->column = skx_bits(r->rank_address, dimm->colbits, skx_open_fine_column);
+		else
+			r->column = skx_bits(r->rank_address, dimm->colbits, skx_open_column);
+		r->bank_address = skx_bank_bits(r->rank_address, 18, 19, dimm->bank_xor_enable, 22, 23);
+		r->bank_group = skx_bank_bits(r->rank_address, bg0, 17, dimm->bank_xor_enable, 20, 21);
+	}
+	r->row &= (1u << dimm->rowbits) - 1;
+
+	edac_dbg(2, "%llx: row=%x col=%x bank_addr=%d bank_group=%d\n",
+		 r->addr, r->row, r->column, r->bank_address,
+		 r->bank_group);
+	return true;
+}
+
+static bool skx_decode(struct decoded_addr *res)
+{
+
+	return skx_sad_decode(res) && skx_tad_decode(res) &&
+		skx_rir_decode(res) && skx_mad_decode(res);
+}
+
+#ifdef CONFIG_EDAC_DEBUG
+/*
+ * Debug feature. Make /sys/kernel/debug/skx_edac_test/addr.
+ * Write an address to this file to exercise the address decode
+ * logic in this driver.
+ */
+static struct dentry *skx_test;
+static u64 skx_fake_addr;
+
+static int debugfs_u64_set(void *data, u64 val)
+{
+	struct decoded_addr res;
+
+	res.addr = val;
+	skx_decode(&res);
+
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(fops_u64_wo, NULL, debugfs_u64_set, "%llu\n");
+
+static struct dentry *mydebugfs_create(const char *name, umode_t mode,
+				       struct dentry *parent, u64 *value)
+{
+	return debugfs_create_file(name, mode, parent, value, &fops_u64_wo);
+}
+
+static void setup_skx_debug(void)
+{
+	skx_test = debugfs_create_dir("skx_edac_test", NULL);
+	mydebugfs_create("addr", S_IWUSR, skx_test, &skx_fake_addr);
+}
+
+static void teardown_skx_debug(void)
+{
+	debugfs_remove_recursive(skx_test);
+}
+#else
+static void setup_skx_debug(void)
+{
+}
+
+static void teardown_skx_debug(void)
+{
+}
+#endif /*CONFIG_EDAC_DEBUG*/
+
+static void skx_mce_output_error(struct mem_ctl_info *mci,
+				 const struct mce *m,
+				 struct decoded_addr *res)
+{
+	enum hw_event_mc_err_type tp_event;
+	char *type, *optype, msg[256];
+	bool ripv = GET_BITFIELD(m->mcgstatus, 0, 0);
+	bool overflow = GET_BITFIELD(m->status, 62, 62);
+	bool uncorrected_error = GET_BITFIELD(m->status, 61, 61);
+	bool recoverable;
+	u32 core_err_cnt = GET_BITFIELD(m->status, 38, 52);
+	u32 mscod = GET_BITFIELD(m->status, 16, 31);
+	u32 errcode = GET_BITFIELD(m->status, 0, 15);
+	u32 optypenum = GET_BITFIELD(m->status, 4, 6);
+
+	recoverable = GET_BITFIELD(m->status, 56, 56);
+
+	if (uncorrected_error) {
+		if (ripv) {
+			type = "FATAL";
+			tp_event = HW_EVENT_ERR_FATAL;
+		} else {
+			type = "NON_FATAL";
+			tp_event = HW_EVENT_ERR_UNCORRECTED;
+		}
+	} else {
+		type = "CORRECTED";
+		tp_event = HW_EVENT_ERR_CORRECTED;
+	}
+
+	/*
+	 * According with Table 15-9 of the Intel Architecture spec vol 3A,
+	 * memory errors should fit in this mask:
+	 *	000f 0000 1mmm cccc (binary)
+	 * where:
+	 *	f = Correction Report Filtering Bit. If 1, subsequent errors
+	 *	    won't be shown
+	 *	mmm = error type
+	 *	cccc = channel
+	 * If the mask doesn't match, report an error to the parsing logic
+	 */
+	if (!((errcode & 0xef80) == 0x80)) {
+		optype = "Can't parse: it is not a mem";
+	} else {
+		switch (optypenum) {
+		case 0:
+			optype = "generic undef request error";
+			break;
+		case 1:
+			optype = "memory read error";
+			break;
+		case 2:
+			optype = "memory write error";
+			break;
+		case 3:
+			optype = "addr/cmd error";
+			break;
+		case 4:
+			optype = "memory scrubbing error";
+			break;
+		default:
+			optype = "reserved";
+			break;
+		}
+	}
+
+	snprintf(msg, sizeof(msg),
+		 "%s%s err_code:%04x:%04x socket:%d imc:%d rank:%d bg:%d ba:%d row:%x col:%x",
+		 overflow ? " OVERFLOW" : "",
+		 (uncorrected_error && recoverable) ? " recoverable" : "",
+		 mscod, errcode,
+		 res->socket, res->imc, res->rank,
+		 res->bank_group, res->bank_address, res->row, res->column);
+
+	edac_dbg(0, "%s\n", msg);
+
+	/* Call the helper to output message */
+	edac_mc_handle_error(tp_event, mci, core_err_cnt,
+			     m->addr >> PAGE_SHIFT, m->addr & ~PAGE_MASK, 0,
+			     res->channel, res->dimm, -1,
+			     optype, msg);
+}
+
+static int skx_mce_check_error(struct notifier_block *nb, unsigned long val,
+			       void *data)
+{
+	struct mce *mce = (struct mce *)data;
+	struct decoded_addr res;
+	struct mem_ctl_info *mci;
+	char *type;
+
+	if (get_edac_report_status() == EDAC_REPORTING_DISABLED)
+		return NOTIFY_DONE;
+
+	/* ignore unless this is memory related with an address */
+	if ((mce->status & 0xefff) >> 7 != 1 || !(mce->status & MCI_STATUS_ADDRV))
+		return NOTIFY_DONE;
+
+	res.addr = mce->addr;
+	if (!skx_decode(&res))
+		return NOTIFY_DONE;
+	mci = res.dev->imc[res.imc].mci;
+
+	if (mce->mcgstatus & MCG_STATUS_MCIP)
+		type = "Exception";
+	else
+		type = "Event";
+
+	skx_mc_printk(mci, KERN_DEBUG, "HANDLING MCE MEMORY ERROR\n");
+
+	skx_mc_printk(mci, KERN_DEBUG, "CPU %d: Machine Check %s: %Lx "
+			  "Bank %d: %016Lx\n", mce->extcpu, type,
+			  mce->mcgstatus, mce->bank, mce->status);
+	skx_mc_printk(mci, KERN_DEBUG, "TSC %llx ", mce->tsc);
+	skx_mc_printk(mci, KERN_DEBUG, "ADDR %llx ", mce->addr);
+	skx_mc_printk(mci, KERN_DEBUG, "MISC %llx ", mce->misc);
+
+	skx_mc_printk(mci, KERN_DEBUG, "PROCESSOR %u:%x TIME %llu SOCKET "
+			  "%u APIC %x\n", mce->cpuvendor, mce->cpuid,
+			  mce->time, mce->socketid, mce->apicid);
+
+	skx_mce_output_error(mci, mce, &res);
+
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block skx_mce_dec = {
+	.notifier_call = skx_mce_check_error,
+};
+
+static void skx_remove(void)
+{
+	int i, j;
+	struct skx_dev *d, *tmp;
+
+	edac_dbg(0, "\n");
+
+	list_for_each_entry_safe(d, tmp, &skx_edac_list, list) {
+		list_del(&d->list);
+		for (i = 0; i < NUM_IMC; i++) {
+			skx_unregister_mci(&d->imc[i]);
+			for (j = 0; j < NUM_CHANNELS; j++)
+				pci_dev_put(d->imc[i].chan[j].cdev);
+		}
+		pci_dev_put(d->util_all);
+		pci_dev_put(d->sad_all);
+
+		kfree(d);
+	}
+}
+
+/*
+ * skx_init:
+ *	make sure we are running on the correct cpu model
+ *	search for all the devices we need
+ *	check which DIMMs are present.
+ */
+int __init skx_init(void)
+{
+	const struct x86_cpu_id *id;
+	const struct munit *m;
+	int rc = 0, i;
+	u8 mc = 0, src_id, node_id;
+	struct skx_dev *d;
+
+	edac_dbg(2, "\n");
+
+	id = x86_match_cpu(skx_cpuids);
+	if (!id)
+		return -ENODEV;
+
+	rc = skx_get_hi_lo();
+	if (rc)
+		return rc;
+
+	rc = get_all_bus_mappings();
+	if (rc < 0)
+		goto fail;
+	if (rc == 0) {
+		edac_dbg(2, "No memory controllers found\n");
+		return -ENODEV;
+	}
+
+	for (m = skx_all_munits; m->did; m++) {
+		rc = get_all_munits(m);
+		if (rc < 0)
+			goto fail;
+		if (rc != m->per_socket * skx_num_sockets) {
+			edac_dbg(2, "Expected %d, got %d of %x\n",
+				 m->per_socket * skx_num_sockets, rc, m->did);
+			rc = -ENODEV;
+			goto fail;
+		}
+	}
+
+	list_for_each_entry(d, &skx_edac_list, list) {
+		src_id = get_src_id(d);
+		node_id = skx_get_node_id(d);
+		edac_dbg(2, "src_id=%d node_id=%d\n", src_id, node_id);
+		for (i = 0; i < NUM_IMC; i++) {
+			d->imc[i].mc = mc++;
+			d->imc[i].lmc = i;
+			d->imc[i].src_id = src_id;
+			d->imc[i].node_id = node_id;
+			rc = skx_register_mci(&d->imc[i]);
+			if (rc < 0)
+				goto fail;
+		}
+	}
+
+	/* Ensure that the OPSTATE is set correctly for POLL or NMI */
+	opstate_init();
+
+	setup_skx_debug();
+
+	mce_register_decode_chain(&skx_mce_dec);
+
+	return 0;
+fail:
+	skx_remove();
+	return rc;
+}
+
+static void __exit skx_exit(void)
+{
+	edac_dbg(2, "\n");
+	mce_unregister_decode_chain(&skx_mce_dec);
+	skx_remove();
+	teardown_skx_debug();
+}
+
+module_init(skx_init);
+module_exit(skx_exit);
+
+module_param(edac_op_state, int, 0444);
+MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Tony Luck");
+MODULE_DESCRIPTION("MC Driver for Intel Skylake server processors");

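Most of the new skx_edac driver is register parsing: the decode path (SAD, then TAD, then RIR, then bank/row/column) repeatedly pulls bit ranges out of PCI config values with GET_BITFIELD. A tiny standalone illustration of that helper on a made-up SAD-style register value (the field layout is only the one used by the driver's macros, and the sample value is invented):

#include <stdint.h>
#include <stdio.h>

/* Same idea as the driver's helpers: take bits lo..hi (inclusive) of v. */
#define GENMASK_ULL(h, l) \
	(((~0ULL) << (l)) & (~0ULL >> (63 - (h))))
#define GET_BITFIELD(v, lo, hi) \
	(((v) & GENMASK_ULL((hi), (lo))) >> (lo))

int main(void)
{
	/* Assume a SAD entry register value; per the macros above:
	 * bit 0 = enable, bits 1-2 = interleave mode, bits 7-26 = limit (<< 26). */
	uint64_t sad = 0x000001a5ULL;

	printf("enable     = %llu\n", (unsigned long long)GET_BITFIELD(sad, 0, 0));
	printf("interleave = %llu\n", (unsigned long long)GET_BITFIELD(sad, 1, 2));
	printf("limit      = %#llx\n",
	       (unsigned long long)((GET_BITFIELD(sad, 7, 26) << 26) | 0x3FFFFFF));
	return 0;
}
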
+ 2 - 2
drivers/gpu/drm/amd/amdgpu/amdgpu.h

@@ -646,9 +646,9 @@ int amdgpu_gart_table_vram_pin(struct amdgpu_device *adev);
 void amdgpu_gart_table_vram_unpin(struct amdgpu_device *adev);
 int amdgpu_gart_init(struct amdgpu_device *adev);
 void amdgpu_gart_fini(struct amdgpu_device *adev);
-void amdgpu_gart_unbind(struct amdgpu_device *adev, unsigned offset,
+void amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
 			int pages);
-int amdgpu_gart_bind(struct amdgpu_device *adev, unsigned offset,
+int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
 		     int pages, struct page **pagelist,
 		     dma_addr_t *dma_addr, uint32_t flags);
 

+ 0 - 9
drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c

@@ -200,16 +200,7 @@ static int amdgpu_atpx_validate(struct amdgpu_atpx *atpx)
 	atpx->is_hybrid = false;
 	if (valid_bits & ATPX_MS_HYBRID_GFX_SUPPORTED) {
 		printk("ATPX Hybrid Graphics\n");
-#if 1
-		/* This is a temporary hack until the D3 cold support
-		 * makes it upstream.  The ATPX power_control method seems
-		 * to still work on even if the system should be using
-		 * the new standardized hybrid D3 cold ACPI interface.
-		 */
-		atpx->functions.power_cntl = true;
-#else
 		atpx->functions.power_cntl = false;
-#endif
 		atpx->is_hybrid = true;
 	}
 

+ 2 - 2
drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c

@@ -221,7 +221,7 @@ void amdgpu_gart_table_vram_free(struct amdgpu_device *adev)
  * Unbinds the requested pages from the gart page table and
  * replaces them with the dummy page (all asics).
  */
-void amdgpu_gart_unbind(struct amdgpu_device *adev, unsigned offset,
+void amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
 			int pages)
 {
 	unsigned t;
@@ -268,7 +268,7 @@ void amdgpu_gart_unbind(struct amdgpu_device *adev, unsigned offset,
  * (all asics).
  * Returns 0 for success, -EINVAL for failure.
  */
-int amdgpu_gart_bind(struct amdgpu_device *adev, unsigned offset,
+int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
 		     int pages, struct page **pagelist, dma_addr_t *dma_addr,
 		     uint32_t flags)
 {

+ 2 - 1
drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c

@@ -1187,7 +1187,8 @@ int amdgpu_uvd_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 		r = 0;
 	}
 
-error:
 	fence_put(fence);
+
+error:
 	return r;
 }

+ 4 - 1
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c

@@ -1535,7 +1535,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 	r = amd_sched_entity_init(&ring->sched, &vm->entity,
 				  rq, amdgpu_sched_jobs);
 	if (r)
-		return r;
+		goto err;
 
 	vm->page_directory_fence = NULL;
 
@@ -1565,6 +1565,9 @@ error_free_page_directory:
 error_free_sched_entity:
 	amd_sched_entity_fini(&ring->sched, &vm->entity);
 
+err:
+	drm_free_large(vm->page_tables);
+
 	return r;
 }
 

+ 1 - 1
drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c

@@ -184,7 +184,7 @@ u32 __iomem *kfd_get_kernel_doorbell(struct kfd_dev *kfd,
 							sizeof(u32)) + inx;
 
 	pr_debug("kfd: get kernel queue doorbell\n"
-			 "     doorbell offset   == 0x%08d\n"
+			 "     doorbell offset   == 0x%08X\n"
 			 "     kernel address    == 0x%08lX\n",
 		*doorbell_off, (uintptr_t)(kfd->doorbell_kernel_ptr + inx));
 

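The kfd_doorbell fix above is purely a format-string bug: %08d prints the offset in decimal behind a "0x" prefix, which reads as a wrong hexadecimal number. A two-line demonstration with an assumed offset value:

#include <stdio.h>

int main(void)
{
	unsigned int doorbell_off = 0x1A4;   /* 420 in decimal */

	/* old format: decimal digits behind a "0x" prefix -- misleading */
	printf("doorbell offset == 0x%08d\n", (int)doorbell_off);    /* prints 0x00000420 */
	/* fixed format: hexadecimal, matching the prefix */
	printf("doorbell offset == 0x%08X\n", doorbell_off);         /* prints 0x000001A4 */
	return 0;
}
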
+ 1 - 1
drivers/gpu/drm/drm_fb_helper.c

@@ -464,7 +464,7 @@ static bool drm_fb_helper_is_bound(struct drm_fb_helper *fb_helper)
 
 	/* Sometimes user space wants everything disabled, so don't steal the
 	 * display if there's a master. */
-	if (lockless_dereference(dev->master))
+	if (READ_ONCE(dev->master))
 		return false;
 
 	drm_for_each_crtc(crtc, dev) {

+ 5 - 5
drivers/gpu/drm/etnaviv/etnaviv_gpu.c

@@ -1333,8 +1333,6 @@ int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
 	if (ret < 0)
 		return ret;
 
-	mutex_lock(&gpu->lock);
-
 	/*
 	 * TODO
 	 *
@@ -1348,16 +1346,18 @@ int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
 	if (unlikely(event == ~0U)) {
 		DRM_ERROR("no free event\n");
 		ret = -EBUSY;
-		goto out_unlock;
+		goto out_pm_put;
 	}
 
 	fence = etnaviv_gpu_fence_alloc(gpu);
 	if (!fence) {
 		event_free(gpu, event);
 		ret = -ENOMEM;
-		goto out_unlock;
+		goto out_pm_put;
 	}
 
+	mutex_lock(&gpu->lock);
+
 	gpu->event[event].fence = fence;
 	submit->fence = fence->seqno;
 	gpu->active_fence = submit->fence;
@@ -1395,9 +1395,9 @@ int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
 	hangcheck_timer_reset(gpu);
 	ret = 0;
 
-out_unlock:
 	mutex_unlock(&gpu->lock);
 
+out_pm_put:
 	etnaviv_gpu_pm_put(gpu);
 
 	return ret;

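The etnaviv change above is a lock-scope fix: the allocations that can fail are done before gpu->lock is taken, so the error paths no longer have to unlock anything (hence out_unlock becoming out_pm_put). A small sketch of the same acquire-late pattern, with a pthread mutex standing in for the GPU lock:

#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t gpu_lock = PTHREAD_MUTEX_INITIALIZER;

/* Allocate everything that can fail first; take the lock only around
 * the part that actually mutates shared state. */
static int submit(void)
{
	int ret = 0;
	void *fence = malloc(32);            /* stand-in for fence allocation */

	if (!fence) {
		ret = -ENOMEM;
		goto out;                    /* nothing locked yet, nothing to unlock */
	}

	pthread_mutex_lock(&gpu_lock);
	/* ... queue the work under the lock ... */
	pthread_mutex_unlock(&gpu_lock);

	free(fence);
out:
	return ret;
}

int main(void)
{
	printf("submit() = %d\n", submit());
	return 0;
}
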
+ 1 - 0
drivers/gpu/drm/i915/i915_drv.h

@@ -1854,6 +1854,7 @@ struct drm_i915_private {
 	enum modeset_restore modeset_restore;
 	struct mutex modeset_restore_lock;
 	struct drm_atomic_state *modeset_restore_state;
+	struct drm_modeset_acquire_ctx reset_ctx;
 
 	struct list_head vm_list; /* Global list of all address spaces */
 	struct i915_ggtt ggtt; /* VM representing the global address space */

+ 8 - 2
drivers/gpu/drm/i915/i915_gem.c

@@ -879,9 +879,12 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
 	ret = i915_gem_shmem_pread(dev, obj, args, file);
 
 	/* pread for non shmem backed objects */
-	if (ret == -EFAULT || ret == -ENODEV)
+	if (ret == -EFAULT || ret == -ENODEV) {
+		intel_runtime_pm_get(to_i915(dev));
 		ret = i915_gem_gtt_pread(dev, obj, args->size,
 					args->offset, args->data_ptr);
+		intel_runtime_pm_put(to_i915(dev));
+	}
 
 out:
 	drm_gem_object_unreference(&obj->base);
@@ -1306,7 +1309,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
 		 * textures). Fallback to the shmem path in that case. */
 	}
 
-	if (ret == -EFAULT) {
+	if (ret == -EFAULT || ret == -ENOSPC) {
 		if (obj->phys_handle)
 			ret = i915_gem_phys_pwrite(obj, args, file);
 		else if (i915_gem_object_has_struct_page(obj))
@@ -3169,6 +3172,8 @@ static void i915_gem_reset_engine_cleanup(struct intel_engine_cs *engine)
 	}
 
 	intel_ring_init_seqno(engine, engine->last_submitted_seqno);
+
+	engine->i915->gt.active_engines &= ~intel_engine_flag(engine);
 }
 
 void i915_gem_reset(struct drm_device *dev)
@@ -3186,6 +3191,7 @@ void i915_gem_reset(struct drm_device *dev)
 
 	for_each_engine(engine, dev_priv)
 		i915_gem_reset_engine_cleanup(engine);
+	mod_delayed_work(dev_priv->wq, &dev_priv->gt.idle_work, 0);
 
 	i915_gem_context_reset(dev);
 

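The i915_gem_pread hunk wraps the GTT fallback in a runtime-PM get/put pair so the device cannot autosuspend while its aperture is being read. The shape of that bracketing, with a toy reference count instead of the real intel_runtime_pm_* calls:

#include <stdio.h>

static int pm_refcount;                      /* toy stand-in for runtime-PM state */

static void pm_get(void) { pm_refcount++; }  /* device must stay awake */
static void pm_put(void) { pm_refcount--; }  /* device may suspend again */

static int gtt_read(void)
{
	if (pm_refcount == 0) {
		fprintf(stderr, "bug: touching hardware while it may be suspended\n");
		return -1;
	}
	return 0;                            /* pretend we read through the aperture */
}

int main(void)
{
	int ret;

	pm_get();                            /* bracket the fallback path, as the patch does */
	ret = gtt_read();
	pm_put();

	printf("gtt_read() = %d, refcount back to %d\n", ret, pm_refcount);
	return 0;
}
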
+ 1 - 0
drivers/gpu/drm/i915/i915_gem_gtt.c

@@ -2873,6 +2873,7 @@ void i915_ggtt_cleanup_hw(struct drm_device *dev)
 		struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
 
 		ppgtt->base.cleanup(&ppgtt->base);
+		kfree(ppgtt);
 	}
 
 	i915_gem_cleanup_stolen(dev);

+ 1 - 0
drivers/gpu/drm/i915/i915_reg.h

@@ -1536,6 +1536,7 @@ enum skl_disp_power_wells {
 #define BALANCE_LEG_MASK(port)		(7<<(8+3*(port)))
 /* Balance leg disable bits */
 #define BALANCE_LEG_DISABLE_SHIFT	23
+#define BALANCE_LEG_DISABLE(port)	(1 << (23 + (port)))
 
 /*
  * Fence registers

+ 6 - 0
drivers/gpu/drm/i915/intel_audio.c

@@ -600,6 +600,8 @@ static void i915_audio_component_codec_wake_override(struct device *dev,
 	if (!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv))
 		return;
 
+	i915_audio_component_get_power(dev);
+
 	/*
 	 * Enable/disable generating the codec wake signal, overriding the
 	 * internal logic to generate the codec wake to controller.
@@ -615,6 +617,8 @@ static void i915_audio_component_codec_wake_override(struct device *dev,
 		I915_WRITE(HSW_AUD_CHICKENBIT, tmp);
 		usleep_range(1000, 1500);
 	}
+
+	i915_audio_component_put_power(dev);
 }
 
 /* Get CDCLK in kHz  */
@@ -648,6 +652,7 @@ static int i915_audio_component_sync_audio_rate(struct device *dev,
 	    !IS_HASWELL(dev_priv))
 		return 0;
 
+	i915_audio_component_get_power(dev);
 	mutex_lock(&dev_priv->av_mutex);
 	/* 1. get the pipe */
 	intel_encoder = dev_priv->dig_port_map[port];
@@ -698,6 +703,7 @@ static int i915_audio_component_sync_audio_rate(struct device *dev,
 
  unlock:
 	mutex_unlock(&dev_priv->av_mutex);
+	i915_audio_component_put_power(dev);
 	return err;
 }
 

+ 65 - 26
drivers/gpu/drm/i915/intel_ddi.c

@@ -145,7 +145,7 @@ static const struct ddi_buf_trans skl_ddi_translations_dp[] = {
 static const struct ddi_buf_trans skl_u_ddi_translations_dp[] = {
 	{ 0x0000201B, 0x000000A2, 0x0 },
 	{ 0x00005012, 0x00000088, 0x0 },
-	{ 0x80007011, 0x000000CD, 0x0 },
+	{ 0x80007011, 0x000000CD, 0x1 },
 	{ 0x80009010, 0x000000C0, 0x1 },
 	{ 0x0000201B, 0x0000009D, 0x0 },
 	{ 0x80005012, 0x000000C0, 0x1 },
@@ -158,7 +158,7 @@ static const struct ddi_buf_trans skl_u_ddi_translations_dp[] = {
 static const struct ddi_buf_trans skl_y_ddi_translations_dp[] = {
 	{ 0x00000018, 0x000000A2, 0x0 },
 	{ 0x00005012, 0x00000088, 0x0 },
-	{ 0x80007011, 0x000000CD, 0x0 },
+	{ 0x80007011, 0x000000CD, 0x3 },
 	{ 0x80009010, 0x000000C0, 0x3 },
 	{ 0x00000018, 0x0000009D, 0x0 },
 	{ 0x80005012, 0x000000C0, 0x3 },
@@ -388,6 +388,40 @@ skl_get_buf_trans_hdmi(struct drm_i915_private *dev_priv, int *n_entries)
 	}
 }
 
+static int intel_ddi_hdmi_level(struct drm_i915_private *dev_priv, enum port port)
+{
+	int n_hdmi_entries;
+	int hdmi_level;
+	int hdmi_default_entry;
+
+	hdmi_level = dev_priv->vbt.ddi_port_info[port].hdmi_level_shift;
+
+	if (IS_BROXTON(dev_priv))
+		return hdmi_level;
+
+	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+		skl_get_buf_trans_hdmi(dev_priv, &n_hdmi_entries);
+		hdmi_default_entry = 8;
+	} else if (IS_BROADWELL(dev_priv)) {
+		n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi);
+		hdmi_default_entry = 7;
+	} else if (IS_HASWELL(dev_priv)) {
+		n_hdmi_entries = ARRAY_SIZE(hsw_ddi_translations_hdmi);
+		hdmi_default_entry = 6;
+	} else {
+		WARN(1, "ddi translation table missing\n");
+		n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi);
+		hdmi_default_entry = 7;
+	}
+
+	/* Choose a good default if VBT is badly populated */
+	if (hdmi_level == HDMI_LEVEL_SHIFT_UNKNOWN ||
+	    hdmi_level >= n_hdmi_entries)
+		hdmi_level = hdmi_default_entry;
+
+	return hdmi_level;
+}
+
 /*
  * Starting with Haswell, DDI port buffers must be programmed with correct
  * values in advance. The buffer values are different for FDI and DP modes,
@@ -399,7 +433,7 @@ void intel_prepare_ddi_buffer(struct intel_encoder *encoder)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	u32 iboost_bit = 0;
-	int i, n_hdmi_entries, n_dp_entries, n_edp_entries, hdmi_default_entry,
+	int i, n_hdmi_entries, n_dp_entries, n_edp_entries,
 	    size;
 	int hdmi_level;
 	enum port port;
@@ -410,7 +444,7 @@ void intel_prepare_ddi_buffer(struct intel_encoder *encoder)
 	const struct ddi_buf_trans *ddi_translations;
 
 	port = intel_ddi_get_encoder_port(encoder);
-	hdmi_level = dev_priv->vbt.ddi_port_info[port].hdmi_level_shift;
+	hdmi_level = intel_ddi_hdmi_level(dev_priv, port);
 
 	if (IS_BROXTON(dev_priv)) {
 		if (encoder->type != INTEL_OUTPUT_HDMI)
@@ -430,7 +464,6 @@ void intel_prepare_ddi_buffer(struct intel_encoder *encoder)
 				skl_get_buf_trans_edp(dev_priv, &n_edp_entries);
 		ddi_translations_hdmi =
 				skl_get_buf_trans_hdmi(dev_priv, &n_hdmi_entries);
-		hdmi_default_entry = 8;
 		/* If we're boosting the current, set bit 31 of trans1 */
 		if (dev_priv->vbt.ddi_port_info[port].hdmi_boost_level ||
 		    dev_priv->vbt.ddi_port_info[port].dp_boost_level)
@@ -456,7 +489,6 @@ void intel_prepare_ddi_buffer(struct intel_encoder *encoder)
 
 		n_dp_entries = ARRAY_SIZE(bdw_ddi_translations_dp);
 		n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi);
-		hdmi_default_entry = 7;
 	} else if (IS_HASWELL(dev_priv)) {
 		ddi_translations_fdi = hsw_ddi_translations_fdi;
 		ddi_translations_dp = hsw_ddi_translations_dp;
@@ -464,7 +496,6 @@ void intel_prepare_ddi_buffer(struct intel_encoder *encoder)
 		ddi_translations_hdmi = hsw_ddi_translations_hdmi;
 		n_dp_entries = n_edp_entries = ARRAY_SIZE(hsw_ddi_translations_dp);
 		n_hdmi_entries = ARRAY_SIZE(hsw_ddi_translations_hdmi);
-		hdmi_default_entry = 6;
 	} else {
 		WARN(1, "ddi translation table missing\n");
 		ddi_translations_edp = bdw_ddi_translations_dp;
@@ -474,7 +505,6 @@ void intel_prepare_ddi_buffer(struct intel_encoder *encoder)
 		n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_edp);
 		n_dp_entries = ARRAY_SIZE(bdw_ddi_translations_dp);
 		n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi);
-		hdmi_default_entry = 7;
 	}
 
 	switch (encoder->type) {
@@ -505,11 +535,6 @@ void intel_prepare_ddi_buffer(struct intel_encoder *encoder)
 	if (encoder->type != INTEL_OUTPUT_HDMI)
 		return;
 
-	/* Choose a good default if VBT is badly populated */
-	if (hdmi_level == HDMI_LEVEL_SHIFT_UNKNOWN ||
-	    hdmi_level >= n_hdmi_entries)
-		hdmi_level = hdmi_default_entry;
-
 	/* Entry 9 is for HDMI: */
 	I915_WRITE(DDI_BUF_TRANS_LO(port, i),
 		   ddi_translations_hdmi[hdmi_level].trans1 | iboost_bit);
@@ -1379,14 +1404,30 @@ void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc)
 			   TRANS_CLK_SEL_DISABLED);
 }
 
-static void skl_ddi_set_iboost(struct drm_i915_private *dev_priv,
-			       u32 level, enum port port, int type)
+static void _skl_ddi_set_iboost(struct drm_i915_private *dev_priv,
+				enum port port, uint8_t iboost)
 {
+	u32 tmp;
+
+	tmp = I915_READ(DISPIO_CR_TX_BMU_CR0);
+	tmp &= ~(BALANCE_LEG_MASK(port) | BALANCE_LEG_DISABLE(port));
+	if (iboost)
+		tmp |= iboost << BALANCE_LEG_SHIFT(port);
+	else
+		tmp |= BALANCE_LEG_DISABLE(port);
+	I915_WRITE(DISPIO_CR_TX_BMU_CR0, tmp);
+}
+
+static void skl_ddi_set_iboost(struct intel_encoder *encoder, u32 level)
+{
+	struct intel_digital_port *intel_dig_port = enc_to_dig_port(&encoder->base);
+	struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
+	enum port port = intel_dig_port->port;
+	int type = encoder->type;
 	const struct ddi_buf_trans *ddi_translations;
 	uint8_t iboost;
 	uint8_t dp_iboost, hdmi_iboost;
 	int n_entries;
-	u32 reg;
 
 	/* VBT may override standard boost values */
 	dp_iboost = dev_priv->vbt.ddi_port_info[port].dp_boost_level;
@@ -1428,16 +1469,10 @@ static void skl_ddi_set_iboost(struct drm_i915_private *dev_priv,
 		return;
 	}
 
-	reg = I915_READ(DISPIO_CR_TX_BMU_CR0);
-	reg &= ~BALANCE_LEG_MASK(port);
-	reg &= ~(1 << (BALANCE_LEG_DISABLE_SHIFT + port));
-
-	if (iboost)
-		reg |= iboost << BALANCE_LEG_SHIFT(port);
-	else
-		reg |= 1 << (BALANCE_LEG_DISABLE_SHIFT + port);
+	_skl_ddi_set_iboost(dev_priv, port, iboost);
 
-	I915_WRITE(DISPIO_CR_TX_BMU_CR0, reg);
+	if (port == PORT_A && intel_dig_port->max_lanes == 4)
+		_skl_ddi_set_iboost(dev_priv, PORT_E, iboost);
 }
 
 static void bxt_ddi_vswing_sequence(struct drm_i915_private *dev_priv,
@@ -1568,7 +1603,7 @@ uint32_t ddi_signal_levels(struct intel_dp *intel_dp)
 	level = translate_signal_level(signal_levels);
 
 	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
-		skl_ddi_set_iboost(dev_priv, level, port, encoder->type);
+		skl_ddi_set_iboost(encoder, level);
 	else if (IS_BROXTON(dev_priv))
 		bxt_ddi_vswing_sequence(dev_priv, level, port, encoder->type);
 
@@ -1637,6 +1672,10 @@ static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
 			intel_dp_stop_link_train(intel_dp);
 	} else if (type == INTEL_OUTPUT_HDMI) {
 		struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+		int level = intel_ddi_hdmi_level(dev_priv, port);
+
+		if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
+			skl_ddi_set_iboost(intel_encoder, level);
 
 		intel_hdmi->set_infoframes(encoder,
 					   crtc->config->has_hdmi_sink,

+ 110 - 60
drivers/gpu/drm/i915/intel_display.c

@@ -3093,40 +3093,110 @@ static void intel_update_primary_planes(struct drm_device *dev)
 
 
 	for_each_crtc(dev, crtc) {
 	for_each_crtc(dev, crtc) {
 		struct intel_plane *plane = to_intel_plane(crtc->primary);
 		struct intel_plane *plane = to_intel_plane(crtc->primary);
-		struct intel_plane_state *plane_state;
-
-		drm_modeset_lock_crtc(crtc, &plane->base);
-		plane_state = to_intel_plane_state(plane->base.state);
+		struct intel_plane_state *plane_state =
+			to_intel_plane_state(plane->base.state);
 
 
 		if (plane_state->visible)
 		if (plane_state->visible)
 			plane->update_plane(&plane->base,
 			plane->update_plane(&plane->base,
 					    to_intel_crtc_state(crtc->state),
 					    to_intel_crtc_state(crtc->state),
 					    plane_state);
 					    plane_state);
+	}
+}
+
+static int
+__intel_display_resume(struct drm_device *dev,
+		       struct drm_atomic_state *state)
+{
+	struct drm_crtc_state *crtc_state;
+	struct drm_crtc *crtc;
+	int i, ret;
+
+	intel_modeset_setup_hw_state(dev);
+	i915_redisable_vga(dev);
 
 
-		drm_modeset_unlock_crtc(crtc);
+	if (!state)
+		return 0;
+
+	for_each_crtc_in_state(state, crtc, crtc_state, i) {
+		/*
+		 * Force recalculation even if we restore
+		 * current state. With fast modeset this may not result
+		 * in a modeset when the state is compatible.
+		 */
+		crtc_state->mode_changed = true;
 	}
 	}
+
+	/* ignore any reset values/BIOS leftovers in the WM registers */
+	to_intel_atomic_state(state)->skip_intermediate_wm = true;
+
+	ret = drm_atomic_commit(state);
+
+	WARN_ON(ret == -EDEADLK);
+	return ret;
 }
 }
 
 
 void intel_prepare_reset(struct drm_i915_private *dev_priv)
 void intel_prepare_reset(struct drm_i915_private *dev_priv)
 {
 {
+	struct drm_device *dev = &dev_priv->drm;
+	struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
+	struct drm_atomic_state *state;
+	int ret;
+
 	/* no reset support for gen2 */
 	/* no reset support for gen2 */
 	if (IS_GEN2(dev_priv))
 	if (IS_GEN2(dev_priv))
 		return;
 		return;
 
 
-	/* reset doesn't touch the display */
+	/*
+	 * Need mode_config.mutex so that we don't
+	 * trample ongoing ->detect() and whatnot.
+	 */
+	mutex_lock(&dev->mode_config.mutex);
+	drm_modeset_acquire_init(ctx, 0);
+	while (1) {
+		ret = drm_modeset_lock_all_ctx(dev, ctx);
+		if (ret != -EDEADLK)
+			break;
+
+		drm_modeset_backoff(ctx);
+	}
+
+	/* reset doesn't touch the display, but flips might get nuked anyway, */
 	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
 	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
 		return;
 		return;
 
 
-	drm_modeset_lock_all(&dev_priv->drm);
 	/*
 	/*
 	 * Disabling the crtcs gracefully seems nicer. Also the
 	 * Disabling the crtcs gracefully seems nicer. Also the
 	 * g33 docs say we should at least disable all the planes.
 	 * g33 docs say we should at least disable all the planes.
 	 */
 	 */
-	intel_display_suspend(&dev_priv->drm);
+	state = drm_atomic_helper_duplicate_state(dev, ctx);
+	if (IS_ERR(state)) {
+		ret = PTR_ERR(state);
+		state = NULL;
+		DRM_ERROR("Duplicating state failed with %i\n", ret);
+		goto err;
+	}
+
+	ret = drm_atomic_helper_disable_all(dev, ctx);
+	if (ret) {
+		DRM_ERROR("Suspending crtc's failed with %i\n", ret);
+		goto err;
+	}
+
+	dev_priv->modeset_restore_state = state;
+	state->acquire_ctx = ctx;
+	return;
+
+err:
+	drm_atomic_state_free(state);
 }
 }
 
 
 void intel_finish_reset(struct drm_i915_private *dev_priv)
 void intel_finish_reset(struct drm_i915_private *dev_priv)
 {
 {
+	struct drm_device *dev = &dev_priv->drm;
+	struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
+	struct drm_atomic_state *state = dev_priv->modeset_restore_state;
+	int ret;
+
 	/*
 	/*
 	 * Flips in the rings will be nuked by the reset,
 	 * Flips in the rings will be nuked by the reset,
 	 * so complete all pending flips so that user space
 	 * so complete all pending flips so that user space
@@ -3138,6 +3208,8 @@ void intel_finish_reset(struct drm_i915_private *dev_priv)
 	if (IS_GEN2(dev_priv))
 	if (IS_GEN2(dev_priv))
 		return;
 		return;
 
 
+	dev_priv->modeset_restore_state = NULL;
+
 	/* reset doesn't touch the display */
 	/* reset doesn't touch the display */
 	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) {
 	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) {
 		/*
 		/*
@@ -3149,29 +3221,32 @@ void intel_finish_reset(struct drm_i915_private *dev_priv)
 		 * FIXME: Atomic will make this obsolete since we won't schedule
 		 * FIXME: Atomic will make this obsolete since we won't schedule
 		 * CS-based flips (which might get lost in gpu resets) any more.
 		 * CS-based flips (which might get lost in gpu resets) any more.
 		 */
 		 */
-		intel_update_primary_planes(&dev_priv->drm);
-		return;
-	}
-
-	/*
-	 * The display has been reset as well,
-	 * so need a full re-initialization.
-	 */
-	intel_runtime_pm_disable_interrupts(dev_priv);
-	intel_runtime_pm_enable_interrupts(dev_priv);
+		intel_update_primary_planes(dev);
+	} else {
+		/*
+		 * The display has been reset as well,
+		 * so need a full re-initialization.
+		 */
+		intel_runtime_pm_disable_interrupts(dev_priv);
+		intel_runtime_pm_enable_interrupts(dev_priv);
 
 
-	intel_modeset_init_hw(&dev_priv->drm);
+		intel_modeset_init_hw(dev);
 
 
-	spin_lock_irq(&dev_priv->irq_lock);
-	if (dev_priv->display.hpd_irq_setup)
-		dev_priv->display.hpd_irq_setup(dev_priv);
-	spin_unlock_irq(&dev_priv->irq_lock);
+		spin_lock_irq(&dev_priv->irq_lock);
+		if (dev_priv->display.hpd_irq_setup)
+			dev_priv->display.hpd_irq_setup(dev_priv);
+		spin_unlock_irq(&dev_priv->irq_lock);
 
 
-	intel_display_resume(&dev_priv->drm);
+		ret = __intel_display_resume(dev, state);
+		if (ret)
+			DRM_ERROR("Restoring old state failed with %i\n", ret);
 
 
-	intel_hpd_init(dev_priv);
+		intel_hpd_init(dev_priv);
+	}
 
 
-	drm_modeset_unlock_all(&dev_priv->drm);
+	drm_modeset_drop_locks(ctx);
+	drm_modeset_acquire_fini(ctx);
+	mutex_unlock(&dev->mode_config.mutex);
 }
 }
 
 
 static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
 static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
@@ -16156,9 +16231,10 @@ void intel_display_resume(struct drm_device *dev)
 	struct drm_atomic_state *state = dev_priv->modeset_restore_state;
 	struct drm_atomic_state *state = dev_priv->modeset_restore_state;
 	struct drm_modeset_acquire_ctx ctx;
 	struct drm_modeset_acquire_ctx ctx;
 	int ret;
 	int ret;
-	bool setup = false;
 
 
 	dev_priv->modeset_restore_state = NULL;
 	dev_priv->modeset_restore_state = NULL;
+	if (state)
+		state->acquire_ctx = &ctx;
 
 
 	/*
 	/*
 	 * This is a cludge because with real atomic modeset mode_config.mutex
 	 * This is a cludge because with real atomic modeset mode_config.mutex
@@ -16169,43 +16245,17 @@ void intel_display_resume(struct drm_device *dev)
 	mutex_lock(&dev->mode_config.mutex);
 	mutex_lock(&dev->mode_config.mutex);
 	drm_modeset_acquire_init(&ctx, 0);
 	drm_modeset_acquire_init(&ctx, 0);
 
 
-retry:
-	ret = drm_modeset_lock_all_ctx(dev, &ctx);
-
-	if (ret == 0 && !setup) {
-		setup = true;
-
-		intel_modeset_setup_hw_state(dev);
-		i915_redisable_vga(dev);
-	}
-
-	if (ret == 0 && state) {
-		struct drm_crtc_state *crtc_state;
-		struct drm_crtc *crtc;
-		int i;
-
-		state->acquire_ctx = &ctx;
-
-		/* ignore any reset values/BIOS leftovers in the WM registers */
-		to_intel_atomic_state(state)->skip_intermediate_wm = true;
-
-		for_each_crtc_in_state(state, crtc, crtc_state, i) {
-			/*
-			 * Force recalculation even if we restore
-			 * current state. With fast modeset this may not result
-			 * in a modeset when the state is compatible.
-			 */
-			crtc_state->mode_changed = true;
-		}
-
-		ret = drm_atomic_commit(state);
-	}
+	while (1) {
+		ret = drm_modeset_lock_all_ctx(dev, &ctx);
+		if (ret != -EDEADLK)
+			break;
 
 
-	if (ret == -EDEADLK) {
 		drm_modeset_backoff(&ctx);
 		drm_modeset_backoff(&ctx);
-		goto retry;
 	}
 	}
 
 
+	if (!ret)
+		ret = __intel_display_resume(dev, state);
+
 	drm_modeset_drop_locks(&ctx);
 	drm_modeset_drop_locks(&ctx);
 	drm_modeset_acquire_fini(&ctx);
 	drm_modeset_acquire_fini(&ctx);
 	mutex_unlock(&dev->mode_config.mutex);
 	mutex_unlock(&dev->mode_config.mutex);

+ 20 - 0
drivers/gpu/drm/i915/intel_fbc.c

@@ -1230,12 +1230,29 @@ static int intel_sanitize_fbc_option(struct drm_i915_private *dev_priv)
 	if (i915.enable_fbc >= 0)
 		return !!i915.enable_fbc;
 
+	if (!HAS_FBC(dev_priv))
+		return 0;
+
 	if (IS_BROADWELL(dev_priv))
 		return 1;
 
 	return 0;
 }
 
+static bool need_fbc_vtd_wa(struct drm_i915_private *dev_priv)
+{
+#ifdef CONFIG_INTEL_IOMMU
+	/* WaFbcTurnOffFbcWhenHyperVisorIsUsed:skl,bxt */
+	if (intel_iommu_gfx_mapped &&
+	    (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))) {
+		DRM_INFO("Disabling framebuffer compression (FBC) to prevent screen flicker with VT-d enabled\n");
+		return true;
+	}
+#endif
+
+	return false;
+}
+
 /**
  * intel_fbc_init - Initialize FBC
  * @dev_priv: the i915 device
@@ -1253,6 +1270,9 @@ void intel_fbc_init(struct drm_i915_private *dev_priv)
 	fbc->active = false;
 	fbc->work.scheduled = false;
 
+	if (need_fbc_vtd_wa(dev_priv))
+		mkwrite_device_info(dev_priv)->has_fbc = false;
+
 	i915.enable_fbc = intel_sanitize_fbc_option(dev_priv);
 	DRM_DEBUG_KMS("Sanitized enable_fbc value: %d\n", i915.enable_fbc);
 

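The intel_fbc.c hunk above makes the option sanitizer bail out when the device has no FBC at all, and lets the VT-d workaround clear that capability before the sanitizer runs. A minimal user-space sketch of that decision order (the standalone framing and names are illustrative only, not code from the patch):

#include <stdbool.h>
#include <stdio.h>

/* -1 = "auto" (module default), 0/1 = explicit user choice */
static int sanitize_fbc_option(int requested, bool has_fbc, bool is_broadwell)
{
	if (requested >= 0)
		return !!requested;	/* an explicit choice always wins */

	if (!has_fbc)
		return 0;		/* capability cleared, e.g. by the VT-d workaround */

	return is_broadwell ? 1 : 0;	/* conservative default elsewhere */
}

int main(void)
{
	bool has_fbc = true;
	bool vtd_active = true;		/* stand-in for intel_iommu_gfx_mapped */

	if (vtd_active)			/* mirrors need_fbc_vtd_wa() clearing has_fbc */
		has_fbc = false;

	printf("enable_fbc = %d\n", sanitize_fbc_option(-1, has_fbc, true));
	return 0;
}
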
+ 3 - 3
drivers/gpu/drm/i915/intel_pm.c

@@ -3344,6 +3344,8 @@ static uint32_t skl_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
 		plane_bytes_per_line *= 4;
 		plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
 		plane_blocks_per_line /= 4;
+	} else if (tiling == DRM_FORMAT_MOD_NONE) {
+		plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512) + 1;
 	} else {
 		plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
 	}
@@ -6574,9 +6576,7 @@ void intel_init_gt_powersave(struct drm_i915_private *dev_priv)
 
 void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv)
 {
-	if (IS_CHERRYVIEW(dev_priv))
-		return;
-	else if (IS_VALLEYVIEW(dev_priv))
+	if (IS_VALLEYVIEW(dev_priv))
 		valleyview_cleanup_gt_powersave(dev_priv);
 
 	if (!i915.enable_rc6)

+ 4 - 4
drivers/gpu/drm/i915/intel_ringbuffer.c

@@ -1178,8 +1178,8 @@ static int bxt_init_workarounds(struct intel_engine_cs *engine)
 		I915_WRITE(GEN8_L3SQCREG1, L3_GENERAL_PRIO_CREDITS(62) |
 					   L3_HIGH_PRIO_CREDITS(2));
 
-	/* WaInsertDummyPushConstPs:bxt */
-	if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0))
+	/* WaToEnableHwFixForPushConstHWBug:bxt */
+	if (IS_BXT_REVID(dev_priv, BXT_REVID_C0, REVID_FOREVER))
 		WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
 				  GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
 
@@ -1222,8 +1222,8 @@ static int kbl_init_workarounds(struct intel_engine_cs *engine)
 		I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
 			   GEN8_LQSC_RO_PERF_DIS);
 
-	/* WaInsertDummyPushConstPs:kbl */
-	if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
+	/* WaToEnableHwFixForPushConstHWBug:kbl */
+	if (IS_KBL_REVID(dev_priv, KBL_REVID_C0, REVID_FOREVER))
 		WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
 				  GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
 

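Both workaround hunks above flip the gate from "apply on revisions up to B0" to "apply from C0 onwards", reflecting that newer steppings carry the hardware fix. The IS_*_REVID() checks boil down to a bounds test; a hedged, runnable illustration (revision values below are examples, not the real stepping IDs):

#include <stdbool.h>
#include <stdio.h>

#define REVID_FOREVER 0xff	/* assumed open-ended upper bound for the example */

static bool revid_in_range(unsigned int revid, unsigned int since, unsigned int until)
{
	return revid >= since && revid <= until;
}

int main(void)
{
	unsigned int revid_b0 = 0x3, revid_c0 = 0x9;	/* example values only */

	/* old gate: workaround applied only on early steppings */
	printf("old check on a C0 part: %d\n", revid_in_range(revid_c0, 0, revid_b0));
	/* new gate: enable the hardware fix from C0 onwards */
	printf("new check on a C0 part: %d\n", revid_in_range(revid_c0, revid_c0, REVID_FOREVER));
	return 0;
}
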
+ 3 - 0
drivers/gpu/drm/mediatek/Kconfig

@@ -2,6 +2,9 @@ config DRM_MEDIATEK
 	tristate "DRM Support for Mediatek SoCs"
 	depends on DRM
 	depends on ARCH_MEDIATEK || (ARM && COMPILE_TEST)
+	depends on COMMON_CLK
+	depends on HAVE_ARM_SMCCC
+	depends on OF
 	select DRM_GEM_CMA_HELPER
 	select DRM_KMS_HELPER
 	select DRM_MIPI_DSI

+ 0 - 9
drivers/gpu/drm/radeon/radeon_atpx_handler.c

@@ -198,16 +198,7 @@ static int radeon_atpx_validate(struct radeon_atpx *atpx)
 	atpx->is_hybrid = false;
 	if (valid_bits & ATPX_MS_HYBRID_GFX_SUPPORTED) {
 		printk("ATPX Hybrid Graphics\n");
-#if 1
-		/* This is a temporary hack until the D3 cold support
-		 * makes it upstream.  The ATPX power_control method seems
-		 * to still work on even if the system should be using
-		 * the new standardized hybrid D3 cold ACPI interface.
-		 */
-		atpx->functions.power_cntl = true;
-#else
 		atpx->functions.power_cntl = false;
-#endif
 		atpx->is_hybrid = true;
 	}
 

+ 1 - 1
drivers/hwmon/it87.c

@@ -491,7 +491,7 @@ struct it87_sio_data {
 struct it87_data {
 	const struct attribute_group *groups[7];
 	enum chips type;
-	u16 features;
+	u32 features;
 	u8 peci_mask;
 	u8 old_peci_mask;
 

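The it87 change widens the per-chip feature mask from u16 to u32; once a driver needs more than 16 FEAT_* bits, storing BIT(16) and above in a 16-bit field silently drops the bit. A small runnable illustration of the failure mode (the flag name is made up for the example):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t features16 = 0;
	uint32_t features32 = 0;
	uint32_t feat_17th = 1u << 16;	/* a hypothetical 17th feature flag */

	features16 |= feat_17th;	/* truncated: the flag is lost */
	features32 |= feat_17th;

	printf("u16 mask: 0x%x, u32 mask: 0x%x\n", features16, features32);
	return 0;
}
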
+ 14 - 10
drivers/i2c/busses/i2c-at91.c

@@ -38,6 +38,7 @@
 #define AT91_I2C_TIMEOUT	msecs_to_jiffies(100)	/* transfer timeout */
 #define AT91_I2C_TIMEOUT	msecs_to_jiffies(100)	/* transfer timeout */
 #define AT91_I2C_DMA_THRESHOLD	8			/* enable DMA if transfer size is bigger than this threshold */
 #define AT91_I2C_DMA_THRESHOLD	8			/* enable DMA if transfer size is bigger than this threshold */
 #define AUTOSUSPEND_TIMEOUT		2000
 #define AUTOSUSPEND_TIMEOUT		2000
+#define AT91_I2C_MAX_ALT_CMD_DATA_SIZE	256
 
 
 /* AT91 TWI register definitions */
 /* AT91 TWI register definitions */
 #define	AT91_TWI_CR		0x0000	/* Control Register */
 #define	AT91_TWI_CR		0x0000	/* Control Register */
@@ -141,6 +142,7 @@ struct at91_twi_dev {
 	unsigned twi_cwgr_reg;
 	unsigned twi_cwgr_reg;
 	struct at91_twi_pdata *pdata;
 	struct at91_twi_pdata *pdata;
 	bool use_dma;
 	bool use_dma;
+	bool use_alt_cmd;
 	bool recv_len_abort;
 	bool recv_len_abort;
 	u32 fifo_size;
 	u32 fifo_size;
 	struct at91_twi_dma dma;
 	struct at91_twi_dma dma;
@@ -269,7 +271,7 @@ static void at91_twi_write_next_byte(struct at91_twi_dev *dev)
 
 
 	/* send stop when last byte has been written */
 	/* send stop when last byte has been written */
 	if (--dev->buf_len == 0)
 	if (--dev->buf_len == 0)
-		if (!dev->pdata->has_alt_cmd)
+		if (!dev->use_alt_cmd)
 			at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP);
 			at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP);
 
 
 	dev_dbg(dev->dev, "wrote 0x%x, to go %d\n", *dev->buf, dev->buf_len);
 	dev_dbg(dev->dev, "wrote 0x%x, to go %d\n", *dev->buf, dev->buf_len);
@@ -292,7 +294,7 @@ static void at91_twi_write_data_dma_callback(void *data)
 	 * we just have to enable TXCOMP one.
 	 * we just have to enable TXCOMP one.
 	 */
 	 */
 	at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_TXCOMP);
 	at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_TXCOMP);
-	if (!dev->pdata->has_alt_cmd)
+	if (!dev->use_alt_cmd)
 		at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP);
 		at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP);
 }
 }
 
 
@@ -410,7 +412,7 @@ static void at91_twi_read_next_byte(struct at91_twi_dev *dev)
 	}
 	}
 
 
 	/* send stop if second but last byte has been read */
 	/* send stop if second but last byte has been read */
-	if (!dev->pdata->has_alt_cmd && dev->buf_len == 1)
+	if (!dev->use_alt_cmd && dev->buf_len == 1)
 		at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP);
 		at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP);
 
 
 	dev_dbg(dev->dev, "read 0x%x, to go %d\n", *dev->buf, dev->buf_len);
 	dev_dbg(dev->dev, "read 0x%x, to go %d\n", *dev->buf, dev->buf_len);
@@ -426,7 +428,7 @@ static void at91_twi_read_data_dma_callback(void *data)
 	dma_unmap_single(dev->dev, sg_dma_address(&dev->dma.sg[0]),
 	dma_unmap_single(dev->dev, sg_dma_address(&dev->dma.sg[0]),
 			 dev->buf_len, DMA_FROM_DEVICE);
 			 dev->buf_len, DMA_FROM_DEVICE);
 
 
-	if (!dev->pdata->has_alt_cmd) {
+	if (!dev->use_alt_cmd) {
 		/* The last two bytes have to be read without using dma */
 		/* The last two bytes have to be read without using dma */
 		dev->buf += dev->buf_len - 2;
 		dev->buf += dev->buf_len - 2;
 		dev->buf_len = 2;
 		dev->buf_len = 2;
@@ -443,7 +445,7 @@ static void at91_twi_read_data_dma(struct at91_twi_dev *dev)
 	struct dma_chan *chan_rx = dma->chan_rx;
 	struct dma_chan *chan_rx = dma->chan_rx;
 	size_t buf_len;
 	size_t buf_len;
 
 
-	buf_len = (dev->pdata->has_alt_cmd) ? dev->buf_len : dev->buf_len - 2;
+	buf_len = (dev->use_alt_cmd) ? dev->buf_len : dev->buf_len - 2;
 	dma->direction = DMA_FROM_DEVICE;
 	dma->direction = DMA_FROM_DEVICE;
 
 
 	/* Keep in mind that we won't use dma to read the last two bytes */
 	/* Keep in mind that we won't use dma to read the last two bytes */
@@ -651,7 +653,7 @@ static int at91_do_twi_transfer(struct at91_twi_dev *dev)
 		unsigned start_flags = AT91_TWI_START;
 		unsigned start_flags = AT91_TWI_START;
 
 
 		/* if only one byte is to be read, immediately stop transfer */
 		/* if only one byte is to be read, immediately stop transfer */
-		if (!has_alt_cmd && dev->buf_len <= 1 &&
+		if (!dev->use_alt_cmd && dev->buf_len <= 1 &&
 		    !(dev->msg->flags & I2C_M_RECV_LEN))
 		    !(dev->msg->flags & I2C_M_RECV_LEN))
 			start_flags |= AT91_TWI_STOP;
 			start_flags |= AT91_TWI_STOP;
 		at91_twi_write(dev, AT91_TWI_CR, start_flags);
 		at91_twi_write(dev, AT91_TWI_CR, start_flags);
@@ -745,7 +747,7 @@ static int at91_twi_xfer(struct i2c_adapter *adap, struct i2c_msg *msg, int num)
 	int ret;
 	int ret;
 	unsigned int_addr_flag = 0;
 	unsigned int_addr_flag = 0;
 	struct i2c_msg *m_start = msg;
 	struct i2c_msg *m_start = msg;
-	bool is_read, use_alt_cmd = false;
+	bool is_read;
 
 
 	dev_dbg(&adap->dev, "at91_xfer: processing %d messages:\n", num);
 	dev_dbg(&adap->dev, "at91_xfer: processing %d messages:\n", num);
 
 
@@ -768,14 +770,16 @@ static int at91_twi_xfer(struct i2c_adapter *adap, struct i2c_msg *msg, int num)
 		at91_twi_write(dev, AT91_TWI_IADR, internal_address);
 		at91_twi_write(dev, AT91_TWI_IADR, internal_address);
 	}
 	}
 
 
+	dev->use_alt_cmd = false;
 	is_read = (m_start->flags & I2C_M_RD);
 	is_read = (m_start->flags & I2C_M_RD);
 	if (dev->pdata->has_alt_cmd) {
 	if (dev->pdata->has_alt_cmd) {
-		if (m_start->len > 0) {
+		if (m_start->len > 0 &&
+		    m_start->len < AT91_I2C_MAX_ALT_CMD_DATA_SIZE) {
 			at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_ACMEN);
 			at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_ACMEN);
 			at91_twi_write(dev, AT91_TWI_ACR,
 			at91_twi_write(dev, AT91_TWI_ACR,
 				       AT91_TWI_ACR_DATAL(m_start->len) |
 				       AT91_TWI_ACR_DATAL(m_start->len) |
 				       ((is_read) ? AT91_TWI_ACR_DIR : 0));
 				       ((is_read) ? AT91_TWI_ACR_DIR : 0));
-			use_alt_cmd = true;
+			dev->use_alt_cmd = true;
 		} else {
 		} else {
 			at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_ACMDIS);
 			at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_ACMDIS);
 		}
 		}
@@ -784,7 +788,7 @@ static int at91_twi_xfer(struct i2c_adapter *adap, struct i2c_msg *msg, int num)
 	at91_twi_write(dev, AT91_TWI_MMR,
 	at91_twi_write(dev, AT91_TWI_MMR,
 		       (m_start->addr << 16) |
 		       (m_start->addr << 16) |
 		       int_addr_flag |
 		       int_addr_flag |
-		       ((!use_alt_cmd && is_read) ? AT91_TWI_MREAD : 0));
+		       ((!dev->use_alt_cmd && is_read) ? AT91_TWI_MREAD : 0));
 
 
 	dev->buf_len = m_start->len;
 	dev->buf_len = m_start->len;
 	dev->buf = m_start->buf;
 	dev->buf = m_start->buf;

+ 1 - 1
drivers/i2c/busses/i2c-bcm-iproc.c

@@ -158,7 +158,7 @@ static irqreturn_t bcm_iproc_i2c_isr(int irq, void *data)
 
 	if (status & BIT(IS_M_START_BUSY_SHIFT)) {
 		iproc_i2c->xfer_is_done = 1;
-		complete_all(&iproc_i2c->done);
+		complete(&iproc_i2c->done);
 	}
 
 	writel(status, iproc_i2c->base + IS_OFFSET);

+ 1 - 1
drivers/i2c/busses/i2c-bcm-kona.c

@@ -229,7 +229,7 @@ static irqreturn_t bcm_kona_i2c_isr(int irq, void *devid)
 		       dev->base + TXFCR_OFFSET);
 
 	writel(status & ~ISR_RESERVED_MASK, dev->base + ISR_OFFSET);
-	complete_all(&dev->done);
+	complete(&dev->done);
 
 	return IRQ_HANDLED;
 }

+ 1 - 1
drivers/i2c/busses/i2c-brcmstb.c

@@ -228,7 +228,7 @@ static irqreturn_t brcmstb_i2c_isr(int irq, void *devid)
 		return IRQ_NONE;
 
 	brcmstb_i2c_enable_disable_irq(dev, INT_DISABLE);
-	complete_all(&dev->done);
+	complete(&dev->done);
 
 	dev_dbg(dev->device, "isr handled");
 	return IRQ_HANDLED;

+ 1 - 1
drivers/i2c/busses/i2c-cros-ec-tunnel.c

@@ -215,7 +215,7 @@ static int ec_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg i2c_msgs[],
 	msg->outsize = request_len;
 	msg->insize = response_len;
 
-	result = cros_ec_cmd_xfer(bus->ec, msg);
+	result = cros_ec_cmd_xfer_status(bus->ec, msg);
 	if (result < 0) {
 		dev_err(dev, "Error transferring EC i2c message %d\n", result);
 		goto exit;

+ 3 - 3
drivers/i2c/busses/i2c-meson.c

@@ -211,7 +211,7 @@ static void meson_i2c_stop(struct meson_i2c *i2c)
 		meson_i2c_add_token(i2c, TOKEN_STOP);
 	} else {
 		i2c->state = STATE_IDLE;
-		complete_all(&i2c->done);
+		complete(&i2c->done);
 	}
 }
 
@@ -238,7 +238,7 @@ static irqreturn_t meson_i2c_irq(int irqno, void *dev_id)
 		dev_dbg(i2c->dev, "error bit set\n");
 		i2c->error = -ENXIO;
 		i2c->state = STATE_IDLE;
-		complete_all(&i2c->done);
+		complete(&i2c->done);
 		goto out;
 	}
 
@@ -269,7 +269,7 @@ static irqreturn_t meson_i2c_irq(int irqno, void *dev_id)
 		break;
 	case STATE_STOP:
 		i2c->state = STATE_IDLE;
-		complete_all(&i2c->done);
+		complete(&i2c->done);
 		break;
 	case STATE_IDLE:
 		break;

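The complete_all() -> complete() conversions in the four i2c drivers above share one rationale: each driver re-arms a single struct completion for every transfer, and complete_all() leaves the completion signalled for all future waiters until it is reinitialized, so a later wait could return before the new interrupt has fired. A hedged, driver-agnostic sketch of the intended pattern (the foo_* names are placeholders, not taken from any of the patched drivers):

#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

struct foo_i2c {
	struct completion done;
};

static int foo_i2c_do_transfer(struct foo_i2c *i2c)
{
	reinit_completion(&i2c->done);		/* arm for this transfer only */
	/* ... program the controller and start the transfer ... */
	if (!wait_for_completion_timeout(&i2c->done, msecs_to_jiffies(100)))
		return -ETIMEDOUT;
	return 0;
}

static irqreturn_t foo_i2c_isr(int irq, void *dev_id)
{
	struct foo_i2c *i2c = dev_id;

	/* ... acknowledge the hardware status ... */
	complete(&i2c->done);			/* wake the one waiting transfer */
	return IRQ_HANDLED;
}
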
+ 10 - 4
drivers/i2c/busses/i2c-ocores.c

@@ -379,6 +379,7 @@ static int ocores_i2c_of_probe(struct platform_device *pdev,
 			if (!clock_frequency_present) {
 				dev_err(&pdev->dev,
 					"Missing required parameter 'opencores,ip-clock-frequency'\n");
+				clk_disable_unprepare(i2c->clk);
 				return -ENODEV;
 			}
 			i2c->ip_clock_khz = clock_frequency / 1000;
@@ -467,20 +468,21 @@ static int ocores_i2c_probe(struct platform_device *pdev)
 		default:
 			dev_err(&pdev->dev, "Unsupported I/O width (%d)\n",
 				i2c->reg_io_width);
-			return -EINVAL;
+			ret = -EINVAL;
+			goto err_clk;
 		}
 	}
 
 	ret = ocores_init(&pdev->dev, i2c);
 	if (ret)
-		return ret;
+		goto err_clk;
 
 	init_waitqueue_head(&i2c->wait);
 	ret = devm_request_irq(&pdev->dev, irq, ocores_isr, 0,
 			       pdev->name, i2c);
 	if (ret) {
 		dev_err(&pdev->dev, "Cannot claim IRQ\n");
-		return ret;
+		goto err_clk;
 	}
 
 	/* hook up driver to tree */
@@ -494,7 +496,7 @@ static int ocores_i2c_probe(struct platform_device *pdev)
 	ret = i2c_add_adapter(&i2c->adap);
 	if (ret) {
 		dev_err(&pdev->dev, "Failed to add adapter\n");
-		return ret;
+		goto err_clk;
 	}
 
 	/* add in known devices to the bus */
@@ -504,6 +506,10 @@ static int ocores_i2c_probe(struct platform_device *pdev)
 	}
 
 	return 0;
+
+err_clk:
+	clk_disable_unprepare(i2c->clk);
+	return ret;
 }
 
 static int ocores_i2c_remove(struct platform_device *pdev)

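Every error path that the ocores hunks redirect to err_clk exists to balance the earlier clk_prepare_enable(); once the clock is running, returning directly leaks it. A hedged sketch of the resulting probe shape (the foo_* names and the single init step are illustrative):

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int foo_hw_init(struct platform_device *pdev)
{
	return 0;	/* placeholder for the real controller setup */
}

static int foo_probe(struct platform_device *pdev)
{
	struct clk *clk;
	int ret;

	clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	ret = clk_prepare_enable(clk);
	if (ret)
		return ret;

	ret = foo_hw_init(pdev);
	if (ret)
		goto err_clk;	/* anything that fails past this point must unwind the clock */

	return 0;

err_clk:
	clk_disable_unprepare(clk);
	return ret;
}
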
+ 3 - 1
drivers/i2c/muxes/i2c-demux-pinctrl.c

@@ -68,7 +68,7 @@ static int i2c_demux_activate_master(struct i2c_demux_pinctrl_priv *priv, u32 ne
 	adap = of_find_i2c_adapter_by_node(priv->chan[new_chan].parent_np);
 	if (!adap) {
 		ret = -ENODEV;
-		goto err;
+		goto err_with_revert;
 	}
 
 	p = devm_pinctrl_get_select(adap->dev.parent, priv->bus_name);
@@ -103,6 +103,8 @@ static int i2c_demux_activate_master(struct i2c_demux_pinctrl_priv *priv, u32 ne
 
  err_with_put:
 	i2c_put_adapter(adap);
+ err_with_revert:
+	of_changeset_revert(&priv->chan[new_chan].chgset);
  err:
 	dev_err(priv->dev, "failed to setup demux-adapter %d (%d)\n", new_chan, ret);
 	return ret;

+ 1 - 1
drivers/md/dm-crypt.c

@@ -181,7 +181,7 @@ struct crypt_config {
 	u8 key[0];
 };
 
-#define MIN_IOS        16
+#define MIN_IOS        64
 
 static void clone_init(struct dm_crypt_io *, struct bio *);
 static void kcryptd_queue_crypt(struct dm_crypt_io *io);

+ 47 - 35
drivers/md/dm-raid.c

@@ -191,7 +191,6 @@ struct raid_dev {
 #define RT_FLAG_RS_BITMAP_LOADED	2
 #define RT_FLAG_RS_BITMAP_LOADED	2
 #define RT_FLAG_UPDATE_SBS		3
 #define RT_FLAG_UPDATE_SBS		3
 #define RT_FLAG_RESHAPE_RS		4
 #define RT_FLAG_RESHAPE_RS		4
-#define RT_FLAG_KEEP_RS_FROZEN		5
 
 
 /* Array elements of 64 bit needed for rebuild/failed disk bits */
 /* Array elements of 64 bit needed for rebuild/failed disk bits */
 #define DISKS_ARRAY_ELEMS ((MAX_RAID_DEVICES + (sizeof(uint64_t) * 8 - 1)) / sizeof(uint64_t) / 8)
 #define DISKS_ARRAY_ELEMS ((MAX_RAID_DEVICES + (sizeof(uint64_t) * 8 - 1)) / sizeof(uint64_t) / 8)
@@ -861,6 +860,9 @@ static int validate_region_size(struct raid_set *rs, unsigned long region_size)
 {
 {
 	unsigned long min_region_size = rs->ti->len / (1 << 21);
 	unsigned long min_region_size = rs->ti->len / (1 << 21);
 
 
+	if (rs_is_raid0(rs))
+		return 0;
+
 	if (!region_size) {
 	if (!region_size) {
 		/*
 		/*
 		 * Choose a reasonable default.	 All figures in sectors.
 		 * Choose a reasonable default.	 All figures in sectors.
@@ -930,6 +932,8 @@ static int validate_raid_redundancy(struct raid_set *rs)
 			rebuild_cnt++;
 			rebuild_cnt++;
 
 
 	switch (rs->raid_type->level) {
 	switch (rs->raid_type->level) {
+	case 0:
+		break;
 	case 1:
 	case 1:
 		if (rebuild_cnt >= rs->md.raid_disks)
 		if (rebuild_cnt >= rs->md.raid_disks)
 			goto too_many;
 			goto too_many;
@@ -2335,6 +2339,13 @@ static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs)
 		case 0:
 		case 0:
 			break;
 			break;
 		default:
 		default:
+			/*
+			 * We have to keep any raid0 data/metadata device pairs or
+			 * the MD raid0 personality will fail to start the array.
+			 */
+			if (rs_is_raid0(rs))
+				continue;
+
 			dev = container_of(rdev, struct raid_dev, rdev);
 			dev = container_of(rdev, struct raid_dev, rdev);
 			if (dev->meta_dev)
 			if (dev->meta_dev)
 				dm_put_device(ti, dev->meta_dev);
 				dm_put_device(ti, dev->meta_dev);
@@ -2579,7 +2590,6 @@ static int rs_prepare_reshape(struct raid_set *rs)
 		} else {
 		} else {
 			/* Process raid1 without delta_disks */
 			/* Process raid1 without delta_disks */
 			mddev->raid_disks = rs->raid_disks;
 			mddev->raid_disks = rs->raid_disks;
-			set_bit(RT_FLAG_KEEP_RS_FROZEN, &rs->runtime_flags);
 			reshape = false;
 			reshape = false;
 		}
 		}
 	} else {
 	} else {
@@ -2590,7 +2600,6 @@ static int rs_prepare_reshape(struct raid_set *rs)
 	if (reshape) {
 	if (reshape) {
 		set_bit(RT_FLAG_RESHAPE_RS, &rs->runtime_flags);
 		set_bit(RT_FLAG_RESHAPE_RS, &rs->runtime_flags);
 		set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
 		set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
-		set_bit(RT_FLAG_KEEP_RS_FROZEN, &rs->runtime_flags);
 	} else if (mddev->raid_disks < rs->raid_disks)
 	} else if (mddev->raid_disks < rs->raid_disks)
 		/* Create new superblocks and bitmaps, if any new disks */
 		/* Create new superblocks and bitmaps, if any new disks */
 		set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
 		set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
@@ -2902,7 +2911,6 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 			goto bad;
 			goto bad;
 
 
 		set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
 		set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
-		set_bit(RT_FLAG_KEEP_RS_FROZEN, &rs->runtime_flags);
 		/* Takeover ain't recovery, so disable recovery */
 		/* Takeover ain't recovery, so disable recovery */
 		rs_setup_recovery(rs, MaxSector);
 		rs_setup_recovery(rs, MaxSector);
 		rs_set_new(rs);
 		rs_set_new(rs);
@@ -3386,21 +3394,28 @@ static void raid_postsuspend(struct dm_target *ti)
 {
 {
 	struct raid_set *rs = ti->private;
 	struct raid_set *rs = ti->private;
 
 
-	if (test_and_clear_bit(RT_FLAG_RS_RESUMED, &rs->runtime_flags)) {
-		if (!rs->md.suspended)
-			mddev_suspend(&rs->md);
-		rs->md.ro = 1;
-	}
+	if (!rs->md.suspended)
+		mddev_suspend(&rs->md);
+
+	rs->md.ro = 1;
 }
 }
 
 
 static void attempt_restore_of_faulty_devices(struct raid_set *rs)
 static void attempt_restore_of_faulty_devices(struct raid_set *rs)
 {
 {
 	int i;
 	int i;
-	uint64_t failed_devices, cleared_failed_devices = 0;
+	uint64_t cleared_failed_devices[DISKS_ARRAY_ELEMS];
 	unsigned long flags;
 	unsigned long flags;
+	bool cleared = false;
 	struct dm_raid_superblock *sb;
 	struct dm_raid_superblock *sb;
+	struct mddev *mddev = &rs->md;
 	struct md_rdev *r;
 	struct md_rdev *r;
 
 
+	/* RAID personalities have to provide hot add/remove methods or we need to bail out. */
+	if (!mddev->pers || !mddev->pers->hot_add_disk || !mddev->pers->hot_remove_disk)
+		return;
+
+	memset(cleared_failed_devices, 0, sizeof(cleared_failed_devices));
+
 	for (i = 0; i < rs->md.raid_disks; i++) {
 	for (i = 0; i < rs->md.raid_disks; i++) {
 		r = &rs->dev[i].rdev;
 		r = &rs->dev[i].rdev;
 		if (test_bit(Faulty, &r->flags) && r->sb_page &&
 		if (test_bit(Faulty, &r->flags) && r->sb_page &&
@@ -3420,7 +3435,7 @@ static void attempt_restore_of_faulty_devices(struct raid_set *rs)
 			 * ourselves.
 			 * ourselves.
 			 */
 			 */
 			if ((r->raid_disk >= 0) &&
 			if ((r->raid_disk >= 0) &&
-			    (r->mddev->pers->hot_remove_disk(r->mddev, r) != 0))
+			    (mddev->pers->hot_remove_disk(mddev, r) != 0))
 				/* Failed to revive this device, try next */
 				/* Failed to revive this device, try next */
 				continue;
 				continue;
 
 
@@ -3430,22 +3445,30 @@ static void attempt_restore_of_faulty_devices(struct raid_set *rs)
 			clear_bit(Faulty, &r->flags);
 			clear_bit(Faulty, &r->flags);
 			clear_bit(WriteErrorSeen, &r->flags);
 			clear_bit(WriteErrorSeen, &r->flags);
 			clear_bit(In_sync, &r->flags);
 			clear_bit(In_sync, &r->flags);
-			if (r->mddev->pers->hot_add_disk(r->mddev, r)) {
+			if (mddev->pers->hot_add_disk(mddev, r)) {
 				r->raid_disk = -1;
 				r->raid_disk = -1;
 				r->saved_raid_disk = -1;
 				r->saved_raid_disk = -1;
 				r->flags = flags;
 				r->flags = flags;
 			} else {
 			} else {
 				r->recovery_offset = 0;
 				r->recovery_offset = 0;
-				cleared_failed_devices |= 1 << i;
+				set_bit(i, (void *) cleared_failed_devices);
+				cleared = true;
 			}
 			}
 		}
 		}
 	}
 	}
-	if (cleared_failed_devices) {
+
+	/* If any failed devices could be cleared, update all sbs failed_devices bits */
+	if (cleared) {
+		uint64_t failed_devices[DISKS_ARRAY_ELEMS];
+
 		rdev_for_each(r, &rs->md) {
 		rdev_for_each(r, &rs->md) {
 			sb = page_address(r->sb_page);
 			sb = page_address(r->sb_page);
-			failed_devices = le64_to_cpu(sb->failed_devices);
-			failed_devices &= ~cleared_failed_devices;
-			sb->failed_devices = cpu_to_le64(failed_devices);
+			sb_retrieve_failed_devices(sb, failed_devices);
+
+			for (i = 0; i < DISKS_ARRAY_ELEMS; i++)
+				failed_devices[i] &= ~cleared_failed_devices[i];
+
+			sb_update_failed_devices(sb, failed_devices);
 		}
 		}
 	}
 	}
 }
 }
@@ -3610,26 +3633,15 @@ static void raid_resume(struct dm_target *ti)
 		 * devices are reachable again.
 		 * devices are reachable again.
 		 */
 		 */
 		attempt_restore_of_faulty_devices(rs);
 		attempt_restore_of_faulty_devices(rs);
-	} else {
-		mddev->ro = 0;
-		mddev->in_sync = 0;
+	}
 
 
-		/*
-		 * When passing in flags to the ctr, we expect userspace
-		 * to reset them because they made it to the superblocks
-		 * and reload the mapping anyway.
-		 *
-		 * -> only unfreeze recovery in case of a table reload or
-		 *    we'll have a bogus recovery/reshape position
-		 *    retrieved from the superblock by the ctr because
-		 *    the ongoing recovery/reshape will change it after read.
-		 */
-		if (!test_bit(RT_FLAG_KEEP_RS_FROZEN, &rs->runtime_flags))
-			clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+	mddev->ro = 0;
+	mddev->in_sync = 0;
 
 
-		if (mddev->suspended)
-			mddev_resume(mddev);
-	}
+	clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+
+	if (mddev->suspended)
+		mddev_resume(mddev);
 }
 }
 
 
 static struct target_type raid_target = {
 static struct target_type raid_target = {

+ 5 - 2
drivers/md/dm-round-robin.c

@@ -210,14 +210,17 @@ static struct dm_path *rr_select_path(struct path_selector *ps, size_t nr_bytes)
 	struct path_info *pi = NULL;
 	struct dm_path *current_path = NULL;
 
+	local_irq_save(flags);
 	current_path = *this_cpu_ptr(s->current_path);
 	if (current_path) {
 		percpu_counter_dec(&s->repeat_count);
-		if (percpu_counter_read_positive(&s->repeat_count) > 0)
+		if (percpu_counter_read_positive(&s->repeat_count) > 0) {
+			local_irq_restore(flags);
 			return current_path;
+		}
 	}
 
-	spin_lock_irqsave(&s->lock, flags);
+	spin_lock(&s->lock);
 	if (!list_empty(&s->valid_paths)) {
 		pi = list_entry(s->valid_paths.next, struct path_info, list);
 		list_move_tail(&pi->list, &s->valid_paths);

+ 3 - 11
drivers/of/base.c

@@ -1631,8 +1631,7 @@ static int __of_parse_phandle_with_args(const struct device_node *np,
 	 */
 
  err:
-	if (it.node)
-		of_node_put(it.node);
+	of_node_put(it.node);
 	return rc;
 }
 
@@ -2343,20 +2342,13 @@ struct device_node *of_graph_get_endpoint_by_regs(
 	const struct device_node *parent, int port_reg, int reg)
 {
 	struct of_endpoint endpoint;
-	struct device_node *node, *prev_node = NULL;
-
-	while (1) {
-		node = of_graph_get_next_endpoint(parent, prev_node);
-		of_node_put(prev_node);
-		if (!node)
-			break;
+	struct device_node *node = NULL;
 
+	for_each_endpoint_of_node(parent, node) {
 		of_graph_parse_endpoint(node, &endpoint);
 		if (((port_reg == -1) || (endpoint.port == port_reg)) &&
 			((reg == -1) || (endpoint.id == reg)))
 			return node;
-
-		prev_node = node;
 	}
 	}
 
 
 	return NULL;
 	return NULL;
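The of_graph_get_endpoint_by_regs() rewrite above replaces a hand-rolled get-next/put-previous loop with for_each_endpoint_of_node(), which does the reference bookkeeping inside the iterator, so returning from the loop body hands the caller exactly one held reference. A runnable sketch of how such an iterator is typically shaped (generic names, not the kernel's definition):

#include <stddef.h>
#include <stdio.h>

struct node {
	const char *name;
	struct node *next;
	int refcount;
};

static struct node *node_get(struct node *n) { if (n) n->refcount++; return n; }
static void node_put(struct node *n) { if (n) n->refcount--; }

/* return the successor with a reference held, dropping the reference on @prev */
static struct node *get_next_child(struct node *head, struct node *prev)
{
	struct node *next = prev ? prev->next : head;

	node_put(prev);
	return node_get(next);
}

#define for_each_child(head, child) \
	for (child = get_next_child(head, NULL); child; \
	     child = get_next_child(head, child))

int main(void)
{
	struct node c = { "c", NULL, 0 }, b = { "b", &c, 0 }, a = { "a", &b, 0 };
	struct node *child;

	for_each_child(&a, child)
		printf("%s (refcount %d)\n", child->name, child->refcount);
	return 0;
}
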

+ 1 - 1
drivers/of/fdt.c

@@ -517,7 +517,7 @@ static void *__unflatten_device_tree(const void *blob,
 		pr_warning("End of tree marker overwritten: %08x\n",
 		pr_warning("End of tree marker overwritten: %08x\n",
 			   be32_to_cpup(mem + size));
 			   be32_to_cpup(mem + size));
 
 
-	if (detached) {
+	if (detached && mynodes) {
 		of_node_set_flag(*mynodes, OF_DETACHED);
 		of_node_set_flag(*mynodes, OF_DETACHED);
 		pr_debug("unflattened tree is detached\n");
 		pr_debug("unflattened tree is detached\n");
 	}
 	}

+ 3 - 2
drivers/of/irq.c

@@ -544,12 +544,15 @@ void __init of_irq_init(const struct of_device_id *matches)
 
 
 			list_del(&desc->list);
 			list_del(&desc->list);
 
 
+			of_node_set_flag(desc->dev, OF_POPULATED);
+
 			pr_debug("of_irq_init: init %s (%p), parent %p\n",
 			pr_debug("of_irq_init: init %s (%p), parent %p\n",
 				 desc->dev->full_name,
 				 desc->dev->full_name,
 				 desc->dev, desc->interrupt_parent);
 				 desc->dev, desc->interrupt_parent);
 			ret = desc->irq_init_cb(desc->dev,
 			ret = desc->irq_init_cb(desc->dev,
 						desc->interrupt_parent);
 						desc->interrupt_parent);
 			if (ret) {
 			if (ret) {
+				of_node_clear_flag(desc->dev, OF_POPULATED);
 				kfree(desc);
 				kfree(desc);
 				continue;
 				continue;
 			}
 			}
@@ -559,8 +562,6 @@ void __init of_irq_init(const struct of_device_id *matches)
 			 * its children can get processed in a subsequent pass.
 			 * its children can get processed in a subsequent pass.
 			 */
 			 */
 			list_add_tail(&desc->list, &intc_parent_list);
 			list_add_tail(&desc->list, &intc_parent_list);
-
-			of_node_set_flag(desc->dev, OF_POPULATED);
 		}
 		}
 
 
 		/* Get the next pending parent that might have children */
 		/* Get the next pending parent that might have children */

+ 2 - 0
drivers/of/platform.c

@@ -497,6 +497,7 @@ int of_platform_default_populate(struct device_node *root,
 }
 }
 EXPORT_SYMBOL_GPL(of_platform_default_populate);
 EXPORT_SYMBOL_GPL(of_platform_default_populate);
 
 
+#ifndef CONFIG_PPC
 static int __init of_platform_default_populate_init(void)
 static int __init of_platform_default_populate_init(void)
 {
 {
 	struct device_node *node;
 	struct device_node *node;
@@ -521,6 +522,7 @@ static int __init of_platform_default_populate_init(void)
 	return 0;
 	return 0;
 }
 }
 arch_initcall_sync(of_platform_default_populate_init);
 arch_initcall_sync(of_platform_default_populate_init);
+#endif
 
 
 static int of_platform_device_destroy(struct device *dev, void *data)
 static int of_platform_device_destroy(struct device *dev, void *data)
 {
 {

+ 11 - 2
drivers/scsi/aacraid/commctrl.c

@@ -63,7 +63,7 @@ static int ioctl_send_fib(struct aac_dev * dev, void __user *arg)
 	struct fib *fibptr;
 	struct fib *fibptr;
 	struct hw_fib * hw_fib = (struct hw_fib *)0;
 	struct hw_fib * hw_fib = (struct hw_fib *)0;
 	dma_addr_t hw_fib_pa = (dma_addr_t)0LL;
 	dma_addr_t hw_fib_pa = (dma_addr_t)0LL;
-	unsigned size;
+	unsigned int size, osize;
 	int retval;
 	int retval;
 
 
 	if (dev->in_reset) {
 	if (dev->in_reset) {
@@ -87,7 +87,8 @@ static int ioctl_send_fib(struct aac_dev * dev, void __user *arg)
 	 *	will not overrun the buffer when we copy the memory. Return
 	 *	will not overrun the buffer when we copy the memory. Return
 	 *	an error if we would.
 	 *	an error if we would.
 	 */
 	 */
-	size = le16_to_cpu(kfib->header.Size) + sizeof(struct aac_fibhdr);
+	osize = size = le16_to_cpu(kfib->header.Size) +
+		sizeof(struct aac_fibhdr);
 	if (size < le16_to_cpu(kfib->header.SenderSize))
 	if (size < le16_to_cpu(kfib->header.SenderSize))
 		size = le16_to_cpu(kfib->header.SenderSize);
 		size = le16_to_cpu(kfib->header.SenderSize);
 	if (size > dev->max_fib_size) {
 	if (size > dev->max_fib_size) {
@@ -118,6 +119,14 @@ static int ioctl_send_fib(struct aac_dev * dev, void __user *arg)
 		goto cleanup;
 		goto cleanup;
 	}
 	}
 
 
+	/* Sanity check the second copy */
+	if ((osize != le16_to_cpu(kfib->header.Size) +
+		sizeof(struct aac_fibhdr))
+		|| (size < le16_to_cpu(kfib->header.SenderSize))) {
+		retval = -EINVAL;
+		goto cleanup;
+	}
+
 	if (kfib->header.Command == cpu_to_le16(TakeABreakPt)) {
 	if (kfib->header.Command == cpu_to_le16(TakeABreakPt)) {
 		aac_adapter_interrupt(dev);
 		aac_adapter_interrupt(dev);
 		/*
 		/*

+ 1 - 1
drivers/scsi/fcoe/fcoe_ctlr.c

@@ -2923,7 +2923,7 @@ static int fcoe_ctlr_vlan_recv(struct fcoe_ctlr *fip, struct sk_buff *skb)
 	mutex_unlock(&fip->ctlr_mutex);
 	mutex_unlock(&fip->ctlr_mutex);
 
 
 drop:
 drop:
-	kfree(skb);
+	kfree_skb(skb);
 	return rc;
 	return rc;
 }
 }
 
 

+ 3 - 3
drivers/scsi/megaraid/megaraid_sas_base.c

@@ -5037,7 +5037,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
 	/* Find first memory bar */
 	/* Find first memory bar */
 	bar_list = pci_select_bars(instance->pdev, IORESOURCE_MEM);
 	bar_list = pci_select_bars(instance->pdev, IORESOURCE_MEM);
 	instance->bar = find_first_bit(&bar_list, sizeof(unsigned long));
 	instance->bar = find_first_bit(&bar_list, sizeof(unsigned long));
-	if (pci_request_selected_regions(instance->pdev, instance->bar,
+	if (pci_request_selected_regions(instance->pdev, 1<<instance->bar,
 					 "megasas: LSI")) {
 					 "megasas: LSI")) {
 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "IO memory region busy!\n");
 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "IO memory region busy!\n");
 		return -EBUSY;
 		return -EBUSY;
@@ -5339,7 +5339,7 @@ fail_ready_state:
 	iounmap(instance->reg_set);
 	iounmap(instance->reg_set);
 
 
       fail_ioremap:
       fail_ioremap:
-	pci_release_selected_regions(instance->pdev, instance->bar);
+	pci_release_selected_regions(instance->pdev, 1<<instance->bar);
 
 
 	return -EINVAL;
 	return -EINVAL;
 }
 }
@@ -5360,7 +5360,7 @@ static void megasas_release_mfi(struct megasas_instance *instance)
 
 
 	iounmap(instance->reg_set);
 	iounmap(instance->reg_set);
 
 
-	pci_release_selected_regions(instance->pdev, instance->bar);
+	pci_release_selected_regions(instance->pdev, 1<<instance->bar);
 }
 }
 
 
 /**
 /**

+ 1 - 1
drivers/scsi/megaraid/megaraid_sas_fusion.c

@@ -2603,7 +2603,7 @@ megasas_release_fusion(struct megasas_instance *instance)
 
 
 	iounmap(instance->reg_set);
 	iounmap(instance->reg_set);
 
 
-	pci_release_selected_regions(instance->pdev, instance->bar);
+	pci_release_selected_regions(instance->pdev, 1<<instance->bar);
 }
 }
 
 
 /**
 /**

+ 11 - 11
drivers/scsi/mpt3sas/mpt3sas_base.c

@@ -2188,6 +2188,17 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
 	} else
 	} else
 		ioc->msix96_vector = 0;
 		ioc->msix96_vector = 0;
 
 
+	if (ioc->is_warpdrive) {
+		ioc->reply_post_host_index[0] = (resource_size_t __iomem *)
+		    &ioc->chip->ReplyPostHostIndex;
+
+		for (i = 1; i < ioc->cpu_msix_table_sz; i++)
+			ioc->reply_post_host_index[i] =
+			(resource_size_t __iomem *)
+			((u8 __iomem *)&ioc->chip->Doorbell + (0x4000 + ((i - 1)
+			* 4)));
+	}
+
 	list_for_each_entry(reply_q, &ioc->reply_queue_list, list)
 	list_for_each_entry(reply_q, &ioc->reply_queue_list, list)
 		pr_info(MPT3SAS_FMT "%s: IRQ %d\n",
 		pr_info(MPT3SAS_FMT "%s: IRQ %d\n",
 		    reply_q->name,  ((ioc->msix_enable) ? "PCI-MSI-X enabled" :
 		    reply_q->name,  ((ioc->msix_enable) ? "PCI-MSI-X enabled" :
@@ -5280,17 +5291,6 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
 	if (r)
 	if (r)
 		goto out_free_resources;
 		goto out_free_resources;
 
 
-	if (ioc->is_warpdrive) {
-		ioc->reply_post_host_index[0] = (resource_size_t __iomem *)
-		    &ioc->chip->ReplyPostHostIndex;
-
-		for (i = 1; i < ioc->cpu_msix_table_sz; i++)
-			ioc->reply_post_host_index[i] =
-			(resource_size_t __iomem *)
-			((u8 __iomem *)&ioc->chip->Doorbell + (0x4000 + ((i - 1)
-			* 4)));
-	}
-
 	pci_set_drvdata(ioc->pdev, ioc->shost);
 	pci_set_drvdata(ioc->pdev, ioc->shost);
 	r = _base_get_ioc_facts(ioc, CAN_SLEEP);
 	r = _base_get_ioc_facts(ioc, CAN_SLEEP);
 	if (r)
 	if (r)

+ 2 - 1
drivers/scsi/ses.c

@@ -778,6 +778,8 @@ static void ses_intf_remove_enclosure(struct scsi_device *sdev)
 	if (!edev)
 	if (!edev)
 		return;
 		return;
 
 
+	enclosure_unregister(edev);
+
 	ses_dev = edev->scratch;
 	ses_dev = edev->scratch;
 	edev->scratch = NULL;
 	edev->scratch = NULL;
 
 
@@ -789,7 +791,6 @@ static void ses_intf_remove_enclosure(struct scsi_device *sdev)
 	kfree(edev->component[0].scratch);
 	kfree(edev->component[0].scratch);
 
 
 	put_device(&edev->edev);
 	put_device(&edev->edev);
-	enclosure_unregister(edev);
 }
 }
 
 
 static void ses_intf_remove(struct device *cdev,
 static void ses_intf_remove(struct device *cdev,

+ 2 - 3
drivers/usb/class/cdc-acm.c

@@ -1354,7 +1354,6 @@ made_compressed_probe:
 	spin_lock_init(&acm->write_lock);
 	spin_lock_init(&acm->write_lock);
 	spin_lock_init(&acm->read_lock);
 	spin_lock_init(&acm->read_lock);
 	mutex_init(&acm->mutex);
 	mutex_init(&acm->mutex);
-	acm->rx_endpoint = usb_rcvbulkpipe(usb_dev, epread->bEndpointAddress);
 	acm->is_int_ep = usb_endpoint_xfer_int(epread);
 	acm->is_int_ep = usb_endpoint_xfer_int(epread);
 	if (acm->is_int_ep)
 	if (acm->is_int_ep)
 		acm->bInterval = epread->bInterval;
 		acm->bInterval = epread->bInterval;
@@ -1394,14 +1393,14 @@ made_compressed_probe:
 		urb->transfer_dma = rb->dma;
 		urb->transfer_dma = rb->dma;
 		if (acm->is_int_ep) {
 		if (acm->is_int_ep) {
 			usb_fill_int_urb(urb, acm->dev,
 			usb_fill_int_urb(urb, acm->dev,
-					 acm->rx_endpoint,
+					 usb_rcvintpipe(usb_dev, epread->bEndpointAddress),
 					 rb->base,
 					 rb->base,
 					 acm->readsize,
 					 acm->readsize,
 					 acm_read_bulk_callback, rb,
 					 acm_read_bulk_callback, rb,
 					 acm->bInterval);
 					 acm->bInterval);
 		} else {
 		} else {
 			usb_fill_bulk_urb(urb, acm->dev,
 			usb_fill_bulk_urb(urb, acm->dev,
-					  acm->rx_endpoint,
+					  usb_rcvbulkpipe(usb_dev, epread->bEndpointAddress),
 					  rb->base,
 					  rb->base,
 					  acm->readsize,
 					  acm->readsize,
 					  acm_read_bulk_callback, rb);
 					  acm_read_bulk_callback, rb);

+ 0 - 1
drivers/usb/class/cdc-acm.h

@@ -96,7 +96,6 @@ struct acm {
 	struct acm_rb read_buffers[ACM_NR];
 	struct acm_rb read_buffers[ACM_NR];
 	struct acm_wb *putbuffer;			/* for acm_tty_put_char() */
 	struct acm_wb *putbuffer;			/* for acm_tty_put_char() */
 	int rx_buflimit;
 	int rx_buflimit;
-	int rx_endpoint;
 	spinlock_t read_lock;
 	spinlock_t read_lock;
 	int write_used;					/* number of non-empty write buffers */
 	int write_used;					/* number of non-empty write buffers */
 	int transmitting;
 	int transmitting;

+ 63 - 3
drivers/usb/core/config.c

@@ -171,6 +171,31 @@ static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno,
 							ep, buffer, size);
 							ep, buffer, size);
 }
 }
 
 
+static const unsigned short low_speed_maxpacket_maxes[4] = {
+	[USB_ENDPOINT_XFER_CONTROL] = 8,
+	[USB_ENDPOINT_XFER_ISOC] = 0,
+	[USB_ENDPOINT_XFER_BULK] = 0,
+	[USB_ENDPOINT_XFER_INT] = 8,
+};
+static const unsigned short full_speed_maxpacket_maxes[4] = {
+	[USB_ENDPOINT_XFER_CONTROL] = 64,
+	[USB_ENDPOINT_XFER_ISOC] = 1023,
+	[USB_ENDPOINT_XFER_BULK] = 64,
+	[USB_ENDPOINT_XFER_INT] = 64,
+};
+static const unsigned short high_speed_maxpacket_maxes[4] = {
+	[USB_ENDPOINT_XFER_CONTROL] = 64,
+	[USB_ENDPOINT_XFER_ISOC] = 1024,
+	[USB_ENDPOINT_XFER_BULK] = 512,
+	[USB_ENDPOINT_XFER_INT] = 1023,
+};
+static const unsigned short super_speed_maxpacket_maxes[4] = {
+	[USB_ENDPOINT_XFER_CONTROL] = 512,
+	[USB_ENDPOINT_XFER_ISOC] = 1024,
+	[USB_ENDPOINT_XFER_BULK] = 1024,
+	[USB_ENDPOINT_XFER_INT] = 1024,
+};
+
 static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
 static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
     int asnum, struct usb_host_interface *ifp, int num_ep,
     int asnum, struct usb_host_interface *ifp, int num_ep,
     unsigned char *buffer, int size)
     unsigned char *buffer, int size)
@@ -179,6 +204,8 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
 	struct usb_endpoint_descriptor *d;
 	struct usb_endpoint_descriptor *d;
 	struct usb_host_endpoint *endpoint;
 	struct usb_host_endpoint *endpoint;
 	int n, i, j, retval;
 	int n, i, j, retval;
+	unsigned int maxp;
+	const unsigned short *maxpacket_maxes;
 
 
 	d = (struct usb_endpoint_descriptor *) buffer;
 	d = (struct usb_endpoint_descriptor *) buffer;
 	buffer += d->bLength;
 	buffer += d->bLength;
@@ -286,6 +313,42 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
 			endpoint->desc.wMaxPacketSize = cpu_to_le16(8);
 			endpoint->desc.wMaxPacketSize = cpu_to_le16(8);
 	}
 	}
 
 
+	/* Validate the wMaxPacketSize field */
+	maxp = usb_endpoint_maxp(&endpoint->desc);
+
+	/* Find the highest legal maxpacket size for this endpoint */
+	i = 0;		/* additional transactions per microframe */
+	switch (to_usb_device(ddev)->speed) {
+	case USB_SPEED_LOW:
+		maxpacket_maxes = low_speed_maxpacket_maxes;
+		break;
+	case USB_SPEED_FULL:
+		maxpacket_maxes = full_speed_maxpacket_maxes;
+		break;
+	case USB_SPEED_HIGH:
+		/* Bits 12..11 are allowed only for HS periodic endpoints */
+		if (usb_endpoint_xfer_int(d) || usb_endpoint_xfer_isoc(d)) {
+			i = maxp & (BIT(12) | BIT(11));
+			maxp &= ~i;
+		}
+		/* fallthrough */
+	default:
+		maxpacket_maxes = high_speed_maxpacket_maxes;
+		break;
+	case USB_SPEED_SUPER:
+	case USB_SPEED_SUPER_PLUS:
+		maxpacket_maxes = super_speed_maxpacket_maxes;
+		break;
+	}
+	j = maxpacket_maxes[usb_endpoint_type(&endpoint->desc)];
+
+	if (maxp > j) {
+		dev_warn(ddev, "config %d interface %d altsetting %d endpoint 0x%X has invalid maxpacket %d, setting to %d\n",
+		    cfgno, inum, asnum, d->bEndpointAddress, maxp, j);
+		maxp = j;
+		endpoint->desc.wMaxPacketSize = cpu_to_le16(i | maxp);
+	}
+
 	/*
 	/*
 	 * Some buggy high speed devices have bulk endpoints using
 	 * Some buggy high speed devices have bulk endpoints using
 	 * maxpacket sizes other than 512.  High speed HCDs may not
 	 * maxpacket sizes other than 512.  High speed HCDs may not
@@ -293,9 +356,6 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
 	 */
 	 */
 	if (to_usb_device(ddev)->speed == USB_SPEED_HIGH
 	if (to_usb_device(ddev)->speed == USB_SPEED_HIGH
 			&& usb_endpoint_xfer_bulk(d)) {
 			&& usb_endpoint_xfer_bulk(d)) {
-		unsigned maxp;
-
-		maxp = usb_endpoint_maxp(&endpoint->desc) & 0x07ff;
 		if (maxp != 512)
 		if (maxp != 512)
 			dev_warn(ddev, "config %d interface %d altsetting %d "
 			dev_warn(ddev, "config %d interface %d altsetting %d "
 				"bulk endpoint 0x%X has invalid maxpacket %d\n",
 				"bulk endpoint 0x%X has invalid maxpacket %d\n",

+ 5 - 2
drivers/usb/core/devio.c

@@ -241,7 +241,8 @@ static int usbdev_mmap(struct file *file, struct vm_area_struct *vma)
 		goto error_decrease_mem;
 		goto error_decrease_mem;
 	}
 	}
 
 
-	mem = usb_alloc_coherent(ps->dev, size, GFP_USER, &dma_handle);
+	mem = usb_alloc_coherent(ps->dev, size, GFP_USER | __GFP_NOWARN,
+			&dma_handle);
 	if (!mem) {
 	if (!mem) {
 		ret = -ENOMEM;
 		ret = -ENOMEM;
 		goto error_free_usbm;
 		goto error_free_usbm;
@@ -2582,7 +2583,9 @@ static unsigned int usbdev_poll(struct file *file,
 	if (file->f_mode & FMODE_WRITE && !list_empty(&ps->async_completed))
 	if (file->f_mode & FMODE_WRITE && !list_empty(&ps->async_completed))
 		mask |= POLLOUT | POLLWRNORM;
 		mask |= POLLOUT | POLLWRNORM;
 	if (!connected(ps))
 	if (!connected(ps))
-		mask |= POLLERR | POLLHUP;
+		mask |= POLLHUP;
+	if (list_empty(&ps->list))
+		mask |= POLLERR;
 	return mask;
 	return mask;
 }
 }
 
 

+ 9 - 14
drivers/usb/core/hub.c

@@ -1052,14 +1052,11 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
 
 
 	/* Continue a partial initialization */
 	/* Continue a partial initialization */
 	if (type == HUB_INIT2 || type == HUB_INIT3) {
 	if (type == HUB_INIT2 || type == HUB_INIT3) {
-		device_lock(hub->intfdev);
+		device_lock(&hdev->dev);
 
 
 		/* Was the hub disconnected while we were waiting? */
 		/* Was the hub disconnected while we were waiting? */
-		if (hub->disconnected) {
-			device_unlock(hub->intfdev);
-			kref_put(&hub->kref, hub_release);
-			return;
-		}
+		if (hub->disconnected)
+			goto disconnected;
 		if (type == HUB_INIT2)
 		if (type == HUB_INIT2)
 			goto init2;
 			goto init2;
 		goto init3;
 		goto init3;
@@ -1262,7 +1259,7 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
 			queue_delayed_work(system_power_efficient_wq,
 			queue_delayed_work(system_power_efficient_wq,
 					&hub->init_work,
 					&hub->init_work,
 					msecs_to_jiffies(delay));
 					msecs_to_jiffies(delay));
-			device_unlock(hub->intfdev);
+			device_unlock(&hdev->dev);
 			return;		/* Continues at init3: below */
 			return;		/* Continues at init3: below */
 		} else {
 		} else {
 			msleep(delay);
 			msleep(delay);
@@ -1281,12 +1278,12 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
 	/* Scan all ports that need attention */
 	/* Scan all ports that need attention */
 	kick_hub_wq(hub);
 	kick_hub_wq(hub);
 
 
-	/* Allow autosuspend if it was suppressed */
-	if (type <= HUB_INIT3)
+	if (type == HUB_INIT2 || type == HUB_INIT3) {
+		/* Allow autosuspend if it was suppressed */
+ disconnected:
 		usb_autopm_put_interface_async(to_usb_interface(hub->intfdev));
 		usb_autopm_put_interface_async(to_usb_interface(hub->intfdev));
-
-	if (type == HUB_INIT2 || type == HUB_INIT3)
-		device_unlock(hub->intfdev);
+		device_unlock(&hdev->dev);
+	}
 
 
 	kref_put(&hub->kref, hub_release);
 	kref_put(&hub->kref, hub_release);
 }
 }
@@ -1315,8 +1312,6 @@ static void hub_quiesce(struct usb_hub *hub, enum hub_quiescing_type type)
 	struct usb_device *hdev = hub->hdev;
 	struct usb_device *hdev = hub->hdev;
 	int i;
 	int i;
 
 
-	cancel_delayed_work_sync(&hub->init_work);
-
 	/* hub_wq and related activity won't re-trigger */
 	/* hub_wq and related activity won't re-trigger */
 	hub->quiescing = 1;
 	hub->quiescing = 1;
 
 

+ 1 - 0
drivers/usb/dwc3/dwc3-of-simple.c

@@ -61,6 +61,7 @@ static int dwc3_of_simple_probe(struct platform_device *pdev)
 	if (!simple->clks)
 	if (!simple->clks)
 		return -ENOMEM;
 		return -ENOMEM;
 
 
+	platform_set_drvdata(pdev, simple);
 	simple->dev = dev;
 	simple->dev = dev;
 
 
 	for (i = 0; i < simple->num_clocks; i++) {
 	for (i = 0; i < simple->num_clocks; i++) {

+ 2 - 0
drivers/usb/dwc3/dwc3-pci.c

@@ -37,6 +37,7 @@
 #define PCI_DEVICE_ID_INTEL_BXT			0x0aaa
 #define PCI_DEVICE_ID_INTEL_BXT_M		0x1aaa
 #define PCI_DEVICE_ID_INTEL_APL			0x5aaa
+#define PCI_DEVICE_ID_INTEL_KBP			0xa2b0

 static const struct acpi_gpio_params reset_gpios = { 0, 0, false };
 static const struct acpi_gpio_params cs_gpios = { 1, 0, false };
@@ -227,6 +228,7 @@ static const struct pci_device_id dwc3_pci_id_table[] = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BXT), },
 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BXT_M), },
 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_APL), },
+	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBP), },
 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_NL_USB), },
 	{  }	/* Terminating Entry */
 };

+ 33 - 22
drivers/usb/dwc3/gadget.c

@@ -829,7 +829,7 @@ static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
 	if (!req->request.no_interrupt && !chain)
 		trb->ctrl |= DWC3_TRB_CTRL_IOC | DWC3_TRB_CTRL_ISP_IMI;

-	if (last)
+	if (last && !usb_endpoint_xfer_isoc(dep->endpoint.desc))
 		trb->ctrl |= DWC3_TRB_CTRL_LST;

 	if (chain)
@@ -1955,7 +1955,8 @@ static void dwc3_gadget_free_endpoints(struct dwc3 *dwc)

 static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep,
 		struct dwc3_request *req, struct dwc3_trb *trb,
-		const struct dwc3_event_depevt *event, int status)
+		const struct dwc3_event_depevt *event, int status,
+		int chain)
 {
 	unsigned int		count;
 	unsigned int		s_pkt = 0;
@@ -1964,17 +1965,22 @@ static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep,
 	dep->queued_requests--;
 	trace_dwc3_complete_trb(dep, trb);

+	/*
+	 * If we're in the middle of a series of chained TRBs and we
+	 * receive a short transfer along the way, DWC3 will skip
+	 * through all TRBs including the last TRB in the chain (the
+	 * one where the CHN bit is zero). DWC3 will also avoid
+	 * clearing the HWO bit, so SW has to do it manually.
+	 *
+	 * We're going to do that here to avoid problems of HW trying
+	 * to use bogus TRBs for transfers.
+	 */
+	if (chain && (trb->ctrl & DWC3_TRB_CTRL_HWO))
+		trb->ctrl &= ~DWC3_TRB_CTRL_HWO;
+
 	if ((trb->ctrl & DWC3_TRB_CTRL_HWO) && status != -ESHUTDOWN)
-		/*
-		 * We continue despite the error. There is not much we
-		 * can do. If we don't clean it up we loop forever. If
-		 * we skip the TRB then it gets overwritten after a
-		 * while since we use them in a ring buffer. A BUG()
-		 * would help. Lets hope that if this occurs, someone
-		 * fixes the root cause instead of looking away :)
-		 */
-		dev_err(dwc->dev, "%s's TRB (%p) still owned by HW\n",
-				dep->name, trb);
+		return 1;
+
 	count = trb->size & DWC3_TRB_SIZE_MASK;

 	if (dep->direction) {
@@ -2013,15 +2019,7 @@ static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep,
 			s_pkt = 1;
 	}

-	/*
-	 * We assume here we will always receive the entire data block
-	 * which we should receive. Meaning, if we program RX to
-	 * receive 4K but we receive only 2K, we assume that's all we
-	 * should receive and we simply bounce the request back to the
-	 * gadget driver for further processing.
-	 */
-	req->request.actual += req->request.length - count;
-	if (s_pkt)
+	if (s_pkt && !chain)
 		return 1;
 	if ((event->status & DEPEVT_STATUS_LST) &&
 			(trb->ctrl & (DWC3_TRB_CTRL_LST |
@@ -2040,13 +2038,17 @@ static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep,
 	struct dwc3_trb		*trb;
 	unsigned int		slot;
 	unsigned int		i;
+	int			count = 0;
 	int			ret;

 	do {
+		int chain;
+
 		req = next_request(&dep->started_list);
 		if (WARN_ON_ONCE(!req))
 			return 1;

+		chain = req->request.num_mapped_sgs > 0;
 		i = 0;
 		do {
 			slot = req->first_trb_index + i;
@@ -2054,13 +2056,22 @@ static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep,
 				slot++;
 			slot %= DWC3_TRB_NUM;
 			trb = &dep->trb_pool[slot];
+			count += trb->size & DWC3_TRB_SIZE_MASK;

 			ret = __dwc3_cleanup_done_trbs(dwc, dep, req, trb,
-					event, status);
+					event, status, chain);
 			if (ret)
 				break;
 		} while (++i < req->request.num_mapped_sgs);

+		/*
+		 * We assume here we will always receive the entire data block
+		 * which we should receive. Meaning, if we program RX to
+		 * receive 4K but we receive only 2K, we assume that's all we
+		 * should receive and we simply bounce the request back to the
+		 * gadget driver for further processing.
+		 */
+		req->request.actual += req->request.length - count;
 		dwc3_gadget_giveback(dep, req, status);

 		if (ret)

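The gadget.c hunks above move the residue accounting out of the per-TRB helper: the remaining-byte count of every completed TRB is summed into count, and request.actual is then computed once per request, which is what makes short transfers across chained (scatter/gather) TRBs come out right. A small, self-contained sketch of that arithmetic follows, with invented sizes rather than values from the patch.

/*
 * Hedged illustration of the per-request residue accounting with
 * invented numbers: an 8192-byte SG request split over two TRBs,
 * where the transfer ends short after 2048 bytes.
 */
#include <stdio.h>

int main(void)
{
	unsigned int request_length = 8192;
	/* residue (untransferred bytes) reported in each completed TRB */
	unsigned int trb_remaining[2] = { 2048, 4096 };
	unsigned int count = 0;
	unsigned int i;

	for (i = 0; i < 2; i++)
		count += trb_remaining[i];

	/* request.actual = programmed length minus total residue */
	printf("actual = %u\n", request_length - count);	/* prints 2048 */
	return 0;
}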
+ 4 - 2
drivers/usb/gadget/composite.c

@@ -1913,6 +1913,8 @@ unknown:
 			break;

 		case USB_RECIP_ENDPOINT:
+			if (!cdev->config)
+				break;
 			endp = ((w_index & 0x80) >> 3) | (w_index & 0x0f);
 			list_for_each_entry(f, &cdev->config->functions, list) {
 				if (test_bit(endp, f->endpoints))
@@ -2124,14 +2126,14 @@ int composite_os_desc_req_prepare(struct usb_composite_dev *cdev,

 	cdev->os_desc_req = usb_ep_alloc_request(ep0, GFP_KERNEL);
 	if (!cdev->os_desc_req) {
-		ret = PTR_ERR(cdev->os_desc_req);
+		ret = -ENOMEM;
 		goto end;
 	}

 	/* OS feature descriptor length <= 4kB */
 	cdev->os_desc_req->buf = kmalloc(4096, GFP_KERNEL);
 	if (!cdev->os_desc_req->buf) {
-		ret = PTR_ERR(cdev->os_desc_req->buf);
+		ret = -ENOMEM;
 		kfree(cdev->os_desc_req);
 		goto end;
 	}

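Both composite.c fixes address the same pattern: usb_ep_alloc_request() and kmalloc() return NULL on failure rather than an ERR_PTR-encoded pointer, so PTR_ERR() on the NULL result evaluates to 0 and the function would report success while leaving the pointer unset. A user-space sketch that mirrors the kernel's cast makes the failure mode visible; the macro below is defined here only for illustration.

/*
 * Hedged, user-space mimic of PTR_ERR() showing why the old error
 * path returned 0 ("success") for a failed allocation.
 */
#include <stdio.h>

#define PTR_ERR(ptr)	((long)(ptr))	/* mirrors the kernel's cast */

int main(void)
{
	void *req = NULL;		/* pretend the allocation failed */
	long ret;

	ret = PTR_ERR(req);		/* old code: yields 0, i.e. "no error" */
	printf("PTR_ERR(NULL) = %ld\n", ret);

	ret = req ? 0 : -12;		/* fixed code: -ENOMEM (12) */
	printf("corrected ret  = %ld\n", ret);
	return 0;
}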
+ 2 - 0
drivers/usb/gadget/configfs.c

@@ -1490,7 +1490,9 @@ void unregister_gadget_item(struct config_item *item)
 {
 	struct gadget_info *gi = to_gadget_info(item);

+	mutex_lock(&gi->lock);
 	unregister_gadget(gi);
+	mutex_unlock(&gi->lock);
 }
 EXPORT_SYMBOL_GPL(unregister_gadget_item);


+ 6 - 0
drivers/usb/gadget/function/rndis.c

@@ -680,6 +680,12 @@ static int rndis_reset_response(struct rndis_params *params,
 {
 	rndis_reset_cmplt_type *resp;
 	rndis_resp_t *r;
+	u8 *xbuf;
+	u32 length;
+
+	/* drain the response queue */
+	while ((xbuf = rndis_get_next_response(params, &length)))
+		rndis_free_response(params, xbuf);

 	r = rndis_add_response(params, sizeof(rndis_reset_cmplt_type));
 	if (!r)

+ 2 - 1
drivers/usb/gadget/function/u_ether.c

@@ -556,7 +556,8 @@ static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
 			/* Multi frame CDC protocols may store the frame for
 			 * later which is not a dropped frame.
 			 */
-			if (dev->port_usb->supports_multi_frame)
+			if (dev->port_usb &&
+					dev->port_usb->supports_multi_frame)
 				goto multiframe;
 			goto drop;
 		}

+ 1 - 1
drivers/usb/gadget/function/uvc_configfs.c

@@ -2023,7 +2023,7 @@ static int uvcg_streaming_class_allow_link(struct config_item *src,
 	if (!data) {
 		kfree(*class_array);
 		*class_array = NULL;
-		ret = PTR_ERR(data);
+		ret = -ENOMEM;
 		goto unlock;
 	}
 	cl_arr = *class_array;

+ 2 - 2
drivers/usb/gadget/legacy/inode.c

@@ -542,7 +542,7 @@ static ssize_t ep_aio(struct kiocb *iocb,
 	 */
 	spin_lock_irq(&epdata->dev->lock);
 	value = -ENODEV;
-	if (unlikely(epdata->ep))
+	if (unlikely(epdata->ep == NULL))
 		goto fail;

 	req = usb_ep_alloc_request(epdata->ep, GFP_ATOMIC);
@@ -606,7 +606,7 @@ ep_read_iter(struct kiocb *iocb, struct iov_iter *to)
 	}
 	if (is_sync_kiocb(iocb)) {
 		value = ep_io(epdata, buf, len);
-		if (value >= 0 && copy_to_iter(buf, value, to))
+		if (value >= 0 && (copy_to_iter(buf, value, to) != value))
 			value = -EFAULT;
 	} else {
 		struct kiocb_priv *priv = kzalloc(sizeof *priv, GFP_KERNEL);

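The ep_read_iter() fix relies on copy_to_iter() returning the number of bytes it managed to copy, so the error case is a short copy rather than a non-zero return (the old check flagged every successful copy as -EFAULT). A hedged user-space model of that contract follows; copy_out() is an invented stand-in for the real iov_iter helper, not a kernel API.

/*
 * Hedged model of the "short copy is the error" contract.
 * copy_out() is invented for illustration and simulates a fault.
 */
#include <stdio.h>
#include <string.h>

static size_t copy_out(char *dst, const char *src, size_t n, size_t fault_at)
{
	size_t can = n < fault_at ? n : fault_at;	/* simulate a partial copy */

	memcpy(dst, src, can);
	return can;					/* bytes actually copied */
}

int main(void)
{
	char dst[8], src[8] = "payload";
	long value = 7;					/* bytes produced by the endpoint */

	/* old check:  copy != 0      -> -EFAULT (misfires on success)   */
	/* new check:  copy != value  -> -EFAULT (flags short copies)    */
	if (copy_out(dst, src, value, 4) != (size_t)value)
		value = -14;				/* -EFAULT */
	printf("value = %ld\n", value);
	return 0;
}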
+ 4 - 1
drivers/usb/gadget/udc/core.c

@@ -1145,7 +1145,7 @@ int usb_add_gadget_udc_release(struct device *parent, struct usb_gadget *gadget,
 			if (ret != -EPROBE_DEFER)
 				list_del(&driver->pending);
 			if (ret)
-				goto err4;
+				goto err5;
 			break;
 		}
 	}
@@ -1154,6 +1154,9 @@ int usb_add_gadget_udc_release(struct device *parent, struct usb_gadget *gadget,

 	return 0;

+err5:
+	device_del(&udc->dev);
+
 err4:
 	list_del(&udc->list);
 	mutex_unlock(&udc_lock);

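The new err5 label extends the usual unwind ladder: each label undoes exactly one earlier setup step, and a failure jumps to the label matching how far setup got, so the device registered earlier in the function is deleted again on this error path. A generic, self-contained sketch of the idiom follows; all names and resources in it are invented for illustration.

/*
 * Hedged sketch of the goto-unwind ladder that err5 extends.
 * Resources and names below are invented, not from the driver.
 */
#include <stdlib.h>

struct state { void *a, *b, *c; };

static int setup(struct state *s)
{
	s->a = malloc(16);
	if (!s->a)
		goto err0;
	s->b = malloc(16);
	if (!s->b)
		goto err_free_a;
	s->c = malloc(16);
	if (!s->c)
		goto err_free_b;
	return 0;			/* caller now owns a, b and c */

err_free_b:
	free(s->b);
err_free_a:
	free(s->a);
err0:
	return -1;			/* kernel code would return -ENOMEM etc. */
}

int main(void)
{
	struct state s;

	if (setup(&s))
		return 1;
	free(s.c);
	free(s.b);
	free(s.a);
	return 0;
}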
+ 1 - 1
drivers/usb/gadget/udc/fsl_qe_udc.c

@@ -2053,7 +2053,7 @@ static void setup_received_handle(struct qe_udc *udc,
 			struct qe_ep *ep;

 			if (wValue != 0 || wLength != 0
-				|| pipe > USB_MAX_ENDPOINTS)
+				|| pipe >= USB_MAX_ENDPOINTS)
 				break;
 			ep = &udc->eps[pipe];


+ 2 - 2
drivers/usb/host/ehci-hcd.c

@@ -332,11 +332,11 @@ static void ehci_turn_off_all_ports(struct ehci_hcd *ehci)
 	int	port = HCS_N_PORTS(ehci->hcs_params);

 	while (port--) {
-		ehci_writel(ehci, PORT_RWC_BITS,
-				&ehci->regs->port_status[port]);
 		spin_unlock_irq(&ehci->lock);
 		ehci_port_power(ehci, port, false);
 		spin_lock_irq(&ehci->lock);
+		ehci_writel(ehci, PORT_RWC_BITS,
+				&ehci->regs->port_status[port]);
 	}
 }


+ 1 - 1
drivers/usb/host/max3421-hcd.c

@@ -1675,7 +1675,7 @@ max3421_gpout_set_value(struct usb_hcd *hcd, u8 pin_number, u8 value)
 	if (pin_number > 7)
 		return;

-	mask = 1u << pin_number;
+	mask = 1u << (pin_number % 4);
 	idx = pin_number / 4;

 	if (value)

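The max3421 fix derives both the register index and the bit position from the pin number; the bit must be taken modulo four because, as the existing idx = pin_number / 4 line implies, the output pins are grouped four to a register. A tiny standalone sketch of the corrected mapping:

/*
 * Hedged illustration of the corrected pin -> (register, bit) mapping.
 * The four-pins-per-register grouping is inferred from the existing
 * "idx = pin_number / 4" computation, not restated from the datasheet.
 */
#include <stdio.h>

int main(void)
{
	unsigned int pin;

	for (pin = 0; pin <= 7; pin++) {
		unsigned int idx  = pin / 4;		/* which output register */
		unsigned int mask = 1u << (pin % 4);	/* bit inside that register */

		printf("pin %u -> reg %u, mask 0x%02x\n", pin, idx, mask);
	}
	return 0;
}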
+ 3 - 0
drivers/usb/host/xhci-hub.c

@@ -386,6 +386,9 @@ static int xhci_stop_device(struct xhci_hcd *xhci, int slot_id, int suspend)

 	ret = 0;
 	virt_dev = xhci->devs[slot_id];
+	if (!virt_dev)
+		return -ENODEV;
+
 	cmd = xhci_alloc_command(xhci, false, true, GFP_NOIO);
 	if (!cmd) {
 		xhci_dbg(xhci, "Couldn't allocate command structure.\n");

+ 2 - 1
drivers/usb/host/xhci-pci.c

@@ -314,11 +314,12 @@ static void xhci_pci_remove(struct pci_dev *dev)
 		usb_remove_hcd(xhci->shared_hcd);
 		usb_put_hcd(xhci->shared_hcd);
 	}
-	usb_hcd_pci_remove(dev);

 	/* Workaround for spurious wakeups at shutdown with HSW */
 	if (xhci->quirks & XHCI_SPURIOUS_WAKEUP)
 		pci_set_power_state(dev, PCI_D3hot);
+
+	usb_hcd_pci_remove(dev);
 }

 #ifdef CONFIG_PM

+ 9 - 7
drivers/usb/host/xhci-ring.c

@@ -1334,12 +1334,6 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,

 	cmd = list_entry(xhci->cmd_list.next, struct xhci_command, cmd_list);

-	if (cmd->command_trb != xhci->cmd_ring->dequeue) {
-		xhci_err(xhci,
-			 "Command completion event does not match command\n");
-		return;
-	}
-
 	del_timer(&xhci->cmd_timer);

 	trace_xhci_cmd_completion(cmd_trb, (struct xhci_generic_trb *) event);
@@ -1351,6 +1345,13 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
 		xhci_handle_stopped_cmd_ring(xhci, cmd);
 		return;
 	}
+
+	if (cmd->command_trb != xhci->cmd_ring->dequeue) {
+		xhci_err(xhci,
+			 "Command completion event does not match command\n");
+		return;
+	}
+
 	/*
 	 * Host aborted the command ring, check if the current command was
 	 * supposed to be aborted, otherwise continue normally.
@@ -3243,7 +3244,8 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 	send_addr = addr;

 	/* Queue the TRBs, even if they are zero-length */
-	for (enqd_len = 0; enqd_len < full_len; enqd_len += trb_buff_len) {
+	for (enqd_len = 0; first_trb || enqd_len < full_len;
+			enqd_len += trb_buff_len) {
 		field = TRB_TYPE(TRB_NORMAL);

 		/* TRB buffer should not cross 64KB boundaries */

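The revised loop condition in xhci_queue_bulk_tx() uses first_trb to force at least one pass, so a zero-length bulk transfer still queues a single zero-length TRB instead of none. Below is a compact model of just that loop logic, with the real TRB bookkeeping stripped away; the helper and its sizes are illustrative.

/*
 * Hedged model of the revised loop condition: with first_trb forcing
 * one pass, a zero-length transfer still produces exactly one TRB.
 */
#include <stdbool.h>
#include <stdio.h>

static unsigned int queue_trbs(unsigned int full_len)
{
	bool first_trb = true;
	unsigned int enqd_len, trb_buff_len, queued = 0;

	for (enqd_len = 0; first_trb || enqd_len < full_len;
			enqd_len += trb_buff_len) {
		trb_buff_len = full_len - enqd_len;	/* single-segment case */
		first_trb = false;
		queued++;
	}
	return queued;
}

int main(void)
{
	printf("full_len 0    -> %u TRB(s)\n", queue_trbs(0));	/* 1 */
	printf("full_len 4096 -> %u TRB(s)\n", queue_trbs(4096));	/* 1 */
	return 0;
}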
+ 5 - 5
drivers/usb/misc/ftdi-elan.c

@@ -665,7 +665,7 @@ static ssize_t ftdi_elan_read(struct file *file, char __user *buffer,
 {
 	char data[30 *3 + 4];
 	char *d = data;
-	int m = (sizeof(data) - 1) / 3;
+	int m = (sizeof(data) - 1) / 3 - 1;
 	int bytes_read = 0;
 	int retry_on_empty = 10;
 	int retry_on_timeout = 5;
@@ -1684,7 +1684,7 @@ wait:if (ftdi->disconnected > 0) {
 			int i = 0;
 			char data[30 *3 + 4];
 			char *d = data;
-			int m = (sizeof(data) - 1) / 3;
+			int m = (sizeof(data) - 1) / 3 - 1;
 			int l = 0;
 			struct u132_target *target = &ftdi->target[ed];
 			struct u132_command *command = &ftdi->command[
@@ -1876,7 +1876,7 @@ more:{
 		if (packet_bytes > 2) {
 			char diag[30 *3 + 4];
 			char *d = diag;
-			int m = (sizeof(diag) - 1) / 3;
+			int m = (sizeof(diag) - 1) / 3 - 1;
 			char *b = ftdi->bulk_in_buffer;
 			int bytes_read = 0;
 			diag[0] = 0;
@@ -2053,7 +2053,7 @@ static int ftdi_elan_synchronize(struct usb_ftdi *ftdi)
 			if (packet_bytes > 2) {
 				char diag[30 *3 + 4];
 				char *d = diag;
-				int m = (sizeof(diag) - 1) / 3;
+				int m = (sizeof(diag) - 1) / 3 - 1;
 				char *b = ftdi->bulk_in_buffer;
 				int bytes_read = 0;
 				unsigned char c = 0;
@@ -2155,7 +2155,7 @@ more:{
 		if (packet_bytes > 2) {
 			char diag[30 *3 + 4];
 			char *d = diag;
-			int m = (sizeof(diag) - 1) / 3;
+			int m = (sizeof(diag) - 1) / 3 - 1;
 			char *b = ftdi->bulk_in_buffer;
 			int bytes_read = 0;
 			diag[0] = 0;

Some files were not shown because of the large number of changed files