Browse Source

Merge tag 'v4.10-rc7' into efi/core, to pick up fixes

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Ingo Molnar 8 years ago
parent
commit
87a8d03266
100 changed files with 932 additions and 856 deletions
  1. Documentation/devicetree/bindings/interrupt-controller/snps,archs-idu-intc.txt (+3 -0)
  2. Documentation/devicetree/bindings/net/mediatek-net.txt (+1 -1)
  3. Documentation/devicetree/bindings/net/phy.txt (+3 -2)
  4. Documentation/power/states.txt (+1 -3)
  5. MAINTAINERS (+3 -4)
  6. Makefile (+3 -3)
  7. arch/arc/include/asm/delay.h (+3 -1)
  8. arch/arc/kernel/head.S (+7 -7)
  9. arch/arc/kernel/mcip.c (+23 -32)
  10. arch/arc/kernel/smp.c (+20 -5)
  11. arch/arc/kernel/unaligned.c (+2 -1)
  12. arch/arm64/crypto/aes-modes.S (+42 -46)
  13. arch/arm64/kernel/topology.c (+7 -1)
  14. arch/parisc/include/asm/bitops.h (+7 -1)
  15. arch/parisc/include/uapi/asm/bitsperlong.h (+0 -2)
  16. arch/parisc/include/uapi/asm/swab.h (+3 -2)
  17. arch/powerpc/Kconfig (+1 -1)
  18. arch/powerpc/include/asm/cpu_has_feature.h (+2 -0)
  19. arch/powerpc/include/asm/mmu.h (+2 -0)
  20. arch/powerpc/include/asm/module.h (+0 -4)
  21. arch/powerpc/include/asm/stackprotector.h (+0 -40)
  22. arch/powerpc/kernel/Makefile (+0 -4)
  23. arch/powerpc/kernel/asm-offsets.c (+0 -3)
  24. arch/powerpc/kernel/eeh_driver.c (+1 -1)
  25. arch/powerpc/kernel/entry_32.S (+1 -5)
  26. arch/powerpc/kernel/module_64.c (+0 -8)
  27. arch/powerpc/kernel/process.c (+0 -6)
  28. arch/powerpc/kernel/prom_init.c (+3 -0)
  29. arch/powerpc/mm/pgtable-radix.c (+2 -2)
  30. arch/s390/kernel/ptrace.c (+8 -0)
  31. arch/s390/mm/pgtable.c (+4 -3)
  32. arch/sparc/include/asm/mmu_context_64.h (+4 -4)
  33. arch/sparc/kernel/irq_64.c (+1 -1)
  34. arch/sparc/kernel/sstate.c (+3 -3)
  35. arch/sparc/kernel/traps_64.c (+73 -0)
  36. arch/x86/events/intel/rapl.c (+26 -34)
  37. arch/x86/events/intel/uncore.c (+91 -141)
  38. arch/x86/include/asm/microcode.h (+1 -0)
  39. arch/x86/kernel/apic/io_apic.c (+2 -0)
  40. arch/x86/kernel/cpu/mcheck/mce.c (+12 -19)
  41. arch/x86/kernel/cpu/microcode/amd.c (+3 -2)
  42. arch/x86/kernel/cpu/microcode/core.c (+17 -5)
  43. arch/x86/kernel/cpu/microcode/intel.c (+1 -8)
  44. arch/x86/kernel/fpu/core.c (+3 -1)
  45. arch/x86/kernel/hpet.c (+1 -0)
  46. arch/x86/kvm/x86.c (+1 -0)
  47. arch/xtensa/kernel/setup.c (+1 -1)
  48. crypto/algapi.c (+1 -0)
  49. drivers/acpi/acpica/tbdata.c (+2 -7)
  50. drivers/acpi/acpica/tbinstal.c (+15 -2)
  51. drivers/acpi/sleep.c (+0 -8)
  52. drivers/acpi/video_detect.c (+0 -11)
  53. drivers/ata/libata-core.c (+4 -2)
  54. drivers/ata/sata_mv.c (+3 -0)
  55. drivers/base/firmware_class.c (+1 -4)
  56. drivers/base/memory.c (+6 -6)
  57. drivers/bcma/bcma_private.h (+3 -0)
  58. drivers/bcma/driver_chipcommon.c (+3 -8)
  59. drivers/bcma/driver_mips.c (+3 -0)
  60. drivers/block/xen-blkfront.c (+14 -8)
  61. drivers/cpufreq/intel_pstate.c (+13 -1)
  62. drivers/dma/cppi41.c (+46 -23)
  63. drivers/dma/pl330.c (+6 -13)
  64. drivers/firmware/efi/libstub/fdt.c (+3 -11)
  65. drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c (+7 -0)
  66. drivers/gpu/drm/amd/amdgpu/dce_virtual.c (+1 -4)
  67. drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c (+3 -1)
  68. drivers/gpu/drm/ast/ast_drv.h (+1 -0)
  69. drivers/gpu/drm/ast/ast_main.c (+83 -74)
  70. drivers/gpu/drm/ast/ast_post.c (+13 -5)
  71. drivers/gpu/drm/drm_atomic.c (+14 -11)
  72. drivers/gpu/drm/drm_atomic_helper.c (+0 -9)
  73. drivers/gpu/drm/drm_connector.c (+18 -5)
  74. drivers/gpu/drm/drm_drv.c (+4 -0)
  75. drivers/gpu/drm/drm_probe_helper.c (+31 -20)
  76. drivers/gpu/drm/i915/gvt/cmd_parser.c (+0 -4)
  77. drivers/gpu/drm/i915/gvt/execlist.c (+19 -47)
  78. drivers/gpu/drm/i915/gvt/kvmgt.c (+4 -4)
  79. drivers/gpu/drm/i915/gvt/scheduler.h (+1 -1)
  80. drivers/gpu/drm/i915/i915_drv.c (+1 -1)
  81. drivers/gpu/drm/i915/i915_drv.h (+9 -12)
  82. drivers/gpu/drm/i915/i915_vma.c (+1 -0)
  83. drivers/gpu/drm/i915/intel_atomic_plane.c (+20 -0)
  84. drivers/gpu/drm/i915/intel_crt.c (+5 -4)
  85. drivers/gpu/drm/i915/intel_display.c (+82 -87)
  86. drivers/gpu/drm/i915/intel_drv.h (+9 -2)
  87. drivers/gpu/drm/i915/intel_fbc.c (+20 -32)
  88. drivers/gpu/drm/i915/intel_fbdev.c (+5 -2)
  89. drivers/gpu/drm/i915/intel_hotplug.c (+2 -2)
  90. drivers/gpu/drm/i915/intel_sprite.c (+4 -4)
  91. drivers/gpu/drm/nouveau/dispnv04/hw.c (+2 -1)
  92. drivers/gpu/drm/nouveau/nouveau_display.c (+2 -1)
  93. drivers/gpu/drm/nouveau/nouveau_drm.c (+4 -1)
  94. drivers/gpu/drm/nouveau/nouveau_drv.h (+2 -0)
  95. drivers/gpu/drm/nouveau/nouveau_fbcon.c (+34 -9)
  96. drivers/gpu/drm/nouveau/nouveau_fence.h (+1 -0)
  97. drivers/gpu/drm/nouveau/nouveau_led.h (+1 -1)
  98. drivers/gpu/drm/nouveau/nouveau_usif.c (+2 -1)
  99. drivers/gpu/drm/nouveau/nv50_display.c (+6 -0)
  100. drivers/gpu/drm/nouveau/nv84_fence.c (+6 -0)

+ 3 - 0
Documentation/devicetree/bindings/interrupt-controller/snps,archs-idu-intc.txt

@@ -15,6 +15,9 @@ Properties:
   Second cell specifies the irq distribution mode to cores
      0=Round Robin; 1=cpu0, 2=cpu1, 4=cpu2, 8=cpu3
 
+  The second cell in interrupts property is deprecated and may be ignored by
+  the kernel.
+
   intc accessed via the special ARC AUX register interface, hence "reg" property
   is not specified.
 

+ 1 - 1
Documentation/devicetree/bindings/net/mediatek-net.txt

@@ -7,7 +7,7 @@ have dual GMAC each represented by a child node..
 * Ethernet controller node
 
 Required properties:
-- compatible: Should be "mediatek,mt7623-eth"
+- compatible: Should be "mediatek,mt2701-eth"
 - reg: Address and length of the register set for the device
 - interrupts: Should contain the three frame engines interrupts in numeric
 	order. These are fe_int0, fe_int1 and fe_int2.

+ 3 - 2
Documentation/devicetree/bindings/net/phy.txt

@@ -19,8 +19,9 @@ Optional Properties:
   specifications. If neither of these are specified, the default is to
   assume clause 22.
 
-  If the phy's identifier is known then the list may contain an entry
-  of the form: "ethernet-phy-idAAAA.BBBB" where
+  If the PHY reports an incorrect ID (or none at all) then the
+  "compatible" list may contain an entry with the correct PHY ID in the
+  form: "ethernet-phy-idAAAA.BBBB" where
      AAAA - The value of the 16 bit Phy Identifier 1 register as
             4 hex digits. This is the chip vendor OUI bits 3:18
      BBBB - The value of the 16 bit Phy Identifier 2 register as

+ 1 - 3
Documentation/power/states.txt

@@ -35,9 +35,7 @@ only one way to cause the system to go into the Suspend-To-RAM state (write
 The default suspend mode (ie. the one to be used without writing anything into
 /sys/power/mem_sleep) is either "deep" (if Suspend-To-RAM is supported) or
 "s2idle", but it can be overridden by the value of the "mem_sleep_default"
-parameter in the kernel command line.  On some ACPI-based systems, depending on
-the information in the FADT, the default may be "s2idle" even if Suspend-To-RAM
-is supported.
+parameter in the kernel command line.
 
 The properties of all of the sleep states are described below.
 

+ 3 - 4
MAINTAINERS

@@ -3567,7 +3567,7 @@ F:	drivers/infiniband/hw/cxgb3/
 F:	include/uapi/rdma/cxgb3-abi.h
 
 CXGB4 ETHERNET DRIVER (CXGB4)
-M:	Hariprasad S <hariprasad@chelsio.com>
+M:	Ganesh Goudar <ganeshgr@chelsio.com>
 L:	netdev@vger.kernel.org
 W:	http://www.chelsio.com
 S:	Supported
@@ -4153,7 +4153,7 @@ F:	Documentation/gpu/i915.rst
 INTEL GVT-g DRIVERS (Intel GPU Virtualization)
 M:      Zhenyu Wang <zhenyuw@linux.intel.com>
 M:      Zhi Wang <zhi.a.wang@intel.com>
-L:      igvt-g-dev@lists.01.org
+L:      intel-gvt-dev@lists.freedesktop.org
 L:      intel-gfx@lists.freedesktop.org
 W:      https://01.org/igvt-g
 T:      git https://github.com/01org/gvt-linux.git
@@ -10195,7 +10195,6 @@ F:	drivers/media/tuners/qt1010*
 QUALCOMM ATHEROS ATH9K WIRELESS DRIVER
 M:	QCA ath9k Development <ath9k-devel@qca.qualcomm.com>
 L:	linux-wireless@vger.kernel.org
-L:	ath9k-devel@lists.ath9k.org
 W:	http://wireless.kernel.org/en/users/Drivers/ath9k
 S:	Supported
 F:	drivers/net/wireless/ath/ath9k/
@@ -13066,7 +13065,7 @@ F:	drivers/input/serio/userio.c
 F:	include/uapi/linux/userio.h
 
 VIRTIO CONSOLE DRIVER
-M:	Amit Shah <amit.shah@redhat.com>
+M:	Amit Shah <amit@kernel.org>
 L:	virtualization@lists.linux-foundation.org
 S:	Maintained
 F:	drivers/char/virtio_console.c

+ 3 - 3
Makefile

@@ -1,8 +1,8 @@
 VERSION = 4
 PATCHLEVEL = 10
 SUBLEVEL = 0
-EXTRAVERSION = -rc5
-NAME = Anniversary Edition
+EXTRAVERSION = -rc7
+NAME = Fearless Coyote
 
 # *DOCUMENTATION*
 # To see a list of typical targets execute "make help"
@@ -797,7 +797,7 @@ KBUILD_CFLAGS   += $(call cc-option,-Werror=incompatible-pointer-types)
 KBUILD_ARFLAGS := $(call ar-option,D)
 
 # check for 'asm goto'
-ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC)), y)
+ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC) $(KBUILD_CFLAGS)), y)
 	KBUILD_CFLAGS += -DCC_HAVE_ASM_GOTO
 	KBUILD_AFLAGS += -DCC_HAVE_ASM_GOTO
 endif

+ 3 - 1
arch/arc/include/asm/delay.h

@@ -26,7 +26,9 @@ static inline void __delay(unsigned long loops)
 	"	lp  1f			\n"
 	"	lp  1f			\n"
 	"	nop			\n"
 	"	nop			\n"
 	"1:				\n"
 	"1:				\n"
-	: : "r"(loops));
+	:
+        : "r"(loops)
+        : "lp_count");
 }
 }
 
 
 extern void __bad_udelay(void);
 extern void __bad_udelay(void);

+ 7 - 7
arch/arc/kernel/head.S

@@ -71,14 +71,14 @@ ENTRY(stext)
 	GET_CPU_ID  r5
 	cmp	r5, 0
 	mov.nz	r0, r5
-#ifdef CONFIG_ARC_SMP_HALT_ON_RESET
-	; Non-Master can proceed as system would be booted sufficiently
-	jnz	first_lines_of_secondary
-#else
+	bz	.Lmaster_proceed
+
 	; Non-Masters wait for Master to boot enough and bring them up
-	jnz	arc_platform_smp_wait_to_boot
-#endif
-	; Master falls thru
+	; when they resume, tail-call to entry point
+	mov	blink, @first_lines_of_secondary
+	j	arc_platform_smp_wait_to_boot
+
+.Lmaster_proceed:
 #endif
 
 	; Clear BSS before updating any globals

+ 23 - 32
arch/arc/kernel/mcip.c

@@ -93,11 +93,10 @@ static void mcip_probe_n_setup(void)
 	READ_BCR(ARC_REG_MCIP_BCR, mp);
 
 	sprintf(smp_cpuinfo_buf,
-		"Extn [SMP]\t: ARConnect (v%d): %d cores with %s%s%s%s%s\n",
+		"Extn [SMP]\t: ARConnect (v%d): %d cores with %s%s%s%s\n",
 		mp.ver, mp.num_cores,
 		IS_AVAIL1(mp.ipi, "IPI "),
 		IS_AVAIL1(mp.idu, "IDU "),
-		IS_AVAIL1(mp.llm, "LLM "),
 		IS_AVAIL1(mp.dbg, "DEBUG "),
 		IS_AVAIL1(mp.gfrc, "GFRC"));
 
@@ -175,7 +174,6 @@ static void idu_irq_unmask(struct irq_data *data)
 	raw_spin_unlock_irqrestore(&mcip_lock, flags);
 }
 
-#ifdef CONFIG_SMP
 static int
 idu_irq_set_affinity(struct irq_data *data, const struct cpumask *cpumask,
 		     bool force)
@@ -205,12 +203,27 @@ idu_irq_set_affinity(struct irq_data *data, const struct cpumask *cpumask,
 
 	return IRQ_SET_MASK_OK;
 }
-#endif
+
+static void idu_irq_enable(struct irq_data *data)
+{
+	/*
+	 * By default send all common interrupts to all available online CPUs.
+	 * The affinity of common interrupts in IDU must be set manually since
+	 * in some cases the kernel will not call irq_set_affinity() by itself:
+	 *   1. When the kernel is not configured with support of SMP.
+	 *   2. When the kernel is configured with support of SMP but upper
+	 *      interrupt controllers does not support setting of the affinity
+	 *      and cannot propagate it to IDU.
+	 */
+	idu_irq_set_affinity(data, cpu_online_mask, false);
+	idu_irq_unmask(data);
+}
 
 static struct irq_chip idu_irq_chip = {
 	.name			= "MCIP IDU Intc",
 	.irq_mask		= idu_irq_mask,
 	.irq_unmask		= idu_irq_unmask,
+	.irq_enable		= idu_irq_enable,
 #ifdef CONFIG_SMP
 	.irq_set_affinity       = idu_irq_set_affinity,
 #endif
@@ -243,36 +256,14 @@ static int idu_irq_xlate(struct irq_domain *d, struct device_node *n,
 			 const u32 *intspec, unsigned int intsize,
 			 irq_hw_number_t *out_hwirq, unsigned int *out_type)
 {
-	irq_hw_number_t hwirq = *out_hwirq = intspec[0];
-	int distri = intspec[1];
-	unsigned long flags;
-
+	/*
+	 * Ignore value of interrupt distribution mode for common interrupts in
+	 * IDU which resides in intspec[1] since setting an affinity using value
+	 * from Device Tree is deprecated in ARC.
+	 */
+	*out_hwirq = intspec[0];
 	*out_type = IRQ_TYPE_NONE;
 
-	/* XXX: validate distribution scheme again online cpu mask */
-	if (distri == 0) {
-		/* 0 - Round Robin to all cpus, otherwise 1 bit per core */
-		raw_spin_lock_irqsave(&mcip_lock, flags);
-		idu_set_dest(hwirq, BIT(num_online_cpus()) - 1);
-		idu_set_mode(hwirq, IDU_M_TRIG_LEVEL, IDU_M_DISTRI_RR);
-		raw_spin_unlock_irqrestore(&mcip_lock, flags);
-	} else {
-		/*
-		 * DEST based distribution for Level Triggered intr can only
-		 * have 1 CPU, so generalize it to always contain 1 cpu
-		 */
-		int cpu = ffs(distri);
-
-		if (cpu != fls(distri))
-			pr_warn("IDU irq %lx distri mode set to cpu %x\n",
-				hwirq, cpu);
-
-		raw_spin_lock_irqsave(&mcip_lock, flags);
-		idu_set_dest(hwirq, cpu);
-		idu_set_mode(hwirq, IDU_M_TRIG_LEVEL, IDU_M_DISTRI_DEST);
-		raw_spin_unlock_irqrestore(&mcip_lock, flags);
-	}
-
 	return 0;
 }
 

+ 20 - 5
arch/arc/kernel/smp.c

@@ -90,22 +90,37 @@ void __init smp_cpus_done(unsigned int max_cpus)
  */
 static volatile int wake_flag;
 
+#ifdef CONFIG_ISA_ARCOMPACT
+
+#define __boot_read(f)		f
+#define __boot_write(f, v)	f = v
+
+#else
+
+#define __boot_read(f)		arc_read_uncached_32(&f)
+#define __boot_write(f, v)	arc_write_uncached_32(&f, v)
+
+#endif
+
 static void arc_default_smp_cpu_kick(int cpu, unsigned long pc)
 {
 	BUG_ON(cpu == 0);
-	wake_flag = cpu;
+
+	__boot_write(wake_flag, cpu);
 }
 
 void arc_platform_smp_wait_to_boot(int cpu)
 {
-	while (wake_flag != cpu)
+	/* for halt-on-reset, we've waited already */
+	if (IS_ENABLED(CONFIG_ARC_SMP_HALT_ON_RESET))
+		return;
+
+	while (__boot_read(wake_flag) != cpu)
 		;
 
-	wake_flag = 0;
-	__asm__ __volatile__("j @first_lines_of_secondary	\n");
+	__boot_write(wake_flag, 0);
 }
 
-
 const char *arc_platform_smp_cpuinfo(void)
 {
 	return plat_smp_ops.info ? : "";

+ 2 - 1
arch/arc/kernel/unaligned.c

@@ -241,8 +241,9 @@ int misaligned_fixup(unsigned long address, struct pt_regs *regs,
 	if (state.fault)
 		goto fault;
 
+	/* clear any remnants of delay slot */
 	if (delay_mode(regs)) {
-		regs->ret = regs->bta;
+		regs->ret = regs->bta & ~1U;
 		regs->status32 &= ~STATUS_DE_MASK;
 	} else {
 		regs->ret += state.instr_len;

+ 42 - 46
arch/arm64/crypto/aes-modes.S

@@ -193,15 +193,16 @@ AES_ENTRY(aes_cbc_encrypt)
 	cbz		w6, .Lcbcencloop
 
 	ld1		{v0.16b}, [x5]			/* get iv */
-	enc_prepare	w3, x2, x5
+	enc_prepare	w3, x2, x6
 
 .Lcbcencloop:
 	ld1		{v1.16b}, [x1], #16		/* get next pt block */
 	eor		v0.16b, v0.16b, v1.16b		/* ..and xor with iv */
-	encrypt_block	v0, w3, x2, x5, w6
+	encrypt_block	v0, w3, x2, x6, w7
 	st1		{v0.16b}, [x0], #16
 	subs		w4, w4, #1
 	bne		.Lcbcencloop
+	st1		{v0.16b}, [x5]			/* return iv */
 	ret
 AES_ENDPROC(aes_cbc_encrypt)
 
@@ -211,7 +212,7 @@ AES_ENTRY(aes_cbc_decrypt)
 	cbz		w6, .LcbcdecloopNx
 
 	ld1		{v7.16b}, [x5]			/* get iv */
-	dec_prepare	w3, x2, x5
+	dec_prepare	w3, x2, x6
 
 .LcbcdecloopNx:
 #if INTERLEAVE >= 2
@@ -248,7 +249,7 @@ AES_ENTRY(aes_cbc_decrypt)
 .Lcbcdecloop:
 	ld1		{v1.16b}, [x1], #16		/* get next ct block */
 	mov		v0.16b, v1.16b			/* ...and copy to v0 */
-	decrypt_block	v0, w3, x2, x5, w6
+	decrypt_block	v0, w3, x2, x6, w7
 	eor		v0.16b, v0.16b, v7.16b		/* xor with iv => pt */
 	mov		v7.16b, v1.16b			/* ct is next iv */
 	st1		{v0.16b}, [x0], #16
@@ -256,6 +257,7 @@ AES_ENTRY(aes_cbc_decrypt)
 	bne		.Lcbcdecloop
 .Lcbcdecout:
 	FRAME_POP
+	st1		{v7.16b}, [x5]			/* return iv */
 	ret
 AES_ENDPROC(aes_cbc_decrypt)
 
@@ -267,24 +269,15 @@ AES_ENDPROC(aes_cbc_decrypt)
 
 AES_ENTRY(aes_ctr_encrypt)
 	FRAME_PUSH
-	cbnz		w6, .Lctrfirst		/* 1st time around? */
-	umov		x5, v4.d[1]		/* keep swabbed ctr in reg */
-	rev		x5, x5
-#if INTERLEAVE >= 2
-	cmn		w5, w4			/* 32 bit overflow? */
-	bcs		.Lctrinc
-	add		x5, x5, #1		/* increment BE ctr */
-	b		.LctrincNx
-#else
-	b		.Lctrinc
-#endif
-.Lctrfirst:
+	cbz		w6, .Lctrnotfirst	/* 1st time around? */
 	enc_prepare	w3, x2, x6
 	ld1		{v4.16b}, [x5]
-	umov		x5, v4.d[1]		/* keep swabbed ctr in reg */
-	rev		x5, x5
+
+.Lctrnotfirst:
+	umov		x8, v4.d[1]		/* keep swabbed ctr in reg */
+	rev		x8, x8
 #if INTERLEAVE >= 2
-	cmn		w5, w4			/* 32 bit overflow? */
+	cmn		w8, w4			/* 32 bit overflow? */
 	bcs		.Lctrloop
 .LctrloopNx:
 	subs		w4, w4, #INTERLEAVE
@@ -292,11 +285,11 @@ AES_ENTRY(aes_ctr_encrypt)
 #if INTERLEAVE == 2
 	mov		v0.8b, v4.8b
 	mov		v1.8b, v4.8b
-	rev		x7, x5
-	add		x5, x5, #1
+	rev		x7, x8
+	add		x8, x8, #1
 	ins		v0.d[1], x7
-	rev		x7, x5
-	add		x5, x5, #1
+	rev		x7, x8
+	add		x8, x8, #1
 	ins		v1.d[1], x7
 	ld1		{v2.16b-v3.16b}, [x1], #32	/* get 2 input blocks */
 	do_encrypt_block2x
@@ -305,7 +298,7 @@ AES_ENTRY(aes_ctr_encrypt)
 	st1		{v0.16b-v1.16b}, [x0], #32
 #else
 	ldr		q8, =0x30000000200000001	/* addends 1,2,3[,0] */
-	dup		v7.4s, w5
+	dup		v7.4s, w8
 	mov		v0.16b, v4.16b
 	add		v7.4s, v7.4s, v8.4s
 	mov		v1.16b, v4.16b
@@ -323,18 +316,12 @@ AES_ENTRY(aes_ctr_encrypt)
 	eor		v2.16b, v7.16b, v2.16b
 	eor		v3.16b, v5.16b, v3.16b
 	st1		{v0.16b-v3.16b}, [x0], #64
-	add		x5, x5, #INTERLEAVE
+	add		x8, x8, #INTERLEAVE
 #endif
-	cbz		w4, .LctroutNx
-.LctrincNx:
-	rev		x7, x5
+	rev		x7, x8
 	ins		v4.d[1], x7
+	cbz		w4, .Lctrout
 	b		.LctrloopNx
-.LctroutNx:
-	sub		x5, x5, #1
-	rev		x7, x5
-	ins		v4.d[1], x7
-	b		.Lctrout
 .Lctr1x:
 	adds		w4, w4, #INTERLEAVE
 	beq		.Lctrout
@@ -342,30 +329,39 @@ AES_ENTRY(aes_ctr_encrypt)
 .Lctrloop:
 	mov		v0.16b, v4.16b
 	encrypt_block	v0, w3, x2, x6, w7
+
+	adds		x8, x8, #1		/* increment BE ctr */
+	rev		x7, x8
+	ins		v4.d[1], x7
+	bcs		.Lctrcarry		/* overflow? */
+
+.Lctrcarrydone:
 	subs		w4, w4, #1
 	bmi		.Lctrhalfblock		/* blocks < 0 means 1/2 block */
 	ld1		{v3.16b}, [x1], #16
 	eor		v3.16b, v0.16b, v3.16b
 	st1		{v3.16b}, [x0], #16
-	beq		.Lctrout
-.Lctrinc:
-	adds		x5, x5, #1		/* increment BE ctr */
-	rev		x7, x5
-	ins		v4.d[1], x7
-	bcc		.Lctrloop		/* no overflow? */
-	umov		x7, v4.d[0]		/* load upper word of ctr  */
-	rev		x7, x7			/* ... to handle the carry */
-	add		x7, x7, #1
-	rev		x7, x7
-	ins		v4.d[0], x7
-	b		.Lctrloop
+	bne		.Lctrloop
+
+.Lctrout:
+	st1		{v4.16b}, [x5]		/* return next CTR value */
+	FRAME_POP
+	ret
+
 .Lctrhalfblock:
 	ld1		{v3.8b}, [x1]
 	eor		v3.8b, v0.8b, v3.8b
 	st1		{v3.8b}, [x0]
-.Lctrout:
 	FRAME_POP
 	ret
+
+.Lctrcarry:
+	umov		x7, v4.d[0]		/* load upper word of ctr  */
+	rev		x7, x7			/* ... to handle the carry */
+	add		x7, x7, #1
+	rev		x7, x7
+	ins		v4.d[0], x7
+	b		.Lctrcarrydone
 AES_ENDPROC(aes_ctr_encrypt)
 	.ltorg
 

+ 7 - 1
arch/arm64/kernel/topology.c

@@ -11,6 +11,7 @@
  * for more details.
  */
 
+#include <linux/acpi.h>
 #include <linux/cpu.h>
 #include <linux/cpumask.h>
 #include <linux/init.h>
@@ -209,7 +210,12 @@ static struct notifier_block init_cpu_capacity_notifier = {
 
 static int __init register_cpufreq_notifier(void)
 {
-	if (cap_parsing_failed)
+	/*
+	 * on ACPI-based systems we need to use the default cpu capacity
+	 * until we have the necessary code to parse the cpu capacity, so
+	 * skip registering cpufreq notifier.
+	 */
+	if (!acpi_disabled || cap_parsing_failed)
 		return -EINVAL;
 
 	if (!alloc_cpumask_var(&cpus_to_visit, GFP_KERNEL)) {

+ 7 - 1
arch/parisc/include/asm/bitops.h

@@ -6,7 +6,7 @@
 #endif
 
 #include <linux/compiler.h>
-#include <asm/types.h>		/* for BITS_PER_LONG/SHIFT_PER_LONG */
+#include <asm/types.h>
 #include <asm/byteorder.h>
 #include <asm/barrier.h>
 #include <linux/atomic.h>
@@ -17,6 +17,12 @@
  * to include/asm-i386/bitops.h or kerneldoc
  */
 
+#if __BITS_PER_LONG == 64
+#define SHIFT_PER_LONG 6
+#else
+#define SHIFT_PER_LONG 5
+#endif
+
 #define CHOP_SHIFTCOUNT(x) (((unsigned long) (x)) & (BITS_PER_LONG - 1))
 
 

+ 0 - 2
arch/parisc/include/uapi/asm/bitsperlong.h

@@ -3,10 +3,8 @@
 
 #if defined(__LP64__)
 #define __BITS_PER_LONG 64
-#define SHIFT_PER_LONG 6
 #else
 #define __BITS_PER_LONG 32
-#define SHIFT_PER_LONG 5
 #endif
 
 #include <asm-generic/bitsperlong.h>

+ 3 - 2
arch/parisc/include/uapi/asm/swab.h

@@ -1,6 +1,7 @@
 #ifndef _PARISC_SWAB_H
 #define _PARISC_SWAB_H
 
+#include <asm/bitsperlong.h>
 #include <linux/types.h>
 #include <linux/compiler.h>
 
@@ -38,7 +39,7 @@ static inline __attribute_const__ __u32 __arch_swab32(__u32 x)
 }
 #define __arch_swab32 __arch_swab32
 
-#if BITS_PER_LONG > 32
+#if __BITS_PER_LONG > 32
 /*
 ** From "PA-RISC 2.0 Architecture", HP Professional Books.
 ** See Appendix I page 8 , "Endian Byte Swapping".
@@ -61,6 +62,6 @@ static inline __attribute_const__ __u64 __arch_swab64(__u64 x)
 	return x;
 }
 #define __arch_swab64 __arch_swab64
-#endif /* BITS_PER_LONG > 32 */
+#endif /* __BITS_PER_LONG > 32 */
 
 #endif /* _PARISC_SWAB_H */

+ 1 - 1
arch/powerpc/Kconfig

@@ -164,7 +164,6 @@ config PPC
 	select ARCH_HAS_SCALED_CPUTIME if VIRT_CPU_ACCOUNTING_NATIVE
 	select HAVE_ARCH_HARDENED_USERCOPY
 	select HAVE_KERNEL_GZIP
-	select HAVE_CC_STACKPROTECTOR
 
 config GENERIC_CSUM
 	def_bool CPU_LITTLE_ENDIAN
@@ -484,6 +483,7 @@ config RELOCATABLE
 	bool "Build a relocatable kernel"
 	depends on (PPC64 && !COMPILE_TEST) || (FLATMEM && (44x || FSL_BOOKE))
 	select NONSTATIC_KERNEL
+	select MODULE_REL_CRCS if MODVERSIONS
 	help
 	  This builds a kernel image that is capable of running at the
 	  location the kernel is loaded at. For ppc32, there is no any

+ 2 - 0
arch/powerpc/include/asm/cpu_has_feature.h

@@ -23,7 +23,9 @@ static __always_inline bool cpu_has_feature(unsigned long feature)
 {
 	int i;
 
+#ifndef __clang__ /* clang can't cope with this */
 	BUILD_BUG_ON(!__builtin_constant_p(feature));
+#endif
 
 #ifdef CONFIG_JUMP_LABEL_FEATURE_CHECK_DEBUG
 	if (!static_key_initialized) {

+ 2 - 0
arch/powerpc/include/asm/mmu.h

@@ -160,7 +160,9 @@ static __always_inline bool mmu_has_feature(unsigned long feature)
 {
 	int i;
 
+#ifndef __clang__ /* clang can't cope with this */
 	BUILD_BUG_ON(!__builtin_constant_p(feature));
+#endif
 
 #ifdef CONFIG_JUMP_LABEL_FEATURE_CHECK_DEBUG
 	if (!static_key_initialized) {

+ 0 - 4
arch/powerpc/include/asm/module.h

@@ -90,9 +90,5 @@ static inline int module_finalize_ftrace(struct module *mod, const Elf_Shdr *sec
 }
 #endif
 
-#if defined(CONFIG_MODVERSIONS) && defined(CONFIG_PPC64)
-#define ARCH_RELOCATES_KCRCTAB
-#define reloc_start PHYSICAL_START
-#endif
 #endif /* __KERNEL__ */
 #endif	/* _ASM_POWERPC_MODULE_H */

+ 0 - 40
arch/powerpc/include/asm/stackprotector.h

@@ -1,40 +0,0 @@
-/*
- * GCC stack protector support.
- *
- * Stack protector works by putting predefined pattern at the start of
- * the stack frame and verifying that it hasn't been overwritten when
- * returning from the function.  The pattern is called stack canary
- * and gcc expects it to be defined by a global variable called
- * "__stack_chk_guard" on PPC.  This unfortunately means that on SMP
- * we cannot have a different canary value per task.
- */
-
-#ifndef _ASM_STACKPROTECTOR_H
-#define _ASM_STACKPROTECTOR_H
-
-#include <linux/random.h>
-#include <linux/version.h>
-#include <asm/reg.h>
-
-extern unsigned long __stack_chk_guard;
-
-/*
- * Initialize the stackprotector canary value.
- *
- * NOTE: this must only be called from functions that never return,
- * and it must always be inlined.
- */
-static __always_inline void boot_init_stack_canary(void)
-{
-	unsigned long canary;
-
-	/* Try to get a semi random initial value. */
-	get_random_bytes(&canary, sizeof(canary));
-	canary ^= mftb();
-	canary ^= LINUX_VERSION_CODE;
-
-	current->stack_canary = canary;
-	__stack_chk_guard = current->stack_canary;
-}
-
-#endif	/* _ASM_STACKPROTECTOR_H */

+ 0 - 4
arch/powerpc/kernel/Makefile

@@ -19,10 +19,6 @@ CFLAGS_init.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
 CFLAGS_btext.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
 CFLAGS_prom.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
 
-# -fstack-protector triggers protection checks in this code,
-# but it is being used too early to link to meaningful stack_chk logic.
-CFLAGS_prom_init.o += $(call cc-option, -fno-stack-protector)
-
 ifdef CONFIG_FUNCTION_TRACER
 # Do not trace early boot code
 CFLAGS_REMOVE_cputable.o = -mno-sched-epilog $(CC_FLAGS_FTRACE)

+ 0 - 3
arch/powerpc/kernel/asm-offsets.c

@@ -91,9 +91,6 @@ int main(void)
 	DEFINE(TI_livepatch_sp, offsetof(struct thread_info, livepatch_sp));
 #endif
 
-#ifdef CONFIG_CC_STACKPROTECTOR
-	DEFINE(TSK_STACK_CANARY, offsetof(struct task_struct, stack_canary));
-#endif
 	DEFINE(KSP, offsetof(struct thread_struct, ksp));
 	DEFINE(PT_REGS, offsetof(struct thread_struct, regs));
 #ifdef CONFIG_BOOKE

+ 1 - 1
arch/powerpc/kernel/eeh_driver.c

@@ -545,7 +545,7 @@ static void *eeh_pe_detach_dev(void *data, void *userdata)
 static void *__eeh_clear_pe_frozen_state(void *data, void *flag)
 {
 	struct eeh_pe *pe = (struct eeh_pe *)data;
-	bool *clear_sw_state = flag;
+	bool clear_sw_state = *(bool *)flag;
 	int i, rc = 1;
 
 	for (i = 0; rc && i < 3; i++)

+ 1 - 5
arch/powerpc/kernel/entry_32.S

@@ -674,11 +674,7 @@ BEGIN_FTR_SECTION
 	mtspr	SPRN_SPEFSCR,r0		/* restore SPEFSCR reg */
 END_FTR_SECTION_IFSET(CPU_FTR_SPE)
 #endif /* CONFIG_SPE */
-#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
-	lwz	r0,TSK_STACK_CANARY(r2)
-	lis	r4,__stack_chk_guard@ha
-	stw	r0,__stack_chk_guard@l(r4)
-#endif
+
 	lwz	r0,_CCR(r1)
 	mtcrf	0xFF,r0
 	/* r3-r12 are destroyed -- Cort */

+ 0 - 8
arch/powerpc/kernel/module_64.c

@@ -286,14 +286,6 @@ static void dedotify_versions(struct modversion_info *vers,
 	for (end = (void *)vers + size; vers < end; vers++)
 		if (vers->name[0] == '.') {
 			memmove(vers->name, vers->name+1, strlen(vers->name));
-#ifdef ARCH_RELOCATES_KCRCTAB
-			/* The TOC symbol has no CRC computed. To avoid CRC
-			 * check failing, we must force it to the expected
-			 * value (see CRC check in module.c).
-			 */
-			if (!strcmp(vers->name, "TOC."))
-				vers->crc = -(unsigned long)reloc_start;
-#endif
 		}
 }
 

+ 0 - 6
arch/powerpc/kernel/process.c

@@ -64,12 +64,6 @@
 #include <linux/kprobes.h>
 #include <linux/kdebug.h>
 
-#ifdef CONFIG_CC_STACKPROTECTOR
-#include <linux/stackprotector.h>
-unsigned long __stack_chk_guard __read_mostly;
-EXPORT_SYMBOL(__stack_chk_guard);
-#endif
-
 /* Transactional Memory debug */
 #ifdef TM_DEBUG_SW
 #define TM_DEBUG(x...) printk(KERN_INFO x)

+ 3 - 0
arch/powerpc/kernel/prom_init.c

@@ -2834,6 +2834,9 @@ static void __init prom_find_boot_cpu(void)
 
 	cpu_pkg = call_prom("instance-to-package", 1, 1, prom_cpu);
 
+	if (!PHANDLE_VALID(cpu_pkg))
+		return;
+
 	prom_getprop(cpu_pkg, "reg", &rval, sizeof(rval));
 	prom.cpu = be32_to_cpu(rval);
 

+ 2 - 2
arch/powerpc/mm/pgtable-radix.c

@@ -65,7 +65,7 @@ int radix__map_kernel_page(unsigned long ea, unsigned long pa,
 		if (!pmdp)
 			return -ENOMEM;
 		if (map_page_size == PMD_SIZE) {
-			ptep = (pte_t *)pudp;
+			ptep = pmdp_ptep(pmdp);
 			goto set_the_pte;
 		}
 		ptep = pte_alloc_kernel(pmdp, ea);
@@ -90,7 +90,7 @@ int radix__map_kernel_page(unsigned long ea, unsigned long pa,
 		}
 		pmdp = pmd_offset(pudp, ea);
 		if (map_page_size == PMD_SIZE) {
-			ptep = (pte_t *)pudp;
+			ptep = pmdp_ptep(pmdp);
 			goto set_the_pte;
 		}
 		if (!pmd_present(*pmdp)) {

+ 8 - 0
arch/s390/kernel/ptrace.c

@@ -963,6 +963,11 @@ static int s390_fpregs_set(struct task_struct *target,
 	if (target == current)
 		save_fpu_regs();
 
+	if (MACHINE_HAS_VX)
+		convert_vx_to_fp(fprs, target->thread.fpu.vxrs);
+	else
+		memcpy(&fprs, target->thread.fpu.fprs, sizeof(fprs));
+
 	/* If setting FPC, must validate it first. */
 	if (count > 0 && pos < offsetof(s390_fp_regs, fprs)) {
 		u32 ufpc[2] = { target->thread.fpu.fpc, 0 };
@@ -1067,6 +1072,9 @@ static int s390_vxrs_low_set(struct task_struct *target,
 	if (target == current)
 		save_fpu_regs();
 
+	for (i = 0; i < __NUM_VXRS_LOW; i++)
+		vxrs[i] = *((__u64 *)(target->thread.fpu.vxrs + i) + 1);
+
 	rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, vxrs, 0, -1);
 	if (rc == 0)
 		for (i = 0; i < __NUM_VXRS_LOW; i++)

+ 4 - 3
arch/s390/mm/pgtable.c

@@ -202,7 +202,7 @@ static inline pgste_t ptep_xchg_start(struct mm_struct *mm,
 	return pgste;
 }
 
-static inline void ptep_xchg_commit(struct mm_struct *mm,
+static inline pte_t ptep_xchg_commit(struct mm_struct *mm,
 				    unsigned long addr, pte_t *ptep,
 				    pgste_t pgste, pte_t old, pte_t new)
 {
@@ -220,6 +220,7 @@ static inline void ptep_xchg_commit(struct mm_struct *mm,
 	} else {
 		*ptep = new;
 	}
+	return old;
 }
 
 pte_t ptep_xchg_direct(struct mm_struct *mm, unsigned long addr,
@@ -231,7 +232,7 @@ pte_t ptep_xchg_direct(struct mm_struct *mm, unsigned long addr,
 	preempt_disable();
 	pgste = ptep_xchg_start(mm, addr, ptep);
 	old = ptep_flush_direct(mm, addr, ptep);
-	ptep_xchg_commit(mm, addr, ptep, pgste, old, new);
+	old = ptep_xchg_commit(mm, addr, ptep, pgste, old, new);
 	preempt_enable();
 	return old;
 }
@@ -246,7 +247,7 @@ pte_t ptep_xchg_lazy(struct mm_struct *mm, unsigned long addr,
 	preempt_disable();
 	pgste = ptep_xchg_start(mm, addr, ptep);
 	old = ptep_flush_lazy(mm, addr, ptep);
-	ptep_xchg_commit(mm, addr, ptep, pgste, old, new);
+	old = ptep_xchg_commit(mm, addr, ptep, pgste, old, new);
 	preempt_enable();
 	return old;
 }

+ 4 - 4
arch/sparc/include/asm/mmu_context_64.h

@@ -35,15 +35,15 @@ void __tsb_context_switch(unsigned long pgd_pa,
 static inline void tsb_context_switch(struct mm_struct *mm)
 {
 	__tsb_context_switch(__pa(mm->pgd),
-			     &mm->context.tsb_block[0],
+			     &mm->context.tsb_block[MM_TSB_BASE],
 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
-			     (mm->context.tsb_block[1].tsb ?
-			      &mm->context.tsb_block[1] :
+			     (mm->context.tsb_block[MM_TSB_HUGE].tsb ?
+			      &mm->context.tsb_block[MM_TSB_HUGE] :
 			      NULL)
 #else
 			     NULL
 #endif
-			     , __pa(&mm->context.tsb_descr[0]));
+			     , __pa(&mm->context.tsb_descr[MM_TSB_BASE]));
 }
 
 void tsb_grow(struct mm_struct *mm,

+ 1 - 1
arch/sparc/kernel/irq_64.c

@@ -1021,7 +1021,7 @@ static void __init alloc_one_queue(unsigned long *pa_ptr, unsigned long qmask)
 	unsigned long order = get_order(size);
 	unsigned long p;
 
-	p = __get_free_pages(GFP_KERNEL, order);
+	p = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
 	if (!p) {
 		prom_printf("SUN4V: Error, cannot allocate queue.\n");
 		prom_halt();

+ 3 - 3
arch/sparc/kernel/sstate.c

@@ -43,8 +43,8 @@ static const char poweroff_msg[32] __attribute__((aligned(32))) =
 	"Linux powering off";
 	"Linux powering off";
 static const char rebooting_msg[32] __attribute__((aligned(32))) =
 static const char rebooting_msg[32] __attribute__((aligned(32))) =
 	"Linux rebooting";
 	"Linux rebooting";
-static const char panicing_msg[32] __attribute__((aligned(32))) =
-	"Linux panicing";
+static const char panicking_msg[32] __attribute__((aligned(32))) =
+	"Linux panicking";
 
 
 static int sstate_reboot_call(struct notifier_block *np, unsigned long type, void *_unused)
 static int sstate_reboot_call(struct notifier_block *np, unsigned long type, void *_unused)
 {
 {
@@ -76,7 +76,7 @@ static struct notifier_block sstate_reboot_notifier = {
 
 
 static int sstate_panic_event(struct notifier_block *n, unsigned long event, void *ptr)
 static int sstate_panic_event(struct notifier_block *n, unsigned long event, void *ptr)
 {
 {
-	do_set_sstate(HV_SOFT_STATE_TRANSITION, panicing_msg);
+	do_set_sstate(HV_SOFT_STATE_TRANSITION, panicking_msg);
 
 
 	return NOTIFY_DONE;
 	return NOTIFY_DONE;
 }
 }

+ 73 - 0
arch/sparc/kernel/traps_64.c

@@ -2051,6 +2051,73 @@ void sun4v_resum_overflow(struct pt_regs *regs)
 	atomic_inc(&sun4v_resum_oflow_cnt);
 }
 
+/* Given a set of registers, get the virtual address that was being accessed
+ * by the faulting instruction at tpc.
+ */
+static unsigned long sun4v_get_vaddr(struct pt_regs *regs)
+{
+	unsigned int insn;
+
+	if (!copy_from_user(&insn, (void __user *)regs->tpc, 4)) {
+		return compute_effective_address(regs, insn,
+						 (insn >> 25) & 0x1f);
+	}
+	return 0;
+}
+
+/* Attempt to handle non-resumable errors generated from userspace.
+ * Returns true if the signal was handled, false otherwise.
+ */
+bool sun4v_nonresum_error_user_handled(struct pt_regs *regs,
+				  struct sun4v_error_entry *ent) {
+
+	unsigned int attrs = ent->err_attrs;
+
+	if (attrs & SUN4V_ERR_ATTRS_MEMORY) {
+		unsigned long addr = ent->err_raddr;
+		siginfo_t info;
+
+		if (addr == ~(u64)0) {
+			/* This seems highly unlikely to ever occur */
+			pr_emerg("SUN4V NON-RECOVERABLE ERROR: Memory error detected in unknown location!\n");
+		} else {
+			unsigned long page_cnt = DIV_ROUND_UP(ent->err_size,
+							      PAGE_SIZE);
+
+			/* Break the unfortunate news. */
+			pr_emerg("SUN4V NON-RECOVERABLE ERROR: Memory failed at %016lX\n",
+				 addr);
+			pr_emerg("SUN4V NON-RECOVERABLE ERROR:   Claiming %lu pages.\n",
+				 page_cnt);
+
+			while (page_cnt-- > 0) {
+				if (pfn_valid(addr >> PAGE_SHIFT))
+					get_page(pfn_to_page(addr >> PAGE_SHIFT));
+				addr += PAGE_SIZE;
+			}
+		}
+		info.si_signo = SIGKILL;
+		info.si_errno = 0;
+		info.si_trapno = 0;
+		force_sig_info(info.si_signo, &info, current);
+
+		return true;
+	}
+	if (attrs & SUN4V_ERR_ATTRS_PIO) {
+		siginfo_t info;
+
+		info.si_signo = SIGBUS;
+		info.si_code = BUS_ADRERR;
+		info.si_addr = (void __user *)sun4v_get_vaddr(regs);
+		force_sig_info(info.si_signo, &info, current);
+
+		return true;
+	}
+
+	/* Default to doing nothing */
+	return false;
+}
+
 /* We run with %pil set to PIL_NORMAL_MAX and PSTATE_IE enabled in %pstate.
  * Log the event, clear the first word of the entry, and die.
  */
@@ -2075,6 +2142,12 @@ void sun4v_nonresum_error(struct pt_regs *regs, unsigned long offset)
 
 	put_cpu();
 
+	if (!(regs->tstate & TSTATE_PRIV) &&
+	    sun4v_nonresum_error_user_handled(regs, &local_copy)) {
+		/* DON'T PANIC: This userspace error was handled. */
+		return;
+	}
+
 #ifdef CONFIG_PCI
 	/* Check for the special PCI poke sequence. */
 	if (pci_poke_in_progress && pci_poke_cpu == cpu) {

+ 26 - 34
arch/x86/events/intel/rapl.c

@@ -161,7 +161,13 @@ static u64 rapl_timer_ms;
 
 static inline struct rapl_pmu *cpu_to_rapl_pmu(unsigned int cpu)
 {
-	return rapl_pmus->pmus[topology_logical_package_id(cpu)];
+	unsigned int pkgid = topology_logical_package_id(cpu);
+
+	/*
+	 * The unsigned check also catches the '-1' return value for non
+	 * existent mappings in the topology map.
+	 */
+	return pkgid < rapl_pmus->maxpkg ? rapl_pmus->pmus[pkgid] : NULL;
 }
 
 static inline u64 rapl_read_counter(struct perf_event *event)
@@ -402,6 +408,8 @@ static int rapl_pmu_event_init(struct perf_event *event)
 
 	/* must be done before validate_group */
 	pmu = cpu_to_rapl_pmu(event->cpu);
+	if (!pmu)
+		return -EINVAL;
 	event->cpu = pmu->cpu;
 	event->pmu_private = pmu;
 	event->hw.event_base = msr;
@@ -585,6 +593,20 @@ static int rapl_cpu_online(unsigned int cpu)
 	struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu);
 	int target;
 
+	if (!pmu) {
+		pmu = kzalloc_node(sizeof(*pmu), GFP_KERNEL, cpu_to_node(cpu));
+		if (!pmu)
+			return -ENOMEM;
+
+		raw_spin_lock_init(&pmu->lock);
+		INIT_LIST_HEAD(&pmu->active_list);
+		pmu->pmu = &rapl_pmus->pmu;
+		pmu->timer_interval = ms_to_ktime(rapl_timer_ms);
+		rapl_hrtimer_init(pmu);
+
+		rapl_pmus->pmus[topology_logical_package_id(cpu)] = pmu;
+	}
+
 	/*
 	 * Check if there is an online cpu in the package which collects rapl
 	 * events already.
@@ -598,27 +620,6 @@ static int rapl_cpu_online(unsigned int cpu)
 	return 0;
 }
 
-static int rapl_cpu_prepare(unsigned int cpu)
-{
-	struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu);
-
-	if (pmu)
-		return 0;
-
-	pmu = kzalloc_node(sizeof(*pmu), GFP_KERNEL, cpu_to_node(cpu));
-	if (!pmu)
-		return -ENOMEM;
-
-	raw_spin_lock_init(&pmu->lock);
-	INIT_LIST_HEAD(&pmu->active_list);
-	pmu->pmu = &rapl_pmus->pmu;
-	pmu->timer_interval = ms_to_ktime(rapl_timer_ms);
-	pmu->cpu = -1;
-	rapl_hrtimer_init(pmu);
-	rapl_pmus->pmus[topology_logical_package_id(cpu)] = pmu;
-	return 0;
-}
-
 static int rapl_check_hw_unit(bool apply_quirk)
 {
 	u64 msr_rapl_power_unit_bits;
@@ -803,29 +804,21 @@ static int __init rapl_pmu_init(void)
 	/*
 	 * Install callbacks. Core will call them for each online cpu.
 	 */
-
-	ret = cpuhp_setup_state(CPUHP_PERF_X86_RAPL_PREP, "perf/x86/rapl:prepare",
-				rapl_cpu_prepare, NULL);
-	if (ret)
-		goto out;
-
 	ret = cpuhp_setup_state(CPUHP_AP_PERF_X86_RAPL_ONLINE,
 				"perf/x86/rapl:online",
 				rapl_cpu_online, rapl_cpu_offline);
 	if (ret)
-		goto out1;
+		goto out;
 
 	ret = perf_pmu_register(&rapl_pmus->pmu, "power", -1);
 	if (ret)
-		goto out2;
+		goto out1;
 
 	rapl_advertise();
 	return 0;
 
-out2:
-	cpuhp_remove_state(CPUHP_AP_PERF_X86_RAPL_ONLINE);
 out1:
-	cpuhp_remove_state(CPUHP_PERF_X86_RAPL_PREP);
+	cpuhp_remove_state(CPUHP_AP_PERF_X86_RAPL_ONLINE);
 out:
 	pr_warn("Initialization failed (%d), disabled\n", ret);
 	cleanup_rapl_pmus();
@@ -836,7 +829,6 @@ module_init(rapl_pmu_init);
 static void __exit intel_rapl_exit(void)
 {
 	cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_RAPL_ONLINE);
-	cpuhp_remove_state_nocalls(CPUHP_PERF_X86_RAPL_PREP);
 	perf_pmu_unregister(&rapl_pmus->pmu);
 	cleanup_rapl_pmus();
 }

+ 91 - 141
arch/x86/events/intel/uncore.c

@@ -100,7 +100,13 @@ ssize_t uncore_event_show(struct kobject *kobj,
 
 
 struct intel_uncore_box *uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu)
 struct intel_uncore_box *uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu)
 {
 {
-	return pmu->boxes[topology_logical_package_id(cpu)];
+	unsigned int pkgid = topology_logical_package_id(cpu);
+
+	/*
+	 * The unsigned check also catches the '-1' return value for non
+	 * existent mappings in the topology map.
+	 */
+	return pkgid < max_packages ? pmu->boxes[pkgid] : NULL;
 }
 }
 
 
 u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event)
 u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event)
@@ -764,30 +770,6 @@ static void uncore_pmu_unregister(struct intel_uncore_pmu *pmu)
 	pmu->registered = false;
 	pmu->registered = false;
 }
 }
 
 
-static void __uncore_exit_boxes(struct intel_uncore_type *type, int cpu)
-{
-	struct intel_uncore_pmu *pmu = type->pmus;
-	struct intel_uncore_box *box;
-	int i, pkg;
-
-	if (pmu) {
-		pkg = topology_physical_package_id(cpu);
-		for (i = 0; i < type->num_boxes; i++, pmu++) {
-			box = pmu->boxes[pkg];
-			if (box)
-				uncore_box_exit(box);
-		}
-	}
-}
-
-static void uncore_exit_boxes(void *dummy)
-{
-	struct intel_uncore_type **types;
-
-	for (types = uncore_msr_uncores; *types; types++)
-		__uncore_exit_boxes(*types++, smp_processor_id());
-}
-
 static void uncore_free_boxes(struct intel_uncore_pmu *pmu)
 static void uncore_free_boxes(struct intel_uncore_pmu *pmu)
 {
 {
 	int pkg;
 	int pkg;
@@ -1058,86 +1040,6 @@ static void uncore_pci_exit(void)
 	}
 	}
 }
 }
 
 
-static int uncore_cpu_dying(unsigned int cpu)
-{
-	struct intel_uncore_type *type, **types = uncore_msr_uncores;
-	struct intel_uncore_pmu *pmu;
-	struct intel_uncore_box *box;
-	int i, pkg;
-
-	pkg = topology_logical_package_id(cpu);
-	for (; *types; types++) {
-		type = *types;
-		pmu = type->pmus;
-		for (i = 0; i < type->num_boxes; i++, pmu++) {
-			box = pmu->boxes[pkg];
-			if (box && atomic_dec_return(&box->refcnt) == 0)
-				uncore_box_exit(box);
-		}
-	}
-	return 0;
-}
-
-static int first_init;
-
-static int uncore_cpu_starting(unsigned int cpu)
-{
-	struct intel_uncore_type *type, **types = uncore_msr_uncores;
-	struct intel_uncore_pmu *pmu;
-	struct intel_uncore_box *box;
-	int i, pkg, ncpus = 1;
-
-	if (first_init) {
-		/*
-		 * On init we get the number of online cpus in the package
-		 * and set refcount for all of them.
-		 */
-		ncpus = cpumask_weight(topology_core_cpumask(cpu));
-	}
-
-	pkg = topology_logical_package_id(cpu);
-	for (; *types; types++) {
-		type = *types;
-		pmu = type->pmus;
-		for (i = 0; i < type->num_boxes; i++, pmu++) {
-			box = pmu->boxes[pkg];
-			if (!box)
-				continue;
-			/* The first cpu on a package activates the box */
-			if (atomic_add_return(ncpus, &box->refcnt) == ncpus)
-				uncore_box_init(box);
-		}
-	}
-
-	return 0;
-}
-
-static int uncore_cpu_prepare(unsigned int cpu)
-{
-	struct intel_uncore_type *type, **types = uncore_msr_uncores;
-	struct intel_uncore_pmu *pmu;
-	struct intel_uncore_box *box;
-	int i, pkg;
-
-	pkg = topology_logical_package_id(cpu);
-	for (; *types; types++) {
-		type = *types;
-		pmu = type->pmus;
-		for (i = 0; i < type->num_boxes; i++, pmu++) {
-			if (pmu->boxes[pkg])
-				continue;
-			/* First cpu of a package allocates the box */
-			box = uncore_alloc_box(type, cpu_to_node(cpu));
-			if (!box)
-				return -ENOMEM;
-			box->pmu = pmu;
-			box->pkgid = pkg;
-			pmu->boxes[pkg] = box;
-		}
-	}
-	return 0;
-}
-
 static void uncore_change_type_ctx(struct intel_uncore_type *type, int old_cpu,
 static void uncore_change_type_ctx(struct intel_uncore_type *type, int old_cpu,
 				   int new_cpu)
 				   int new_cpu)
 {
 {
@@ -1177,12 +1079,14 @@ static void uncore_change_context(struct intel_uncore_type **uncores,
 
 
 static int uncore_event_cpu_offline(unsigned int cpu)
 static int uncore_event_cpu_offline(unsigned int cpu)
 {
 {
-	int target;
+	struct intel_uncore_type *type, **types = uncore_msr_uncores;
+	struct intel_uncore_pmu *pmu;
+	struct intel_uncore_box *box;
+	int i, pkg, target;
 
 
 	/* Check if exiting cpu is used for collecting uncore events */
 	/* Check if exiting cpu is used for collecting uncore events */
 	if (!cpumask_test_and_clear_cpu(cpu, &uncore_cpu_mask))
 	if (!cpumask_test_and_clear_cpu(cpu, &uncore_cpu_mask))
-		return 0;
-
+		goto unref;
 	/* Find a new cpu to collect uncore events */
 	/* Find a new cpu to collect uncore events */
 	target = cpumask_any_but(topology_core_cpumask(cpu), cpu);
 	target = cpumask_any_but(topology_core_cpumask(cpu), cpu);
 
 
@@ -1194,12 +1098,82 @@ static int uncore_event_cpu_offline(unsigned int cpu)
 
 
 	uncore_change_context(uncore_msr_uncores, cpu, target);
 	uncore_change_context(uncore_msr_uncores, cpu, target);
 	uncore_change_context(uncore_pci_uncores, cpu, target);
 	uncore_change_context(uncore_pci_uncores, cpu, target);
+
+unref:
+	/* Clear the references */
+	pkg = topology_logical_package_id(cpu);
+	for (; *types; types++) {
+		type = *types;
+		pmu = type->pmus;
+		for (i = 0; i < type->num_boxes; i++, pmu++) {
+			box = pmu->boxes[pkg];
+			if (box && atomic_dec_return(&box->refcnt) == 0)
+				uncore_box_exit(box);
+		}
+	}
 	return 0;
 	return 0;
 }
 }
 
 
+static int allocate_boxes(struct intel_uncore_type **types,
+			 unsigned int pkg, unsigned int cpu)
+{
+	struct intel_uncore_box *box, *tmp;
+	struct intel_uncore_type *type;
+	struct intel_uncore_pmu *pmu;
+	LIST_HEAD(allocated);
+	int i;
+
+	/* Try to allocate all required boxes */
+	for (; *types; types++) {
+		type = *types;
+		pmu = type->pmus;
+		for (i = 0; i < type->num_boxes; i++, pmu++) {
+			if (pmu->boxes[pkg])
+				continue;
+			box = uncore_alloc_box(type, cpu_to_node(cpu));
+			if (!box)
+				goto cleanup;
+			box->pmu = pmu;
+			box->pkgid = pkg;
+			list_add(&box->active_list, &allocated);
+		}
+	}
+	/* Install them in the pmus */
+	list_for_each_entry_safe(box, tmp, &allocated, active_list) {
+		list_del_init(&box->active_list);
+		box->pmu->boxes[pkg] = box;
+	}
+	return 0;
+
+cleanup:
+	list_for_each_entry_safe(box, tmp, &allocated, active_list) {
+		list_del_init(&box->active_list);
+		kfree(box);
+	}
+	return -ENOMEM;
+}
+
 static int uncore_event_cpu_online(unsigned int cpu)
 static int uncore_event_cpu_online(unsigned int cpu)
 {
 {
-	int target;
+	struct intel_uncore_type *type, **types = uncore_msr_uncores;
+	struct intel_uncore_pmu *pmu;
+	struct intel_uncore_box *box;
+	int i, ret, pkg, target;
+
+	pkg = topology_logical_package_id(cpu);
+	ret = allocate_boxes(types, pkg, cpu);
+	if (ret)
+		return ret;
+
+	for (; *types; types++) {
+		type = *types;
+		pmu = type->pmus;
+		for (i = 0; i < type->num_boxes; i++, pmu++) {
+			box = pmu->boxes[pkg];
+			if (!box && atomic_inc_return(&box->refcnt) == 1)
+				uncore_box_init(box);
+		}
+	}
 
 	/*
 	 * Check if there is an online cpu in the package
@@ -1389,38 +1363,16 @@ static int __init intel_uncore_init(void)
 	if (cret && pret)
 		return -ENODEV;
 
-	/*
-	 * Install callbacks. Core will call them for each online cpu.
-	 *
-	 * The first online cpu of each package allocates and takes
-	 * the refcounts for all other online cpus in that package.
-	 * If msrs are not enabled no allocation is required and
-	 * uncore_cpu_prepare() is not called for each online cpu.
-	 */
-	if (!cret) {
-	       ret = cpuhp_setup_state(CPUHP_PERF_X86_UNCORE_PREP,
-				       "perf/x86/intel/uncore:prepare",
-				       uncore_cpu_prepare, NULL);
-		if (ret)
-			goto err;
-	} else {
-		cpuhp_setup_state_nocalls(CPUHP_PERF_X86_UNCORE_PREP,
-					  "perf/x86/intel/uncore:prepare",
-					  uncore_cpu_prepare, NULL);
-	}
-	first_init = 1;
-	cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_STARTING,
-			  "perf/x86/uncore:starting",
-			  uncore_cpu_starting, uncore_cpu_dying);
-	first_init = 0;
-	cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE,
-			  "perf/x86/uncore:online",
-			  uncore_event_cpu_online, uncore_event_cpu_offline);
+	/* Install hotplug callbacks to setup the targets for each package */
+	ret = cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE,
+				"perf/x86/intel/uncore:online",
+				uncore_event_cpu_online,
+				uncore_event_cpu_offline);
+	if (ret)
+		goto err;
 	return 0;
 
 err:
-	/* Undo box->init_box() */
-	on_each_cpu_mask(&uncore_cpu_mask, uncore_exit_boxes, NULL, 1);
 	uncore_types_exit(uncore_msr_uncores);
 	uncore_pci_exit();
 	return ret;
@@ -1429,9 +1381,7 @@ module_init(intel_uncore_init);
 
 static void __exit intel_uncore_exit(void)
 {
-	cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_UNCORE_ONLINE);
-	cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_UNCORE_STARTING);
-	cpuhp_remove_state_nocalls(CPUHP_PERF_X86_UNCORE_PREP);
+	cpuhp_remove_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE);
 	uncore_types_exit(uncore_msr_uncores);
 	uncore_pci_exit();
 }
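
The uncore rework above ties box lifetime to a per-package refcount: the first CPU of a package to come online allocates and initializes the boxes (atomic_inc_return(...) == 1), and the last one to go offline tears them down (atomic_dec_return(...) == 0). A minimal userspace sketch of that discipline, with illustrative names rather than the kernel API:

#include <stdatomic.h>
#include <stdio.h>

/* Illustrative stand-in for struct intel_uncore_box. */
struct box {
	atomic_int refcnt;
	int initialized;
};

/* First reference (0 -> 1) performs the one-time init, mirroring
 * uncore_box_init() in uncore_event_cpu_online(). */
static void box_get(struct box *b)
{
	if (atomic_fetch_add(&b->refcnt, 1) == 0)
		b->initialized = 1;
}

/* Last reference (1 -> 0) tears the box down, mirroring
 * uncore_box_exit() in uncore_event_cpu_offline(). */
static void box_put(struct box *b)
{
	if (atomic_fetch_sub(&b->refcnt, 1) == 1)
		b->initialized = 0;
}

int main(void)
{
	struct box b = { .refcnt = 0, .initialized = 0 };

	box_get(&b);	/* first online CPU in the package: init runs */
	box_get(&b);	/* second CPU: refcount only */
	box_put(&b);
	box_put(&b);	/* last CPU offline: exit runs */
	printf("initialized=%d refcnt=%d\n", b.initialized, b.refcnt);
	return 0;
}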

+ 1 - 0
arch/x86/include/asm/microcode.h

@@ -140,6 +140,7 @@ extern void __init load_ucode_bsp(void);
 extern void load_ucode_ap(void);
 void reload_early_microcode(void);
 extern bool get_builtin_firmware(struct cpio_data *cd, const char *name);
+extern bool initrd_gone;
 #else
 static inline int __init microcode_init(void)			{ return 0; };
 static inline void __init load_ucode_bsp(void)			{ }

+ 2 - 0
arch/x86/kernel/apic/io_apic.c

@@ -2117,6 +2117,7 @@ static inline void __init check_timer(void)
 			if (idx != -1 && irq_trigger(idx))
 				unmask_ioapic_irq(irq_get_chip_data(0));
 		}
+		irq_domain_deactivate_irq(irq_data);
 		irq_domain_activate_irq(irq_data);
 		if (timer_irq_works()) {
 			if (disable_timer_pin_1 > 0)
@@ -2138,6 +2139,7 @@ static inline void __init check_timer(void)
 		 * legacy devices should be connected to IO APIC #0
 		 */
 		replace_pin_at_irq_node(data, node, apic1, pin1, apic2, pin2);
+		irq_domain_deactivate_irq(irq_data);
 		irq_domain_activate_irq(irq_data);
 		legacy_pic->unmask(0);
 		if (timer_irq_works()) {
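
Both check_timer() hunks insert irq_domain_deactivate_irq() immediately before irq_domain_activate_irq(): activation is skipped when the bookkeeping already says "active", so a stale flag would leave the hardware unprogrammed. A small self-contained model of that pitfall, under invented names:

#include <stdbool.h>
#include <stdio.h>

/* Invented stand-in for the activation state an irqdomain keeps. */
struct irq_state {
	bool activated;		/* software bookkeeping */
	bool hw_programmed;	/* what the "hardware" actually holds */
};

static void activate(struct irq_state *s)
{
	if (s->activated)
		return;		/* already active: hardware is NOT touched */
	s->hw_programmed = true;
	s->activated = true;
}

static void deactivate(struct irq_state *s)
{
	s->activated = false;
}

int main(void)
{
	/* Resume-like situation: flag says active, hardware was reset. */
	struct irq_state s = { .activated = true, .hw_programmed = false };

	activate(&s);			/* no-op, the IRQ stays dead */
	printf("without deactivate: %d\n", s.hw_programmed);

	deactivate(&s);			/* what the patch adds */
	activate(&s);			/* now actually reprograms the route */
	printf("with deactivate:    %d\n", s.hw_programmed);
	return 0;
}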

+ 12 - 19
arch/x86/kernel/cpu/mcheck/mce.c

@@ -1373,20 +1373,15 @@ static unsigned long mce_adjust_timer_default(unsigned long interval)
 
 static unsigned long (*mce_adjust_timer)(unsigned long interval) = mce_adjust_timer_default;
 
-static void __restart_timer(struct timer_list *t, unsigned long interval)
+static void __start_timer(struct timer_list *t, unsigned long interval)
 {
 	unsigned long when = jiffies + interval;
 	unsigned long flags;
 
 	local_irq_save(flags);
 
-	if (timer_pending(t)) {
-		if (time_before(when, t->expires))
-			mod_timer(t, when);
-	} else {
-		t->expires = round_jiffies(when);
-		add_timer_on(t, smp_processor_id());
-	}
+	if (!timer_pending(t) || time_before(when, t->expires))
+		mod_timer(t, round_jiffies(when));
 
 	local_irq_restore(flags);
 }
@@ -1421,7 +1416,7 @@ static void mce_timer_fn(unsigned long data)
 
 done:
 	__this_cpu_write(mce_next_interval, iv);
-	__restart_timer(t, iv);
+	__start_timer(t, iv);
 }
 
 /*
@@ -1432,7 +1427,7 @@ void mce_timer_kick(unsigned long interval)
 	struct timer_list *t = this_cpu_ptr(&mce_timer);
 	unsigned long iv = __this_cpu_read(mce_next_interval);
 
-	__restart_timer(t, interval);
+	__start_timer(t, interval);
 
 	if (interval < iv)
 		__this_cpu_write(mce_next_interval, interval);
@@ -1779,17 +1774,15 @@ static void __mcheck_cpu_clear_vendor(struct cpuinfo_x86 *c)
 	}
 }
 
-static void mce_start_timer(unsigned int cpu, struct timer_list *t)
+static void mce_start_timer(struct timer_list *t)
 {
 	unsigned long iv = check_interval * HZ;
 
 	if (mca_cfg.ignore_ce || !iv)
 		return;
 
-	per_cpu(mce_next_interval, cpu) = iv;
-
-	t->expires = round_jiffies(jiffies + iv);
-	add_timer_on(t, cpu);
+	this_cpu_write(mce_next_interval, iv);
+	__start_timer(t, iv);
 }
 
 static void __mcheck_cpu_setup_timer(void)
@@ -1806,7 +1799,7 @@ static void __mcheck_cpu_init_timer(void)
 	unsigned int cpu = smp_processor_id();
 
 	setup_pinned_timer(t, mce_timer_fn, cpu);
-	mce_start_timer(cpu, t);
+	mce_start_timer(t);
 }
 
 /* Handle unconfigured int18 (should never happen) */
@@ -2566,7 +2559,7 @@ static int mce_cpu_dead(unsigned int cpu)
 
 static int mce_cpu_online(unsigned int cpu)
 {
-	struct timer_list *t = &per_cpu(mce_timer, cpu);
+	struct timer_list *t = this_cpu_ptr(&mce_timer);
 	int ret;
 
 	mce_device_create(cpu);
@@ -2577,13 +2570,13 @@ static int mce_cpu_online(unsigned int cpu)
 		return ret;
 	}
 	mce_reenable_cpu();
-	mce_start_timer(cpu, t);
+	mce_start_timer(t);
 	return 0;
 }
 
 static int mce_cpu_pre_down(unsigned int cpu)
 {
-	struct timer_list *t = &per_cpu(mce_timer, cpu);
+	struct timer_list *t = this_cpu_ptr(&mce_timer);
 
 	mce_disable_cpu();
 	del_timer_sync(t);
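
The consolidated __start_timer() arms the timer whenever it is idle or the new deadline is earlier than the pending one, replacing the separate mod_timer()/add_timer_on() branches. The same guard in a runnable sketch, with plain longs standing in for jiffies:

#include <stdbool.h>
#include <stdio.h>

static long timer_expires;
static bool timer_armed;

/* Models mod_timer(): (re)arms the one timer at the given deadline. */
static void mod_timer_model(long when)
{
	timer_expires = when;
	timer_armed = true;
}

/* Models __start_timer(): arm only if idle or if the new deadline
 * beats the pending one. */
static void start_timer(long now, long interval)
{
	long when = now + interval;

	if (!timer_armed || when < timer_expires)
		mod_timer_model(when);
}

int main(void)
{
	start_timer(0, 100);	/* idle: arms at t=100 */
	start_timer(0, 50);	/* earlier deadline: rearms at t=50 */
	start_timer(0, 200);	/* later deadline: left alone */
	printf("expires=%ld\n", timer_expires);	/* prints 50 */
	return 0;
}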

+ 3 - 2
arch/x86/kernel/cpu/microcode/amd.c

@@ -384,8 +384,9 @@ void load_ucode_amd_ap(unsigned int family)
 reget:
 		if (!get_builtin_microcode(&cp, family)) {
 #ifdef CONFIG_BLK_DEV_INITRD
-			cp = find_cpio_data(ucode_path, (void *)initrd_start,
-					    initrd_end - initrd_start, NULL);
+			if (!initrd_gone)
+				cp = find_cpio_data(ucode_path, (void *)initrd_start,
+						    initrd_end - initrd_start, NULL);
 #endif
 			if (!(cp.data && cp.size)) {
 				/*

+ 17 - 5
arch/x86/kernel/cpu/microcode/core.c

@@ -46,6 +46,8 @@
 static struct microcode_ops	*microcode_ops;
 static bool dis_ucode_ldr = true;
 
+bool initrd_gone;
+
 LIST_HEAD(microcode_cache);
 
 /*
@@ -190,21 +192,24 @@ void load_ucode_ap(void)
 static int __init save_microcode_in_initrd(void)
 {
 	struct cpuinfo_x86 *c = &boot_cpu_data;
+	int ret = -EINVAL;
 
 	switch (c->x86_vendor) {
 	case X86_VENDOR_INTEL:
 		if (c->x86 >= 6)
-			return save_microcode_in_initrd_intel();
+			ret = save_microcode_in_initrd_intel();
 		break;
 	case X86_VENDOR_AMD:
 		if (c->x86 >= 0x10)
-			return save_microcode_in_initrd_amd(c->x86);
+			ret = save_microcode_in_initrd_amd(c->x86);
 		break;
 	default:
 		break;
 	}
 
-	return -EINVAL;
+	initrd_gone = true;
+
+	return ret;
 }
 
 struct cpio_data find_microcode_in_initrd(const char *path, bool use_pa)
@@ -247,9 +252,16 @@ struct cpio_data find_microcode_in_initrd(const char *path, bool use_pa)
 	 * has the virtual address of the beginning of the initrd. It also
 	 * possibly relocates the ramdisk. In either case, initrd_start contains
 	 * the updated address so use that instead.
+	 *
+	 * initrd_gone is for the hotplug case where we've thrown out initrd
+	 * already.
 	 */
-	if (!use_pa && initrd_start)
-		start = initrd_start;
+	if (!use_pa) {
+		if (initrd_gone)
+			return (struct cpio_data){ NULL, 0, "" };
+		if (initrd_start)
+			start = initrd_start;
+	}
 
 	return find_cpio_data(path, (void *)start, size, NULL);
 #else /* !CONFIG_BLK_DEV_INITRD */

+ 1 - 8
arch/x86/kernel/cpu/microcode/intel.c

@@ -41,7 +41,7 @@
 
 static const char ucode_path[] = "kernel/x86/microcode/GenuineIntel.bin";
 
-/* Current microcode patch used in early patching */
+/* Current microcode patch used in early patching on the APs. */
 struct microcode_intel *intel_ucode_patch;
 
 static inline bool cpu_signatures_match(unsigned int s1, unsigned int p1,
@@ -607,12 +607,6 @@ int __init save_microcode_in_initrd_intel(void)
 	struct ucode_cpu_info uci;
 	struct cpio_data cp;
 
-	/*
-	 * AP loading didn't find any microcode patch, no need to save anything.
-	 */
-	if (!intel_ucode_patch || IS_ERR(intel_ucode_patch))
-		return 0;
-
 	if (!load_builtin_intel_microcode(&cp))
 		cp = find_microcode_in_initrd(ucode_path, false);
 
@@ -628,7 +622,6 @@ int __init save_microcode_in_initrd_intel(void)
 	return 0;
 }
 
-
 /*
  * @res_patch, output: a pointer to the patch we found.
  */

+ 3 - 1
arch/x86/kernel/fpu/core.c

@@ -9,6 +9,7 @@
 #include <asm/fpu/regset.h>
 #include <asm/fpu/signal.h>
 #include <asm/fpu/types.h>
+#include <asm/fpu/xstate.h>
 #include <asm/traps.h>
 
 #include <linux/hardirq.h>
@@ -183,7 +184,8 @@ void fpstate_init(union fpregs_state *state)
 	 * it will #GP. Make sure it is replaced after the memset().
 	 */
 	if (static_cpu_has(X86_FEATURE_XSAVES))
-		state->xsave.header.xcomp_bv = XCOMP_BV_COMPACTED_FORMAT;
+		state->xsave.header.xcomp_bv = XCOMP_BV_COMPACTED_FORMAT |
+					       xfeatures_mask;
 
 	if (static_cpu_has(X86_FEATURE_FXSR))
 		fpstate_init_fxstate(&state->fxsave);

+ 1 - 0
arch/x86/kernel/hpet.c

@@ -352,6 +352,7 @@ static int hpet_resume(struct clock_event_device *evt, int timer)
 	} else {
 		struct hpet_dev *hdev = EVT_TO_HPET_DEV(evt);
 
+		irq_domain_deactivate_irq(irq_get_irq_data(hdev->irq));
 		irq_domain_activate_irq(irq_get_irq_data(hdev->irq));
 		disable_irq(hdev->irq);
 		irq_set_affinity(hdev->irq, cpumask_of(hdev->cpu));

+ 1 - 0
arch/x86/kvm/x86.c

@@ -3182,6 +3182,7 @@ static void fill_xsave(u8 *dest, struct kvm_vcpu *vcpu)
 	memcpy(dest, xsave, XSAVE_HDR_OFFSET);
 
 	/* Set XSTATE_BV */
+	xstate_bv &= vcpu->arch.guest_supported_xcr0 | XFEATURE_MASK_FPSSE;
 	*(u64 *)(dest + XSAVE_HDR_OFFSET) = xstate_bv;
 
 	/*

+ 1 - 1
arch/xtensa/kernel/setup.c

@@ -419,7 +419,7 @@ subsys_initcall(topology_init);
 
 void cpu_reset(void)
 {
-#if XCHAL_HAVE_PTP_MMU
+#if XCHAL_HAVE_PTP_MMU && IS_ENABLED(CONFIG_MMU)
 	local_irq_disable();
 	/*
 	 * We have full MMU: all autoload ways, ways 7, 8 and 9 of DTLB must

+ 1 - 0
crypto/algapi.c

@@ -356,6 +356,7 @@ int crypto_register_alg(struct crypto_alg *alg)
 	struct crypto_larval *larval;
 	int err;
 
+	alg->cra_flags &= ~CRYPTO_ALG_DEAD;
 	err = crypto_check_alg(alg);
 	if (err)
 		return err;

+ 2 - 7
drivers/acpi/acpica/tbdata.c

@@ -852,23 +852,18 @@ acpi_tb_install_and_load_table(acpi_physical_address address,
 
 	ACPI_FUNCTION_TRACE(tb_install_and_load_table);
 
-	(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
-
 	/* Install the table and load it into the namespace */
 
 	status = acpi_tb_install_standard_table(address, flags, TRUE,
 						override, &i);
 	if (ACPI_FAILURE(status)) {
-		goto unlock_and_exit;
+		goto exit;
 	}
 
-	(void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
 	status = acpi_tb_load_table(i, acpi_gbl_root_node);
-	(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
 
-unlock_and_exit:
+exit:
 	*table_index = i;
-	(void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
 	return_ACPI_STATUS(status);
 }
 
 

+ 15 - 2
drivers/acpi/acpica/tbinstal.c

@@ -217,6 +217,10 @@ acpi_tb_install_standard_table(acpi_physical_address address,
 		goto release_and_exit;
 	}
 
+	/* Acquire the table lock */
+
+	(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
+
 	if (reload) {
 		/*
 		 * Validate the incoming table signature.
@@ -244,7 +248,7 @@ acpi_tb_install_standard_table(acpi_physical_address address,
 					 new_table_desc.signature.integer));
 
 			status = AE_BAD_SIGNATURE;
-			goto release_and_exit;
+			goto unlock_and_exit;
 		}
 
 		/* Check if table is already registered */
@@ -279,7 +283,7 @@ acpi_tb_install_standard_table(acpi_physical_address address,
 				/* Table is still loaded, this is an error */
 
 				status = AE_ALREADY_EXISTS;
-				goto release_and_exit;
+				goto unlock_and_exit;
 			} else {
 				/*
 				 * Table was unloaded, allow it to be reloaded.
@@ -290,6 +294,7 @@ acpi_tb_install_standard_table(acpi_physical_address address,
 				 * indicate the re-installation.
 				 */
 				acpi_tb_uninstall_table(&new_table_desc);
+				(void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
 				*table_index = i;
 				return_ACPI_STATUS(AE_OK);
 			}
@@ -303,11 +308,19 @@ acpi_tb_install_standard_table(acpi_physical_address address,
 
 	/* Invoke table handler if present */
 
+	(void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
 	if (acpi_gbl_table_handler) {
 		(void)acpi_gbl_table_handler(ACPI_TABLE_EVENT_INSTALL,
 					     new_table_desc.pointer,
 					     acpi_gbl_table_handler_context);
 	}
+	(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
+
+unlock_and_exit:
+
+	/* Release the table lock */
+
+	(void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
 
 release_and_exit:
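
Taken together, the two ACPICA hunks move the table lock into acpi_tb_install_standard_table() and drop it around the user-supplied table handler, so the callback never runs with the mutex held. A hedged pthread model of that lock-scope pattern (all names invented):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static void (*table_handler)(const char *event);

static void example_handler(const char *event)
{
	/* Free to take other locks here: table_lock is not held. */
	printf("handler: %s\n", event);
}

static void install_table(void)
{
	pthread_mutex_lock(&table_lock);
	/* ... validate signature, check for duplicates, register ... */
	pthread_mutex_unlock(&table_lock);

	if (table_handler)
		table_handler("install");	/* callback runs unlocked */

	pthread_mutex_lock(&table_lock);
	/* ... finish bookkeeping, then the unlock_and_exit path ... */
	pthread_mutex_unlock(&table_lock);
}

int main(void)
{
	table_handler = example_handler;
	install_table();
	return 0;
}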
 
 

+ 0 - 8
drivers/acpi/sleep.c

@@ -674,14 +674,6 @@ static void acpi_sleep_suspend_setup(void)
 		if (acpi_sleep_state_supported(i))
 			sleep_states[i] = 1;
 
-	/*
-	 * Use suspend-to-idle by default if ACPI_FADT_LOW_POWER_S0 is set and
-	 * the default suspend mode was not selected from the command line.
-	 */
-	if (acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0 &&
-	    mem_sleep_default > PM_SUSPEND_MEM)
-		mem_sleep_default = PM_SUSPEND_FREEZE;
-
 	suspend_set_ops(old_suspend_ordering ?
 		&acpi_suspend_ops_old : &acpi_suspend_ops);
 	freeze_set_ops(&acpi_freeze_ops);

+ 0 - 11
drivers/acpi/video_detect.c

@@ -305,17 +305,6 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
 		DMI_MATCH(DMI_PRODUCT_NAME, "Dell System XPS L702X"),
 		},
 	},
-	{
-	/* https://bugzilla.redhat.com/show_bug.cgi?id=1204476 */
-	/* https://bugs.launchpad.net/ubuntu/+source/linux-lts-trusty/+bug/1416940 */
-	.callback = video_detect_force_native,
-	.ident = "HP Pavilion dv6",
-	.matches = {
-		DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
-		DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv6 Notebook PC"),
-		},
-	},
-
 	{ },
 };
 
 

+ 4 - 2
drivers/ata/libata-core.c

@@ -1702,6 +1702,8 @@ unsigned ata_exec_internal_sg(struct ata_device *dev,
 
 		if (qc->err_mask & ~AC_ERR_OTHER)
 			qc->err_mask &= ~AC_ERR_OTHER;
+	} else if (qc->tf.command == ATA_CMD_REQ_SENSE_DATA) {
+		qc->result_tf.command |= ATA_SENSE;
 	}
 
 	/* finish up */
@@ -4356,10 +4358,10 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
 	{ "ST380013AS",		"3.20",		ATA_HORKAGE_MAX_SEC_1024 },
 
 	/*
-	 * Device times out with higher max sects.
+	 * These devices time out with higher max sects.
 	 * https://bugzilla.kernel.org/show_bug.cgi?id=121671
 	 */
-	{ "LITEON CX1-JB256-HP", NULL,		ATA_HORKAGE_MAX_SEC_1024 },
+	{ "LITEON CX1-JB*-HP",	NULL,		ATA_HORKAGE_MAX_SEC_1024 },
 
 	/* Devices we expect to fail diagnostics */
 
 

+ 3 - 0
drivers/ata/sata_mv.c

@@ -4132,6 +4132,9 @@ static int mv_platform_probe(struct platform_device *pdev)
 	host->iomap = NULL;
 	hpriv->base = devm_ioremap(&pdev->dev, res->start,
 				   resource_size(res));
+	if (!hpriv->base)
+		return -ENOMEM;
+
 	hpriv->base -= SATAHC0_REG_BASE;
 
 	hpriv->clk = clk_get(&pdev->dev, NULL);

+ 1 - 4
drivers/base/firmware_class.c

@@ -558,9 +558,6 @@ static void fw_load_abort(struct firmware_priv *fw_priv)
 	struct firmware_buf *buf = fw_priv->buf;
 
 	__fw_load_abort(buf);
-
-	/* avoid user action after loading abort */
-	fw_priv->buf = NULL;
 }
 
 static LIST_HEAD(pending_fw_head);
@@ -713,7 +710,7 @@ static ssize_t firmware_loading_store(struct device *dev,
 
 	mutex_lock(&fw_lock);
 	fw_buf = fw_priv->buf;
-	if (!fw_buf)
+	if (fw_state_is_aborted(&fw_buf->fw_st))
 		goto out;
 
 	switch (loading) {

+ 6 - 6
drivers/base/memory.c

@@ -389,33 +389,33 @@ static ssize_t show_valid_zones(struct device *dev,
 {
 	struct memory_block *mem = to_memory_block(dev);
 	unsigned long start_pfn, end_pfn;
+	unsigned long valid_start, valid_end, valid_pages;
 	unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
-	struct page *first_page;
 	struct zone *zone;
 	int zone_shift = 0;
 
 	start_pfn = section_nr_to_pfn(mem->start_section_nr);
 	end_pfn = start_pfn + nr_pages;
-	first_page = pfn_to_page(start_pfn);
 
 	/* The block contains more than one zone can not be offlined. */
-	if (!test_pages_in_a_zone(start_pfn, end_pfn))
+	if (!test_pages_in_a_zone(start_pfn, end_pfn, &valid_start, &valid_end))
 		return sprintf(buf, "none\n");
 
-	zone = page_zone(first_page);
+	zone = page_zone(pfn_to_page(valid_start));
+	valid_pages = valid_end - valid_start;
 
 	/* MMOP_ONLINE_KEEP */
 	sprintf(buf, "%s", zone->name);
 
 	/* MMOP_ONLINE_KERNEL */
-	zone_can_shift(start_pfn, nr_pages, ZONE_NORMAL, &zone_shift);
+	zone_can_shift(valid_start, valid_pages, ZONE_NORMAL, &zone_shift);
 	if (zone_shift) {
 		strcat(buf, " ");
 		strcat(buf, (zone + zone_shift)->name);
 	}
 
 	/* MMOP_ONLINE_MOVABLE */
-	zone_can_shift(start_pfn, nr_pages, ZONE_MOVABLE, &zone_shift);
+	zone_can_shift(valid_start, valid_pages, ZONE_MOVABLE, &zone_shift);
 	if (zone_shift) {
 		strcat(buf, " ");
 		strcat(buf, (zone + zone_shift)->name);

+ 3 - 0
drivers/bcma/bcma_private.h

@@ -45,6 +45,9 @@ int bcma_sprom_get(struct bcma_bus *bus);
 void bcma_core_chipcommon_early_init(struct bcma_drv_cc *cc);
 void bcma_core_chipcommon_init(struct bcma_drv_cc *cc);
 void bcma_chipco_bcm4331_ext_pa_lines_ctl(struct bcma_drv_cc *cc, bool enable);
+#ifdef CONFIG_BCMA_DRIVER_MIPS
+void bcma_chipco_serial_init(struct bcma_drv_cc *cc);
+#endif /* CONFIG_BCMA_DRIVER_MIPS */
 
 /* driver_chipcommon_b.c */
 int bcma_core_chipcommon_b_init(struct bcma_drv_cc_b *ccb);

+ 3 - 8
drivers/bcma/driver_chipcommon.c

@@ -15,8 +15,6 @@
 #include <linux/platform_device.h>
 #include <linux/bcma/bcma.h>
 
-static void bcma_chipco_serial_init(struct bcma_drv_cc *cc);
-
 static inline u32 bcma_cc_write32_masked(struct bcma_drv_cc *cc, u16 offset,
 					 u32 mask, u32 value)
 {
@@ -186,9 +184,6 @@ void bcma_core_chipcommon_early_init(struct bcma_drv_cc *cc)
 	if (cc->capabilities & BCMA_CC_CAP_PMU)
 		bcma_pmu_early_init(cc);
 
-	if (IS_BUILTIN(CONFIG_BCM47XX) && bus->hosttype == BCMA_HOSTTYPE_SOC)
-		bcma_chipco_serial_init(cc);
-
 	if (bus->hosttype == BCMA_HOSTTYPE_SOC)
 		bcma_core_chipcommon_flash_detect(cc);
 
@@ -378,9 +373,9 @@ u32 bcma_chipco_gpio_pulldown(struct bcma_drv_cc *cc, u32 mask, u32 value)
 	return res;
 }
 
-static void bcma_chipco_serial_init(struct bcma_drv_cc *cc)
+#ifdef CONFIG_BCMA_DRIVER_MIPS
+void bcma_chipco_serial_init(struct bcma_drv_cc *cc)
 {
-#if IS_BUILTIN(CONFIG_BCM47XX)
 	unsigned int irq;
 	u32 baud_base;
 	u32 i;
@@ -422,5 +417,5 @@ static void bcma_chipco_serial_init(struct bcma_drv_cc *cc)
 		ports[i].baud_base = baud_base;
 		ports[i].reg_shift = 0;
 	}
-#endif /* CONFIG_BCM47XX */
 }
+#endif /* CONFIG_BCMA_DRIVER_MIPS */

+ 3 - 0
drivers/bcma/driver_mips.c

@@ -278,9 +278,12 @@ static void bcma_core_mips_nvram_init(struct bcma_drv_mips *mcore)
 
 void bcma_core_mips_early_init(struct bcma_drv_mips *mcore)
 {
+	struct bcma_bus *bus = mcore->core->bus;
+
 	if (mcore->early_setup_done)
 		return;
 
+	bcma_chipco_serial_init(&bus->drv_cc);
 	bcma_core_mips_nvram_init(mcore);
 
 	mcore->early_setup_done = true;

+ 14 - 8
drivers/block/xen-blkfront.c

@@ -197,13 +197,13 @@ struct blkfront_info
 	/* Number of pages per ring buffer. */
 	unsigned int nr_ring_pages;
 	struct request_queue *rq;
-	unsigned int feature_flush;
-	unsigned int feature_fua;
+	unsigned int feature_flush:1;
+	unsigned int feature_fua:1;
 	unsigned int feature_discard:1;
 	unsigned int feature_secdiscard:1;
+	unsigned int feature_persistent:1;
 	unsigned int discard_granularity;
 	unsigned int discard_alignment;
-	unsigned int feature_persistent:1;
 	/* Number of 4KB segments handled */
 	unsigned int max_indirect_segments;
 	int is_ready;
@@ -2223,7 +2223,7 @@ static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo)
 	}
 	else
 		grants = info->max_indirect_segments;
-	psegs = grants / GRANTS_PER_PSEG;
+	psegs = DIV_ROUND_UP(grants, GRANTS_PER_PSEG);
 
 	err = fill_grant_buffer(rinfo,
 				(grants + INDIRECT_GREFS(grants)) * BLK_RING_SIZE(info));
@@ -2323,13 +2323,16 @@ static void blkfront_gather_backend_features(struct blkfront_info *info)
 		blkfront_setup_discard(info);
 
 	info->feature_persistent =
-		xenbus_read_unsigned(info->xbdev->otherend,
-				     "feature-persistent", 0);
+		!!xenbus_read_unsigned(info->xbdev->otherend,
+				       "feature-persistent", 0);
 
 	indirect_segments = xenbus_read_unsigned(info->xbdev->otherend,
 					"feature-max-indirect-segments", 0);
-	info->max_indirect_segments = min(indirect_segments,
-					  xen_blkif_max_segments);
+	if (indirect_segments > xen_blkif_max_segments)
+		indirect_segments = xen_blkif_max_segments;
+	if (indirect_segments <= BLKIF_MAX_SEGMENTS_PER_REQUEST)
+		indirect_segments = 0;
+	info->max_indirect_segments = indirect_segments;
}
 
 /*
@@ -2652,6 +2655,9 @@ static int __init xlblk_init(void)
 	if (!xen_domain())
 		return -ENODEV;
 
+	if (xen_blkif_max_segments < BLKIF_MAX_SEGMENTS_PER_REQUEST)
+		xen_blkif_max_segments = BLKIF_MAX_SEGMENTS_PER_REQUEST;
+
 	if (xen_blkif_max_ring_order > XENBUS_MAX_RING_GRANT_ORDER) {
 		pr_info("Invalid max_ring_order (%d), will use default max: %d.\n",
 			xen_blkif_max_ring_order, XENBUS_MAX_RING_GRANT_ORDER);
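
The new clamping in blkfront_gather_backend_features() caps the backend's indirect-segment count at the module parameter and disables indirect descriptors outright when they cannot beat the plain ring. A standalone sketch of the arithmetic; the 11-segment constant is assumed to match BLKIF_MAX_SEGMENTS_PER_REQUEST in the Xen headers:

#include <stdio.h>

/* Assumption: mirrors BLKIF_MAX_SEGMENTS_PER_REQUEST from the Xen ABI. */
#define RING_SEGS 11

static unsigned int clamp_indirect(unsigned int backend_max,
				   unsigned int module_max)
{
	if (backend_max > module_max)
		backend_max = module_max;
	/* At or below the plain-ring limit, indirect descriptors only
	 * add overhead, so 0 switches them off entirely. */
	if (backend_max <= RING_SEGS)
		backend_max = 0;
	return backend_max;
}

int main(void)
{
	printf("%u\n", clamp_indirect(256, 32));	/* -> 32 */
	printf("%u\n", clamp_indirect(8, 32));		/* -> 0  */
	printf("%u\n", clamp_indirect(64, 128));	/* -> 64 */
	return 0;
}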

+ 13 - 1
drivers/cpufreq/intel_pstate.c

@@ -2005,7 +2005,8 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
 			limits = &performance_limits;
 			perf_limits = limits;
 		}
-		if (policy->max >= policy->cpuinfo.max_freq) {
+		if (policy->max >= policy->cpuinfo.max_freq &&
+		    !limits->no_turbo) {
 			pr_debug("set performance\n");
 			intel_pstate_set_performance_limits(perf_limits);
 			goto out;
@@ -2047,6 +2048,17 @@ static int intel_pstate_verify_policy(struct cpufreq_policy *policy)
 	    policy->policy != CPUFREQ_POLICY_PERFORMANCE)
 		return -EINVAL;
 
+	/* When per-CPU limits are used, sysfs limits are not used */
+	if (!per_cpu_limits) {
+		unsigned int max_freq, min_freq;
+
+		max_freq = policy->cpuinfo.max_freq *
+						limits->max_sysfs_pct / 100;
+		min_freq = policy->cpuinfo.max_freq *
+						limits->min_sysfs_pct / 100;
+		cpufreq_verify_within_limits(policy, min_freq, max_freq);
+	}
+
 	return 0;
 }
 
 

+ 46 - 23
drivers/dma/cppi41.c

@@ -153,6 +153,8 @@ struct cppi41_dd {
 
 	/* context for suspend/resume */
 	unsigned int dma_tdfdq;
+
+	bool is_suspended;
 };
 
 #define FIST_COMPLETION_QUEUE	93
@@ -257,6 +259,10 @@ static struct cppi41_channel *desc_to_chan(struct cppi41_dd *cdd, u32 desc)
 	BUG_ON(desc_num >= ALLOC_DECS_NUM);
 	c = cdd->chan_busy[desc_num];
 	cdd->chan_busy[desc_num] = NULL;
+
+	/* Usecount for chan_busy[], paired with push_desc_queue() */
+	pm_runtime_put(cdd->ddev.dev);
+
 	return c;
 }
 
@@ -317,12 +323,12 @@ static irqreturn_t cppi41_irq(int irq, void *data)
 
 		while (val) {
 			u32 desc, len;
-			int error;
 
-			error = pm_runtime_get(cdd->ddev.dev);
-			if (error < 0)
-				dev_err(cdd->ddev.dev, "%s pm runtime get: %i\n",
-					__func__, error);
+			/*
+			 * This should never trigger, see the comments in
+			 * push_desc_queue()
+			 */
+			WARN_ON(cdd->is_suspended);
 
 			q_num = __fls(val);
 			val &= ~(1 << q_num);
@@ -343,9 +349,6 @@ static irqreturn_t cppi41_irq(int irq, void *data)
 			c->residue = pd_trans_len(c->desc->pd6) - len;
 			dma_cookie_complete(&c->txd);
 			dmaengine_desc_get_callback_invoke(&c->txd, NULL);
-
-			pm_runtime_mark_last_busy(cdd->ddev.dev);
-			pm_runtime_put_autosuspend(cdd->ddev.dev);
 		}
 	}
 	return IRQ_HANDLED;
@@ -447,6 +450,15 @@ static void push_desc_queue(struct cppi41_channel *c)
 	 */
 	__iowmb();
 
+	/*
+	 * DMA transfers can take at least 200ms to complete with USB mass
+	 * storage connected. To prevent autosuspend timeouts, we must use
+	 * pm_runtime_get/put() when chan_busy[] is modified. This will get
+	 * cleared in desc_to_chan() or cppi41_stop_chan() depending on the
+	 * outcome of the transfer.
+	 */
+	pm_runtime_get(cdd->ddev.dev);
+
 	desc_phys = lower_32_bits(c->desc_phys);
 	desc_num = (desc_phys - cdd->descs_phys) / sizeof(struct cppi41_desc);
 	WARN_ON(cdd->chan_busy[desc_num]);
@@ -457,20 +469,26 @@ static void push_desc_queue(struct cppi41_channel *c)
 	cppi_writel(reg, cdd->qmgr_mem + QMGR_QUEUE_D(c->q_num));
 }
 
-static void pending_desc(struct cppi41_channel *c)
+/*
+ * Caller must hold cdd->lock to prevent push_desc_queue()
+ * getting called out of order. We have both cppi41_dma_issue_pending()
+ * and cppi41_runtime_resume() call this function.
+ */
+static void cppi41_run_queue(struct cppi41_dd *cdd)
 {
-	struct cppi41_dd *cdd = c->cdd;
-	unsigned long flags;
+	struct cppi41_channel *c, *_c;
 
-	spin_lock_irqsave(&cdd->lock, flags);
-	list_add_tail(&c->node, &cdd->pending);
-	spin_unlock_irqrestore(&cdd->lock, flags);
+	list_for_each_entry_safe(c, _c, &cdd->pending, node) {
+		push_desc_queue(c);
+		list_del(&c->node);
+	}
 }
 
 static void cppi41_dma_issue_pending(struct dma_chan *chan)
 {
 	struct cppi41_channel *c = to_cpp41_chan(chan);
 	struct cppi41_dd *cdd = c->cdd;
+	unsigned long flags;
 	int error;
 
 	error = pm_runtime_get(cdd->ddev.dev);
@@ -482,10 +500,11 @@ static void cppi41_dma_issue_pending(struct dma_chan *chan)
 		return;
 	}
 
-	if (likely(pm_runtime_active(cdd->ddev.dev)))
-		push_desc_queue(c);
-	else
-		pending_desc(c);
+	spin_lock_irqsave(&cdd->lock, flags);
+	list_add_tail(&c->node, &cdd->pending);
+	if (!cdd->is_suspended)
+		cppi41_run_queue(cdd);
+	spin_unlock_irqrestore(&cdd->lock, flags);
 
 	pm_runtime_mark_last_busy(cdd->ddev.dev);
 	pm_runtime_put_autosuspend(cdd->ddev.dev);
@@ -705,6 +724,9 @@ static int cppi41_stop_chan(struct dma_chan *chan)
 	WARN_ON(!cdd->chan_busy[desc_num]);
 	cdd->chan_busy[desc_num] = NULL;
 
+	/* Usecount for chan_busy[], paired with push_desc_queue() */
+	pm_runtime_put(cdd->ddev.dev);
+
 	return 0;
 }
 
@@ -1150,8 +1172,12 @@ static int __maybe_unused cppi41_resume(struct device *dev)
 static int __maybe_unused cppi41_runtime_suspend(struct device *dev)
 {
 	struct cppi41_dd *cdd = dev_get_drvdata(dev);
+	unsigned long flags;
 
+	spin_lock_irqsave(&cdd->lock, flags);
+	cdd->is_suspended = true;
 	WARN_ON(!list_empty(&cdd->pending));
+	spin_unlock_irqrestore(&cdd->lock, flags);
 
 	return 0;
}
@@ -1159,14 +1185,11 @@ static int __maybe_unused cppi41_runtime_suspend(struct device *dev)
 static int __maybe_unused cppi41_runtime_resume(struct device *dev)
 {
 	struct cppi41_dd *cdd = dev_get_drvdata(dev);
-	struct cppi41_channel *c, *_c;
 	unsigned long flags;
 
 	spin_lock_irqsave(&cdd->lock, flags);
-	list_for_each_entry_safe(c, _c, &cdd->pending, node) {
-		push_desc_queue(c);
-		list_del(&c->node);
-	}
+	cdd->is_suspended = false;
+	cppi41_run_queue(cdd);
 	spin_unlock_irqrestore(&cdd->lock, flags);
 
 	return 0;
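
The cppi41 changes park descriptors on cdd->pending while the controller is runtime-suspended and drain them from a single place, cppi41_run_queue(), both on issue and on resume. A single-threaded userspace sketch of that gate (the spinlock is deliberately elided):

#include <stdbool.h>
#include <stdio.h>

#define MAX_PENDING 8

static int pending[MAX_PENDING];
static int npending;
static bool is_suspended;

/* Models push_desc_queue(): hand one descriptor to the hardware. */
static void push_desc_queue(int desc)
{
	printf("pushed desc %d to hardware\n", desc);
}

/* Models cppi41_run_queue(): drain everything parked on the list. */
static void run_queue(void)
{
	for (int i = 0; i < npending; i++)
		push_desc_queue(pending[i]);
	npending = 0;
}

/* Models cppi41_dma_issue_pending(): always enqueue, only drain
 * when the controller is awake. */
static void issue_pending(int desc)
{
	pending[npending++] = desc;
	if (!is_suspended)
		run_queue();
}

/* Models cppi41_runtime_resume(): clear the gate, then drain. */
static void runtime_resume(void)
{
	is_suspended = false;
	run_queue();
}

int main(void)
{
	issue_pending(1);	/* awake: pushed immediately */
	is_suspended = true;
	issue_pending(2);	/* suspended: parked on the pending list */
	runtime_resume();	/* desc 2 goes out here */
	return 0;
}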
 	return 0;

+ 6 - 13
drivers/dma/pl330.c

@@ -1699,7 +1699,6 @@ static bool _chan_ns(const struct pl330_dmac *pl330, int i)
 static struct pl330_thread *pl330_request_channel(struct pl330_dmac *pl330)
 static struct pl330_thread *pl330_request_channel(struct pl330_dmac *pl330)
 {
 {
 	struct pl330_thread *thrd = NULL;
 	struct pl330_thread *thrd = NULL;
-	unsigned long flags;
 	int chans, i;
 	int chans, i;
 
 
 	if (pl330->state == DYING)
 	if (pl330->state == DYING)
@@ -1707,8 +1706,6 @@ static struct pl330_thread *pl330_request_channel(struct pl330_dmac *pl330)
 
 
 	chans = pl330->pcfg.num_chan;
 	chans = pl330->pcfg.num_chan;
 
 
-	spin_lock_irqsave(&pl330->lock, flags);
-
 	for (i = 0; i < chans; i++) {
 	for (i = 0; i < chans; i++) {
 		thrd = &pl330->channels[i];
 		thrd = &pl330->channels[i];
 		if ((thrd->free) && (!_manager_ns(thrd) ||
 		if ((thrd->free) && (!_manager_ns(thrd) ||
@@ -1726,8 +1723,6 @@ static struct pl330_thread *pl330_request_channel(struct pl330_dmac *pl330)
 		thrd = NULL;
 		thrd = NULL;
 	}
 	}
 
 
-	spin_unlock_irqrestore(&pl330->lock, flags);
-
 	return thrd;
 	return thrd;
 }
 }
 
 
@@ -1745,7 +1740,6 @@ static inline void _free_event(struct pl330_thread *thrd, int ev)
 static void pl330_release_channel(struct pl330_thread *thrd)
 static void pl330_release_channel(struct pl330_thread *thrd)
 {
 {
 	struct pl330_dmac *pl330;
 	struct pl330_dmac *pl330;
-	unsigned long flags;
 
 
 	if (!thrd || thrd->free)
 	if (!thrd || thrd->free)
 		return;
 		return;
@@ -1757,10 +1751,8 @@ static void pl330_release_channel(struct pl330_thread *thrd)
 
 
 	pl330 = thrd->dmac;
 	pl330 = thrd->dmac;
 
 
-	spin_lock_irqsave(&pl330->lock, flags);
 	_free_event(thrd, thrd->ev);
 	_free_event(thrd, thrd->ev);
 	thrd->free = true;
 	thrd->free = true;
-	spin_unlock_irqrestore(&pl330->lock, flags);
 }
 }
 
 
 /* Initialize the structure for PL330 configuration, that can be used
 /* Initialize the structure for PL330 configuration, that can be used
@@ -2122,20 +2114,20 @@ static int pl330_alloc_chan_resources(struct dma_chan *chan)
 	struct pl330_dmac *pl330 = pch->dmac;
 	struct pl330_dmac *pl330 = pch->dmac;
 	unsigned long flags;
 	unsigned long flags;
 
 
-	spin_lock_irqsave(&pch->lock, flags);
+	spin_lock_irqsave(&pl330->lock, flags);
 
 
 	dma_cookie_init(chan);
 	dma_cookie_init(chan);
 	pch->cyclic = false;
 	pch->cyclic = false;
 
 
 	pch->thread = pl330_request_channel(pl330);
 	pch->thread = pl330_request_channel(pl330);
 	if (!pch->thread) {
 	if (!pch->thread) {
-		spin_unlock_irqrestore(&pch->lock, flags);
+		spin_unlock_irqrestore(&pl330->lock, flags);
 		return -ENOMEM;
 		return -ENOMEM;
 	}
 	}
 
 
 	tasklet_init(&pch->task, pl330_tasklet, (unsigned long) pch);
 	tasklet_init(&pch->task, pl330_tasklet, (unsigned long) pch);
 
 
-	spin_unlock_irqrestore(&pch->lock, flags);
+	spin_unlock_irqrestore(&pl330->lock, flags);
 
 
 	return 1;
 	return 1;
 }
 }
@@ -2238,12 +2230,13 @@ static int pl330_pause(struct dma_chan *chan)
 static void pl330_free_chan_resources(struct dma_chan *chan)
 static void pl330_free_chan_resources(struct dma_chan *chan)
 {
 {
 	struct dma_pl330_chan *pch = to_pchan(chan);
 	struct dma_pl330_chan *pch = to_pchan(chan);
+	struct pl330_dmac *pl330 = pch->dmac;
 	unsigned long flags;
 	unsigned long flags;
 
 
 	tasklet_kill(&pch->task);
 	tasklet_kill(&pch->task);
 
 
 	pm_runtime_get_sync(pch->dmac->ddma.dev);
 	pm_runtime_get_sync(pch->dmac->ddma.dev);
-	spin_lock_irqsave(&pch->lock, flags);
+	spin_lock_irqsave(&pl330->lock, flags);
 
 
 	pl330_release_channel(pch->thread);
 	pl330_release_channel(pch->thread);
 	pch->thread = NULL;
 	pch->thread = NULL;
@@ -2251,7 +2244,7 @@ static void pl330_free_chan_resources(struct dma_chan *chan)
 	if (pch->cyclic)
 	if (pch->cyclic)
 		list_splice_tail_init(&pch->work_list, &pch->dmac->desc_pool);
 		list_splice_tail_init(&pch->work_list, &pch->dmac->desc_pool);
 
 
-	spin_unlock_irqrestore(&pch->lock, flags);
+	spin_unlock_irqrestore(&pl330->lock, flags);
 	pm_runtime_mark_last_busy(pch->dmac->ddma.dev);
 	pm_runtime_mark_last_busy(pch->dmac->ddma.dev);
 	pm_runtime_put_autosuspend(pch->dmac->ddma.dev);
 	pm_runtime_put_autosuspend(pch->dmac->ddma.dev);
 }
 }

+ 3 - 11
drivers/firmware/efi/libstub/fdt.c

@@ -187,6 +187,7 @@ static efi_status_t update_fdt_memmap(void *fdt, struct efi_boot_memmap *map)
 struct exit_boot_struct {
 	efi_memory_desc_t *runtime_map;
 	int *runtime_entry_count;
+	void *new_fdt_addr;
 };
 
 static efi_status_t exit_boot_func(efi_system_table_t *sys_table_arg,
@@ -202,7 +203,7 @@ static efi_status_t exit_boot_func(efi_system_table_t *sys_table_arg,
 	efi_get_virtmap(*map->map, *map->map_size, *map->desc_size,
 			p->runtime_map, p->runtime_entry_count);
 
-	return EFI_SUCCESS;
+	return update_fdt_memmap(p->new_fdt_addr, map);
 }
 
 /*
@@ -300,22 +301,13 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
 
 	priv.runtime_map = runtime_map;
 	priv.runtime_entry_count = &runtime_entry_count;
+	priv.new_fdt_addr = (void *)*new_fdt_addr;
 	status = efi_exit_boot_services(sys_table, handle, &map, &priv,
 					exit_boot_func);
 
 	if (status == EFI_SUCCESS) {
 		efi_set_virtual_address_map_t *svam;
 
-		status = update_fdt_memmap((void *)*new_fdt_addr, &map);
-		if (status != EFI_SUCCESS) {
-			/*
-			 * The kernel won't get far without the memory map, but
-			 * may still be able to print something meaningful so
-			 * return success here.
-			 */
-			return EFI_SUCCESS;
-		}
-
 		/* Install the new virtual address map */
 		svam = sys_table->runtime->set_virtual_address_map;
 		status = svam(runtime_entry_count * desc_size, desc_size,

+ 7 - 0
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c

@@ -83,6 +83,13 @@ int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
 		}
 		break;
 	}
+
+	if (!(*out_ring && (*out_ring)->adev)) {
+		DRM_ERROR("Ring %d is not initialized on IP %d\n",
+			  ring, ip_type);
+		return -EINVAL;
+	}
+
 	return 0;
 }
 
 

+ 1 - 4
drivers/gpu/drm/amd/amdgpu/dce_virtual.c

@@ -627,11 +627,8 @@ static const struct drm_encoder_helper_funcs dce_virtual_encoder_helper_funcs =
 
 static void dce_virtual_encoder_destroy(struct drm_encoder *encoder)
 {
-	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
-
-	kfree(amdgpu_encoder->enc_priv);
 	drm_encoder_cleanup(encoder);
-	kfree(amdgpu_encoder);
+	kfree(encoder);
 }
 
 static const struct drm_encoder_funcs dce_virtual_encoder_funcs = {

+ 3 - 1
drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c

@@ -254,6 +254,9 @@ static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
 	}
 	WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0);
 
+	if (adev->mode_info.num_crtc)
+		amdgpu_display_set_vga_render_state(adev, false);
+
 	gmc_v6_0_mc_stop(adev, &save);
 
 	if (gmc_v6_0_wait_for_idle((void *)adev)) {
@@ -283,7 +286,6 @@ static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
 		dev_warn(adev->dev, "Wait for MC idle timedout !\n");
 	}
 	gmc_v6_0_mc_resume(adev, &save);
-	amdgpu_display_set_vga_render_state(adev, false);
 }
 
 static int gmc_v6_0_mc_init(struct amdgpu_device *adev)

+ 1 - 0
drivers/gpu/drm/ast/ast_drv.h

@@ -113,6 +113,7 @@ struct ast_private {
 	struct ttm_bo_kmap_obj cache_kmap;
 	int next_cursor;
 	bool support_wide_screen;
+	bool DisableP2A;
 
 	enum ast_tx_chip tx_chip_type;
 	u8 dp501_maxclk;

+ 83 - 74
drivers/gpu/drm/ast/ast_main.c

@@ -124,6 +124,12 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post)
 	} else
 		*need_post = false;
 
+	/* Check P2A Access */
+	ast->DisableP2A = true;
+	data = ast_read32(ast, 0xf004);
+	if (data != 0xFFFFFFFF)
+		ast->DisableP2A = false;
+
 	/* Check if we support wide screen */
 	switch (ast->chip) {
 	case AST1180:
@@ -140,15 +146,17 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post)
 			ast->support_wide_screen = true;
 		else {
 			ast->support_wide_screen = false;
-			/* Read SCU7c (silicon revision register) */
-			ast_write32(ast, 0xf004, 0x1e6e0000);
-			ast_write32(ast, 0xf000, 0x1);
-			data = ast_read32(ast, 0x1207c);
-			data &= 0x300;
-			if (ast->chip == AST2300 && data == 0x0) /* ast1300 */
-				ast->support_wide_screen = true;
-			if (ast->chip == AST2400 && data == 0x100) /* ast1400 */
-				ast->support_wide_screen = true;
+			if (ast->DisableP2A == false) {
+				/* Read SCU7c (silicon revision register) */
+				ast_write32(ast, 0xf004, 0x1e6e0000);
+				ast_write32(ast, 0xf000, 0x1);
+				data = ast_read32(ast, 0x1207c);
+				data &= 0x300;
+				if (ast->chip == AST2300 && data == 0x0) /* ast1300 */
+					ast->support_wide_screen = true;
+				if (ast->chip == AST2400 && data == 0x100) /* ast1400 */
+					ast->support_wide_screen = true;
+			}
 		}
 		break;
 	}
@@ -216,80 +224,81 @@ static int ast_get_dram_info(struct drm_device *dev)
 	uint32_t data, data2;
 	uint32_t denum, num, div, ref_pll;
 
-	ast_write32(ast, 0xf004, 0x1e6e0000);
-	ast_write32(ast, 0xf000, 0x1);
-
-
-	ast_write32(ast, 0x10000, 0xfc600309);
-
-	do {
-		if (pci_channel_offline(dev->pdev))
-			return -EIO;
-	} while (ast_read32(ast, 0x10000) != 0x01);
-	data = ast_read32(ast, 0x10004);
-
-	if (data & 0x40)
+	if (ast->DisableP2A)
+	{
 		ast->dram_bus_width = 16;
+		ast->dram_type = AST_DRAM_1Gx16;
+		ast->mclk = 396;
+	}
 	else
-		ast->dram_bus_width = 32;
+	{
+		ast_write32(ast, 0xf004, 0x1e6e0000);
+		ast_write32(ast, 0xf000, 0x1);
+		data = ast_read32(ast, 0x10004);
+
+		if (data & 0x40)
+			ast->dram_bus_width = 16;
+		else
+			ast->dram_bus_width = 32;
+
+		if (ast->chip == AST2300 || ast->chip == AST2400) {
+			switch (data & 0x03) {
+			case 0:
+				ast->dram_type = AST_DRAM_512Mx16;
+				break;
+			default:
+			case 1:
+				ast->dram_type = AST_DRAM_1Gx16;
+				break;
+			case 2:
+				ast->dram_type = AST_DRAM_2Gx16;
+				break;
+			case 3:
+				ast->dram_type = AST_DRAM_4Gx16;
+				break;
+			}
+		} else {
+			switch (data & 0x0c) {
+			case 0:
+			case 4:
+				ast->dram_type = AST_DRAM_512Mx16;
+				break;
+			case 8:
+				if (data & 0x40)
+					ast->dram_type = AST_DRAM_1Gx16;
+				else
+					ast->dram_type = AST_DRAM_512Mx32;
+				break;
+			case 0xc:
+				ast->dram_type = AST_DRAM_1Gx32;
+				break;
+			}
+		}
 
-	if (ast->chip == AST2300 || ast->chip == AST2400) {
-		switch (data & 0x03) {
-		case 0:
-			ast->dram_type = AST_DRAM_512Mx16;
-			break;
-		default:
-		case 1:
-			ast->dram_type = AST_DRAM_1Gx16;
-			break;
-		case 2:
-			ast->dram_type = AST_DRAM_2Gx16;
-			break;
+		data = ast_read32(ast, 0x10120);
+		data2 = ast_read32(ast, 0x10170);
+		if (data2 & 0x2000)
+			ref_pll = 14318;
+		else
+			ref_pll = 12000;
+
+		denum = data & 0x1f;
+		num = (data & 0x3fe0) >> 5;
+		data = (data & 0xc000) >> 14;
+		switch (data) {
 		case 3:
-			ast->dram_type = AST_DRAM_4Gx16;
-			break;
-		}
-	} else {
-		switch (data & 0x0c) {
-		case 0:
-		case 4:
-			ast->dram_type = AST_DRAM_512Mx16;
+			div = 0x4;
 			break;
-		case 8:
-			if (data & 0x40)
-				ast->dram_type = AST_DRAM_1Gx16;
-			else
-				ast->dram_type = AST_DRAM_512Mx32;
+		case 2:
+		case 1:
+			div = 0x2;
 			break;
-		case 0xc:
-			ast->dram_type = AST_DRAM_1Gx32;
+		default:
+			div = 0x1;
 			break;
 		}
+		ast->mclk = ref_pll * (num + 2) / (denum + 2) * (div * 1000);
 	}
-
-	data = ast_read32(ast, 0x10120);
-	data2 = ast_read32(ast, 0x10170);
-	if (data2 & 0x2000)
-		ref_pll = 14318;
-	else
-		ref_pll = 12000;
-
-	denum = data & 0x1f;
-	num = (data & 0x3fe0) >> 5;
-	data = (data & 0xc000) >> 14;
-	switch (data) {
-	case 3:
-		div = 0x4;
-		break;
-	case 2:
-	case 1:
-		div = 0x2;
-		break;
-	default:
-		div = 0x1;
-		break;
-	}
-	ast->mclk = ref_pll * (num + 2) / (denum + 2) * (div * 1000);
 	return 0;
 }
 
 

+ 13 - 5
drivers/gpu/drm/ast/ast_post.c

@@ -379,12 +379,20 @@ void ast_post_gpu(struct drm_device *dev)
 	ast_open_key(ast);
 	ast_set_def_ext_reg(dev);
 
-	if (ast->chip == AST2300 || ast->chip == AST2400)
-		ast_init_dram_2300(dev);
-	else
-		ast_init_dram_reg(dev);
+	if (ast->DisableP2A == false)
+	{
+		if (ast->chip == AST2300 || ast->chip == AST2400)
+			ast_init_dram_2300(dev);
+		else
+			ast_init_dram_reg(dev);
 
-	ast_init_3rdtx(dev);
+		ast_init_3rdtx(dev);
+	}
+	else
+	{
+		if (ast->tx_chip_type != AST_TX_NONE)
+			ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa3, 0xcf, 0x80);	/* Enable DVO */
+	}
 }
 
 /* AST 2300 DRAM settings */

+ 14 - 11
drivers/gpu/drm/drm_atomic.c

@@ -291,15 +291,15 @@ drm_atomic_get_crtc_state(struct drm_atomic_state *state,
 EXPORT_SYMBOL(drm_atomic_get_crtc_state);
 
 static void set_out_fence_for_crtc(struct drm_atomic_state *state,
-				   struct drm_crtc *crtc, s64 __user *fence_ptr)
+				   struct drm_crtc *crtc, s32 __user *fence_ptr)
 {
 	state->crtcs[drm_crtc_index(crtc)].out_fence_ptr = fence_ptr;
 }
 
-static s64 __user *get_out_fence_for_crtc(struct drm_atomic_state *state,
+static s32 __user *get_out_fence_for_crtc(struct drm_atomic_state *state,
 					  struct drm_crtc *crtc)
 {
-	s64 __user *fence_ptr;
+	s32 __user *fence_ptr;
 
 	fence_ptr = state->crtcs[drm_crtc_index(crtc)].out_fence_ptr;
 	state->crtcs[drm_crtc_index(crtc)].out_fence_ptr = NULL;
@@ -512,7 +512,7 @@ int drm_atomic_crtc_set_property(struct drm_crtc *crtc,
 		state->color_mgmt_changed |= replaced;
 		return ret;
 	} else if (property == config->prop_out_fence_ptr) {
-		s64 __user *fence_ptr = u64_to_user_ptr(val);
+		s32 __user *fence_ptr = u64_to_user_ptr(val);
 
 		if (!fence_ptr)
 			return 0;
@@ -1915,7 +1915,7 @@ EXPORT_SYMBOL(drm_atomic_clean_old_fb);
  */
 
 struct drm_out_fence_state {
-	s64 __user *out_fence_ptr;
+	s32 __user *out_fence_ptr;
 	struct sync_file *sync_file;
 	int fd;
 };
@@ -1952,7 +1952,7 @@ static int prepare_crtc_signaling(struct drm_device *dev,
 		return 0;
 
 	for_each_crtc_in_state(state, crtc, crtc_state, i) {
-		u64 __user *fence_ptr;
+		s32 __user *fence_ptr;
 
 		fence_ptr = get_out_fence_for_crtc(crtc_state->state, crtc);
 
@@ -2032,13 +2032,16 @@ static void complete_crtc_signaling(struct drm_device *dev,
 	}
 
 	for_each_crtc_in_state(state, crtc, crtc_state, i) {
+		struct drm_pending_vblank_event *event = crtc_state->event;
 		/*
-		 * TEST_ONLY and PAGE_FLIP_EVENT are mutually
-		 * exclusive, if they weren't, this code should be
-		 * called on success for TEST_ONLY too.
+		 * Free the allocated event. drm_atomic_helper_setup_commit
+		 * can allocate an event too, so only free it if it's ours
+		 * to prevent a double free in drm_atomic_state_clear.
		 */
-		if (crtc_state->event)
-			drm_event_cancel_free(dev, &crtc_state->event->base);
+		if (event && (event->base.fence || event->base.file_priv)) {
+			drm_event_cancel_free(dev, &event->base);
+			crtc_state->event = NULL;
+		}
 	}
 
 	if (!fence_state)

+ 0 - 9
drivers/gpu/drm/drm_atomic_helper.c

@@ -1666,9 +1666,6 @@ int drm_atomic_helper_prepare_planes(struct drm_device *dev,
 
 		funcs = plane->helper_private;
 
-		if (!drm_atomic_helper_framebuffer_changed(dev, state, plane_state->crtc))
-			continue;
-
 		if (funcs->prepare_fb) {
 			ret = funcs->prepare_fb(plane, plane_state);
 			if (ret)
@@ -1685,9 +1682,6 @@ fail:
 		if (j >= i)
 			continue;
 
-		if (!drm_atomic_helper_framebuffer_changed(dev, state, plane_state->crtc))
-			continue;
-
 		funcs = plane->helper_private;
 
 		if (funcs->cleanup_fb)
@@ -1954,9 +1948,6 @@ void drm_atomic_helper_cleanup_planes(struct drm_device *dev,
 	for_each_plane_in_state(old_state, plane, plane_state, i) {
 		const struct drm_plane_helper_funcs *funcs;
 
-		if (!drm_atomic_helper_framebuffer_changed(dev, old_state, plane_state->crtc))
-			continue;
-
 		funcs = plane->helper_private;
 
 		if (funcs->cleanup_fb)

+ 18 - 5
drivers/gpu/drm/drm_connector.c

@@ -225,6 +225,7 @@ int drm_connector_init(struct drm_device *dev,
 
 	INIT_LIST_HEAD(&connector->probed_modes);
 	INIT_LIST_HEAD(&connector->modes);
+	mutex_init(&connector->mutex);
 	connector->edid_blob_ptr = NULL;
 	connector->status = connector_status_unknown;
 
@@ -359,6 +360,8 @@ void drm_connector_cleanup(struct drm_connector *connector)
 		connector->funcs->atomic_destroy_state(connector,
 						       connector->state);
 
+	mutex_destroy(&connector->mutex);
+
 	memset(connector, 0, sizeof(*connector));
 }
 EXPORT_SYMBOL(drm_connector_cleanup);
@@ -374,14 +377,18 @@ EXPORT_SYMBOL(drm_connector_cleanup);
  */
 int drm_connector_register(struct drm_connector *connector)
 {
-	int ret;
+	int ret = 0;
 
-	if (connector->registered)
+	if (!connector->dev->registered)
 		return 0;
 
+	mutex_lock(&connector->mutex);
+	if (connector->registered)
+		goto unlock;
+
 	ret = drm_sysfs_connector_add(connector);
 	if (ret)
-		return ret;
+		goto unlock;
 
 	ret = drm_debugfs_connector_add(connector);
 	if (ret) {
@@ -397,12 +404,14 @@ int drm_connector_register(struct drm_connector *connector)
 	drm_mode_object_register(connector->dev, &connector->base);
 
 	connector->registered = true;
-	return 0;
+	goto unlock;
 
 err_debugfs:
 	drm_debugfs_connector_remove(connector);
err_sysfs:
 	drm_sysfs_connector_remove(connector);
+unlock:
+	mutex_unlock(&connector->mutex);
 	return ret;
 }
 EXPORT_SYMBOL(drm_connector_register);
@@ -415,8 +424,11 @@ EXPORT_SYMBOL(drm_connector_register);
  */
 void drm_connector_unregister(struct drm_connector *connector)
 {
-	if (!connector->registered)
+	mutex_lock(&connector->mutex);
+	if (!connector->registered) {
+		mutex_unlock(&connector->mutex);
 		return;
+	}
 
 	if (connector->funcs->early_unregister)
 		connector->funcs->early_unregister(connector);
@@ -425,6 +437,7 @@ void drm_connector_unregister(struct drm_connector *connector)
 	drm_debugfs_connector_remove(connector);
 
 	connector->registered = false;
+	mutex_unlock(&connector->mutex);
 }
 EXPORT_SYMBOL(drm_connector_unregister);
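The guard added above is a general idiom: a per-object mutex plus a registered flag make register/unregister idempotent and safe against concurrent callers. A minimal userspace sketch of the same pattern, with pthreads standing in for the kernel mutex and all names illustrative rather than the DRM API:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical "connector" carrying the same guard the patch adds. */
struct connector {
	pthread_mutex_t mutex;
	bool registered;
};

/* Safe to call from several paths; only the first call registers. */
static int connector_register(struct connector *c)
{
	int ret = 0;

	pthread_mutex_lock(&c->mutex);
	if (c->registered)
		goto unlock;		/* already done, succeed quietly */

	/* ... sysfs/debugfs-style setup would go here ... */
	c->registered = true;
unlock:
	pthread_mutex_unlock(&c->mutex);
	return ret;
}

static void connector_unregister(struct connector *c)
{
	pthread_mutex_lock(&c->mutex);
	if (c->registered) {
		/* ... teardown would go here ... */
		c->registered = false;
	}
	pthread_mutex_unlock(&c->mutex);
}

int main(void)
{
	struct connector c = { PTHREAD_MUTEX_INITIALIZER, false };

	connector_register(&c);
	connector_register(&c);	/* second call is a harmless no-op */
	connector_unregister(&c);
	printf("registered: %d\n", c.registered);
	return 0;
}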

+ 4 - 0
drivers/gpu/drm/drm_drv.c

@@ -745,6 +745,8 @@ int drm_dev_register(struct drm_device *dev, unsigned long flags)
 	if (ret)
 		goto err_minors;
 
+	dev->registered = true;
+
 	if (dev->driver->load) {
 		ret = dev->driver->load(dev, flags);
 		if (ret)
@@ -785,6 +787,8 @@ void drm_dev_unregister(struct drm_device *dev)
 
 	drm_lastclose(dev);
 
+	dev->registered = false;
+
 	if (drm_core_check_feature(dev, DRIVER_MODESET))
 		drm_modeset_unregister_all(dev);
 

+ 31 - 20
drivers/gpu/drm/drm_probe_helper.c

@@ -115,27 +115,24 @@ static int drm_helper_probe_add_cmdline_mode(struct drm_connector *connector)
 
 #define DRM_OUTPUT_POLL_PERIOD (10*HZ)
 /**
- * drm_kms_helper_poll_enable - re-enable output polling.
+ * drm_kms_helper_poll_enable_locked - re-enable output polling.
  * @dev: drm_device
  *
- * This function re-enables the output polling work, after it has been
- * temporarily disabled using drm_kms_helper_poll_disable(), for example over
- * suspend/resume.
+ * This function re-enables the output polling work without
+ * locking the mode_config mutex.
  *
- * Drivers can call this helper from their device resume implementation. It is
- * an error to call this when the output polling support has not yet been set
- * up.
- *
- * Note that calls to enable and disable polling must be strictly ordered, which
- * is automatically the case when they're only call from suspend/resume
- * callbacks.
+ * This is like drm_kms_helper_poll_enable() however it is to be
+ * called from a context where the mode_config mutex is locked
+ * already.
  */
-void drm_kms_helper_poll_enable(struct drm_device *dev)
+void drm_kms_helper_poll_enable_locked(struct drm_device *dev)
 {
 	bool poll = false;
 	struct drm_connector *connector;
 	unsigned long delay = DRM_OUTPUT_POLL_PERIOD;
 
+	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
+
 	if (!dev->mode_config.poll_enabled || !drm_kms_helper_poll)
 		return;
 
@@ -163,7 +160,7 @@ void drm_kms_helper_poll_enable(struct drm_device *dev)
 	if (poll)
 		schedule_delayed_work(&dev->mode_config.output_poll_work, delay);
 }
-EXPORT_SYMBOL(drm_kms_helper_poll_enable);
+EXPORT_SYMBOL(drm_kms_helper_poll_enable_locked);
 
 static enum drm_connector_status
 drm_connector_detect(struct drm_connector *connector, bool force)
@@ -290,7 +287,7 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
 
 	/* Re-enable polling in case the global poll config changed. */
 	if (drm_kms_helper_poll != dev->mode_config.poll_running)
-		drm_kms_helper_poll_enable(dev);
+		drm_kms_helper_poll_enable_locked(dev);
 
 	dev->mode_config.poll_running = drm_kms_helper_poll;
 
@@ -482,12 +479,8 @@ out:
  * This function disables the output polling work.
 *
 * Drivers can call this helper from their device suspend implementation. It is
- * not an error to call this even when output polling isn't enabled or already
- * disabled. Polling is re-enabled by calling drm_kms_helper_poll_enable().
- *
- * Note that calls to enable and disable polling must be strictly ordered, which
- * is automatically the case when they're only call from suspend/resume
- * callbacks.
+ * not an error to call this even when output polling isn't enabled or already
+ * disabled.
 */
 void drm_kms_helper_poll_disable(struct drm_device *dev)
 {
@@ -497,6 +490,24 @@ void drm_kms_helper_poll_disable(struct drm_device *dev)
 }
 EXPORT_SYMBOL(drm_kms_helper_poll_disable);
 
+/**
+ * drm_kms_helper_poll_enable - re-enable output polling.
+ * @dev: drm_device
+ *
+ * This function re-enables the output polling work.
+ *
+ * Drivers can call this helper from their device resume implementation. It is
+ * an error to call this when the output polling support has not yet been set
+ * up.
+ */
+void drm_kms_helper_poll_enable(struct drm_device *dev)
+{
+	mutex_lock(&dev->mode_config.mutex);
+	drm_kms_helper_poll_enable_locked(dev);
+	mutex_unlock(&dev->mode_config.mutex);
+}
+EXPORT_SYMBOL(drm_kms_helper_poll_enable);
+
 /**
  * drm_kms_helper_poll_init - initialize and enable output polling
  * @dev: drm_device
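The split above follows a common kernel convention: a _locked variant that asserts the caller already holds the lock, plus a plain wrapper that takes the lock itself. A hedged userspace sketch of that convention, with hypothetical names and pthreads in place of the kernel mutex:

#include <pthread.h>

static pthread_mutex_t cfg_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Callers that already hold cfg_mutex use the _locked variant.
 * The kernel version makes the precondition loud with
 * WARN_ON(!mutex_is_locked(...)); here we simply document it. */
static void poll_enable_locked(void)
{
	/* ... schedule the polling work, cfg_mutex held ... */
}

/* External callers get a wrapper that takes the lock for them. */
static void poll_enable(void)
{
	pthread_mutex_lock(&cfg_mutex);
	poll_enable_locked();
	pthread_mutex_unlock(&cfg_mutex);
}

int main(void)
{
	poll_enable();
	return 0;
}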

+ 0 - 4
drivers/gpu/drm/i915/gvt/cmd_parser.c

@@ -481,7 +481,6 @@ struct parser_exec_state {
 	(s->vgpu->gvt->device_info.gmadr_bytes_in_cmd >> 2)
 
 static unsigned long bypass_scan_mask = 0;
-static bool bypass_batch_buffer_scan = true;
 
 /* ring ALL, type = 0 */
 static struct sub_op_bits sub_op_mi[] = {
@@ -1525,9 +1524,6 @@ static int batch_buffer_needs_scan(struct parser_exec_state *s)
 {
 	struct intel_gvt *gvt = s->vgpu->gvt;
 
-	if (bypass_batch_buffer_scan)
-		return 0;
-
 	if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)) {
 		/* BDW decides privilege based on address space */
 		if (cmd_val(s, 0) & (1 << 8))

+ 19 - 47
drivers/gpu/drm/i915/gvt/execlist.c

@@ -364,58 +364,30 @@ static void free_workload(struct intel_vgpu_workload *workload)
 #define get_desc_from_elsp_dwords(ed, i) \
 	((struct execlist_ctx_descriptor_format *)&((ed)->data[i * 2]))
 
-
-#define BATCH_BUFFER_ADDR_MASK ((1UL << 32) - (1U << 2))
-#define BATCH_BUFFER_ADDR_HIGH_MASK ((1UL << 16) - (1U))
-static int set_gma_to_bb_cmd(struct intel_shadow_bb_entry *entry_obj,
-			     unsigned long add, int gmadr_bytes)
-{
-	if (WARN_ON(gmadr_bytes != 4 && gmadr_bytes != 8))
-		return -1;
-
-	*((u32 *)(entry_obj->bb_start_cmd_va + (1 << 2))) = add &
-		BATCH_BUFFER_ADDR_MASK;
-	if (gmadr_bytes == 8) {
-		*((u32 *)(entry_obj->bb_start_cmd_va + (2 << 2))) =
-			add & BATCH_BUFFER_ADDR_HIGH_MASK;
-	}
-
-	return 0;
-}
-
 static void prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
 {
-	int gmadr_bytes = workload->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
+	const int gmadr_bytes = workload->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
+	struct intel_shadow_bb_entry *entry_obj;
 
 	/* pin the gem object to ggtt */
-	if (!list_empty(&workload->shadow_bb)) {
-		struct intel_shadow_bb_entry *entry_obj =
-			list_first_entry(&workload->shadow_bb,
-					 struct intel_shadow_bb_entry,
-					 list);
-		struct intel_shadow_bb_entry *temp;
+	list_for_each_entry(entry_obj, &workload->shadow_bb, list) {
+		struct i915_vma *vma;
 
-		list_for_each_entry_safe(entry_obj, temp, &workload->shadow_bb,
-				list) {
-			struct i915_vma *vma;
-
-			vma = i915_gem_object_ggtt_pin(entry_obj->obj, NULL, 0,
-						       4, 0);
-			if (IS_ERR(vma)) {
-				gvt_err("Cannot pin\n");
-				return;
-			}
-
-			/* FIXME: we are not tracking our pinned VMA leaving it
-			 * up to the core to fix up the stray pin_count upon
-			 * free.
-			 */
-
-			/* update the relocate gma with shadow batch buffer*/
-			set_gma_to_bb_cmd(entry_obj,
-					  i915_ggtt_offset(vma),
-					  gmadr_bytes);
+		vma = i915_gem_object_ggtt_pin(entry_obj->obj, NULL, 0, 4, 0);
+		if (IS_ERR(vma)) {
+			gvt_err("Cannot pin\n");
+			return;
 		}
+
+		/* FIXME: we are not tracking our pinned VMA leaving it
+		 * up to the core to fix up the stray pin_count upon
+		 * free.
+		 */
+
+		/* update the relocate gma with shadow batch buffer*/
+		entry_obj->bb_start_cmd_va[1] = i915_ggtt_offset(vma);
+		if (gmadr_bytes == 8)
+			entry_obj->bb_start_cmd_va[2] = 0;
 	}
 }
 
@@ -826,7 +798,7 @@ int intel_vgpu_init_execlist(struct intel_vgpu *vgpu)
 		INIT_LIST_HEAD(&vgpu->workload_q_head[i]);
 	}
 
-	vgpu->workloads = kmem_cache_create("gvt-g vgpu workload",
+	vgpu->workloads = kmem_cache_create("gvt-g_vgpu_workload",
 			sizeof(struct intel_vgpu_workload), 0,
 			SLAB_HWCACHE_ALIGN,
 			NULL);

+ 4 - 4
drivers/gpu/drm/i915/gvt/kvmgt.c

@@ -230,8 +230,8 @@ static struct intel_vgpu_type *intel_gvt_find_vgpu_type(struct intel_gvt *gvt,
 	return NULL;
 }
 
-static ssize_t available_instance_show(struct kobject *kobj, struct device *dev,
-		char *buf)
+static ssize_t available_instances_show(struct kobject *kobj,
+					struct device *dev, char *buf)
 {
 	struct intel_vgpu_type *type;
 	unsigned int num = 0;
@@ -269,12 +269,12 @@ static ssize_t description_show(struct kobject *kobj, struct device *dev,
 				type->fence);
 }
 
-static MDEV_TYPE_ATTR_RO(available_instance);
+static MDEV_TYPE_ATTR_RO(available_instances);
 static MDEV_TYPE_ATTR_RO(device_api);
 static MDEV_TYPE_ATTR_RO(description);
 
 static struct attribute *type_attrs[] = {
-	&mdev_type_attr_available_instance.attr,
+	&mdev_type_attr_available_instances.attr,
 	&mdev_type_attr_device_api.attr,
 	&mdev_type_attr_description.attr,
 	NULL,

+ 1 - 1
drivers/gpu/drm/i915/gvt/scheduler.h

@@ -113,7 +113,7 @@ struct intel_shadow_bb_entry {
 	struct drm_i915_gem_object *obj;
 	void *va;
 	unsigned long len;
-	void *bb_start_cmd_va;
+	u32 *bb_start_cmd_va;
};
 
 #define workload_q_head(vgpu, ring_id) \
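Typing bb_start_cmd_va as u32 * instead of void * is what lets the execlist hunk above replace the cast-heavy set_gma_to_bb_cmd() with plain array stores. A small standalone illustration of the two styles (values are invented for the example):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t cmd[3] = { 0 };
	void *untyped = cmd;

	/* Old style: byte offsets plus casts through a void pointer.
	 * (1 << 2) is the byte offset of the second 32-bit word. */
	*((uint32_t *)((char *)untyped + (1 << 2))) = 0x1234;

	/* New style: type the pointer once, then index in words. */
	uint32_t *bb_start_cmd_va = cmd;
	bb_start_cmd_va[1] = 0x1234;	/* the same store, no casts */
	bb_start_cmd_va[2] = 0;

	printf("%x %x\n", cmd[1], cmd[2]);
	return 0;
}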

+ 1 - 1
drivers/gpu/drm/i915/i915_drv.c

@@ -2378,7 +2378,7 @@ static int intel_runtime_suspend(struct device *kdev)
 
 	assert_forcewakes_inactive(dev_priv);
 
-	if (!IS_VALLEYVIEW(dev_priv) || !IS_CHERRYVIEW(dev_priv))
+	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
 		intel_hpd_poll_init(dev_priv);
 
 	DRM_DEBUG_KMS("Device suspended\n");
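The one-character fix above is a De Morgan bug: !A || !B is true for every device, since no device is both Valleyview and Cherryview, while the intended condition is !A && !B ("neither"). A tiny self-checking illustration; the platform checks are stand-ins, not the i915 macros:

#include <assert.h>
#include <stdbool.h>

static bool is_valleyview(int id) { return id == 1; }
static bool is_cherryview(int id) { return id == 2; }

int main(void)
{
	for (int id = 0; id < 4; id++) {
		/* Buggy form: always true, so the guarded call ran
		 * unconditionally, even on the excluded platforms. */
		bool buggy = !is_valleyview(id) || !is_cherryview(id);

		/* Fixed form: true only for devices that are neither. */
		bool fixed = !is_valleyview(id) && !is_cherryview(id);

		assert(buggy);			/* the old test was a no-op */
		if (id == 1 || id == 2)
			assert(!fixed);		/* now properly excluded */
	}
	return 0;
}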

+ 9 - 12
drivers/gpu/drm/i915/i915_drv.h

@@ -1012,6 +1012,8 @@ struct intel_fbc {
 	struct work_struct underrun_work;
 
 	struct intel_fbc_state_cache {
+		struct i915_vma *vma;
+
 		struct {
 			unsigned int mode_flags;
 			uint32_t hsw_bdw_pixel_rate;
@@ -1025,15 +1027,14 @@
 		} plane;
 
 		struct {
-			u64 ilk_ggtt_offset;
 			uint32_t pixel_format;
 			unsigned int stride;
-			int fence_reg;
-			unsigned int tiling_mode;
 		} fb;
 	} state_cache;
 
 	struct intel_fbc_reg_params {
+		struct i915_vma *vma;
+
 		struct {
 			enum pipe pipe;
 			enum plane plane;
@@ -1041,10 +1042,8 @@
 		} crtc;
 
 		struct {
-			u64 ggtt_offset;
 			uint32_t pixel_format;
 			unsigned int stride;
-			int fence_reg;
 		} fb;
 
 		int cfb_size;
@@ -1977,6 +1976,11 @@ struct drm_i915_private {
 
 	struct i915_frontbuffer_tracking fb_tracking;
 
+	struct intel_atomic_helper {
+		struct llist_head free_list;
+		struct work_struct free_work;
+	} atomic_helper;
+
 	u16 orig_clock;
 
 	bool mchbar_need_disable;
@@ -3163,13 +3167,6 @@ i915_gem_object_to_ggtt(struct drm_i915_gem_object *obj,
 	return i915_gem_obj_to_vma(obj, &to_i915(obj->base.dev)->ggtt.base, view);
 }
 
-static inline unsigned long
-i915_gem_object_ggtt_offset(struct drm_i915_gem_object *o,
-			    const struct i915_ggtt_view *view)
-{
-	return i915_ggtt_offset(i915_gem_object_to_ggtt(o, view));
-}
-
 /* i915_gem_fence_reg.c */
 int __must_check i915_vma_get_fence(struct i915_vma *vma);
 int __must_check i915_vma_put_fence(struct i915_vma *vma);

+ 1 - 0
drivers/gpu/drm/i915/i915_vma.c

@@ -185,6 +185,7 @@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
 			return ret;
 	}
 
+	trace_i915_vma_bind(vma, bind_flags);
 	ret = vma->vm->bind_vma(vma, cache_level, bind_flags);
 	if (ret)
 		return ret;

+ 20 - 0
drivers/gpu/drm/i915/intel_atomic_plane.c

@@ -85,6 +85,8 @@ intel_plane_duplicate_state(struct drm_plane *plane)
 
 	__drm_atomic_helper_plane_duplicate_state(plane, state);
 
+	intel_state->vma = NULL;
+
 	return state;
 }
 
@@ -100,6 +102,24 @@ void
 intel_plane_destroy_state(struct drm_plane *plane,
			  struct drm_plane_state *state)
 {
+	struct i915_vma *vma;
+
+	vma = fetch_and_zero(&to_intel_plane_state(state)->vma);
+
+	/*
+	 * FIXME: Normally intel_cleanup_plane_fb handles destruction of vma.
+	 * We currently don't clear all planes during driver unload, so we have
+	 * to be able to unpin vma here for now.
+	 *
+	 * Normally this can only happen during unload when kmscon is disabled
+	 * and userspace doesn't attempt to set a framebuffer at all.
+	 */
+	if (vma) {
+		mutex_lock(&plane->dev->struct_mutex);
+		intel_unpin_fb_vma(vma);
+		mutex_unlock(&plane->dev->struct_mutex);
+	}
+
 	drm_atomic_helper_plane_destroy_state(plane, state);
 }
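fetch_and_zero() takes ownership of a pointer and clears the slot in one step, so a later cleanup pass sees NULL and does nothing. A userspace rendition of the idiom, using a GNU C statement expression similar in spirit to the i915 helper; the surrounding types are invented for the example:

#include <stdio.h>
#include <stdlib.h>

#define fetch_and_zero(ptr) ({			\
	__typeof__(*(ptr)) __v = *(ptr);	\
	*(ptr) = (__typeof__(*(ptr)))0;		\
	__v;					\
})

struct vma { int pinned; };

static void unpin(struct vma *v)
{
	v->pinned = 0;
	free(v);
}

int main(void)
{
	struct vma *owned = malloc(sizeof(*owned));
	owned->pinned = 1;

	/* Take ownership and clear the slot atomically w.r.t. this
	 * thread, so a second cleanup path cannot double-unpin. */
	struct vma *v = fetch_and_zero(&owned);
	if (v)
		unpin(v);
	if (owned)	/* now NULL: the second pass is a no-op */
		unpin(owned);

	puts("single unpin, no double free");
	return 0;
}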

+ 5 - 4
drivers/gpu/drm/i915/intel_crt.c

@@ -499,6 +499,7 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector)
 	struct drm_i915_private *dev_priv = to_i915(crt->base.base.dev);
 	struct edid *edid;
 	struct i2c_adapter *i2c;
+	bool ret = false;
 
 	BUG_ON(crt->base.type != INTEL_OUTPUT_ANALOG);
 
@@ -515,17 +516,17 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector)
 		 */
 		if (!is_digital) {
 			DRM_DEBUG_KMS("CRT detected via DDC:0x50 [EDID]\n");
-			return true;
+			ret = true;
+		} else {
+			DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [EDID reports a digital panel]\n");
 		}
-
-		DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [EDID reports a digital panel]\n");
 	} else {
 		DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [no valid EDID found]\n");
 	}
 
 	kfree(edid);
 
-	return false;
+	return ret;
 }
 
 static enum drm_connector_status
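The CRT hunk above converts early returns into a single exit through a ret variable, so the kfree(edid) near the bottom runs on every path (the early return true used to leak it). The shape of that refactor, reduced to a sketch with illustrative names:

#include <stdbool.h>
#include <stdio.h>

static bool detect(bool have_edid, bool is_digital)
{
	bool ret = false;

	if (have_edid) {
		if (!is_digital)
			ret = true;	/* analog panel found */
		else
			puts("EDID reports a digital panel");
	} else {
		puts("no valid EDID found");
	}

	/* Shared cleanup runs on every path, like kfree(edid) above. */
	return ret;
}

int main(void)
{
	printf("%d\n", detect(true, false));
	return 0;
}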

+ 82 - 87
drivers/gpu/drm/i915/intel_display.c

@@ -2235,24 +2235,22 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, unsigned int rotation)
 			i915_vma_pin_fence(vma);
 	}
 
+	i915_vma_get(vma);
err:
 	intel_runtime_pm_put(dev_priv);
 	return vma;
 }
 
-void intel_unpin_fb_obj(struct drm_framebuffer *fb, unsigned int rotation)
+void intel_unpin_fb_vma(struct i915_vma *vma)
 {
-	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
-	struct i915_ggtt_view view;
-	struct i915_vma *vma;
+	lockdep_assert_held(&vma->vm->dev->struct_mutex);
 
-	WARN_ON(!mutex_is_locked(&obj->base.dev->struct_mutex));
-
-	intel_fill_fb_ggtt_view(&view, fb, rotation);
-	vma = i915_gem_object_to_ggtt(obj, &view);
+	if (WARN_ON_ONCE(!vma))
+		return;
 
 	i915_vma_unpin_fence(vma);
 	i915_gem_object_unpin_from_display_plane(vma);
+	i915_vma_put(vma);
 }
 
 static int intel_fb_pitch(const struct drm_framebuffer *fb, int plane,
@@ -2585,8 +2583,9 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv,
 			 * We only keep the x/y offsets, so push all of the
 			 * gtt offset into the x/y offsets.
 			 */
-			_intel_adjust_tile_offset(&x, &y, tile_size,
-						  tile_width, tile_height, pitch_tiles,
+			_intel_adjust_tile_offset(&x, &y,
+						  tile_width, tile_height,
+						  tile_size, pitch_tiles,
 						  gtt_offset_rotated * tile_size, 0);
 
 			gtt_offset_rotated += rot_info->plane[i].width * rot_info->plane[i].height;
@@ -2746,7 +2745,6 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
 	struct drm_device *dev = intel_crtc->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct drm_crtc *c;
-	struct intel_crtc *i;
 	struct drm_i915_gem_object *obj;
 	struct drm_plane *primary = intel_crtc->base.primary;
 	struct drm_plane_state *plane_state = primary->state;
@@ -2771,20 +2769,20 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
 	 * an fb with another CRTC instead
 	 */
 	for_each_crtc(dev, c) {
-		i = to_intel_crtc(c);
+		struct intel_plane_state *state;
 
 		if (c == &intel_crtc->base)
 			continue;
 
-		if (!i->active)
+		if (!to_intel_crtc(c)->active)
 			continue;
 
-		fb = c->primary->fb;
-		if (!fb)
+		state = to_intel_plane_state(c->primary->state);
+		if (!state->vma)
 			continue;
 
-		obj = intel_fb_obj(fb);
-		if (i915_gem_object_ggtt_offset(obj, NULL) == plane_config->base) {
+		if (intel_plane_ggtt_offset(state) == plane_config->base) {
+			fb = c->primary->fb;
 			drm_framebuffer_reference(fb);
 			goto valid_fb;
 		}
@@ -2805,6 +2803,19 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
 	return;
 
valid_fb:
+	mutex_lock(&dev->struct_mutex);
+	intel_state->vma =
+		intel_pin_and_fence_fb_obj(fb, primary->state->rotation);
+	mutex_unlock(&dev->struct_mutex);
+	if (IS_ERR(intel_state->vma)) {
+		DRM_ERROR("failed to pin boot fb on pipe %d: %li\n",
+			  intel_crtc->pipe, PTR_ERR(intel_state->vma));
+
+		intel_state->vma = NULL;
+		drm_framebuffer_unreference(fb);
+		return;
+	}
+
 	plane_state->src_x = 0;
 	plane_state->src_y = 0;
 	plane_state->src_w = fb->width << 16;
@@ -3100,13 +3111,13 @@ static void i9xx_update_primary_plane(struct drm_plane *primary,
 	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
 	if (INTEL_GEN(dev_priv) >= 4) {
 		I915_WRITE(DSPSURF(plane),
-			   intel_fb_gtt_offset(fb, rotation) +
+			   intel_plane_ggtt_offset(plane_state) +
 			   intel_crtc->dspaddr_offset);
 		I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
 		I915_WRITE(DSPLINOFF(plane), linear_offset);
 	} else {
 		I915_WRITE(DSPADDR(plane),
-			   intel_fb_gtt_offset(fb, rotation) +
+			   intel_plane_ggtt_offset(plane_state) +
 			   intel_crtc->dspaddr_offset);
 	}
 	POSTING_READ(reg);
@@ -3203,7 +3214,7 @@ static void ironlake_update_primary_plane(struct drm_plane *primary,
 
 	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
 	I915_WRITE(DSPSURF(plane),
-		   intel_fb_gtt_offset(fb, rotation) +
+		   intel_plane_ggtt_offset(plane_state) +
 		   intel_crtc->dspaddr_offset);
 	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
 		I915_WRITE(DSPOFFSET(plane), (y << 16) | x);
@@ -3226,23 +3237,6 @@ u32 intel_fb_stride_alignment(const struct drm_i915_private *dev_priv,
 	}
 }
 
-u32 intel_fb_gtt_offset(struct drm_framebuffer *fb,
-			unsigned int rotation)
-{
-	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
-	struct i915_ggtt_view view;
-	struct i915_vma *vma;
-
-	intel_fill_fb_ggtt_view(&view, fb, rotation);
-
-	vma = i915_gem_object_to_ggtt(obj, &view);
-	if (WARN(!vma, "ggtt vma for display object not found! (view=%u)\n",
-		 view.type))
-		return -1;
-
-	return i915_ggtt_offset(vma);
-}
-
 static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
 {
 	struct drm_device *dev = intel_crtc->base.dev;
@@ -3437,7 +3431,7 @@ static void skylake_update_primary_plane(struct drm_plane *plane,
 	}
 
 	I915_WRITE(PLANE_SURF(pipe, 0),
-		   intel_fb_gtt_offset(fb, rotation) + surf_addr);
+		   intel_plane_ggtt_offset(plane_state) + surf_addr);
 
 	POSTING_READ(PLANE_SURF(pipe, 0));
 }
@@ -6849,6 +6843,12 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
 	}
 
 	state = drm_atomic_state_alloc(crtc->dev);
+	if (!state) {
+		DRM_DEBUG_KMS("failed to disable [CRTC:%d:%s], out of memory",
+			      crtc->base.id, crtc->name);
+		return;
+	}
+
 	state->acquire_ctx = crtc->dev->mode_config.acquire_ctx;
 
 	/* Everything's already locked, -EDEADLK can't happen. */
@@ -11246,6 +11246,7 @@ found:
 	}
 
 	old->restore_state = restore_state;
+	drm_atomic_state_put(state);
 
 	/* let the connector get through one full cycle before testing */
 	intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
@@ -11525,7 +11526,7 @@ static void intel_unpin_work_fn(struct work_struct *__work)
 		flush_work(&work->mmio_work);
 
 	mutex_lock(&dev->struct_mutex);
-	intel_unpin_fb_obj(work->old_fb, primary->state->rotation);
+	intel_unpin_fb_vma(work->old_vma);
 	i915_gem_object_put(work->pending_flip_obj);
 	mutex_unlock(&dev->struct_mutex);
 
@@ -12235,8 +12236,10 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 		goto cleanup_pending;
 	}
 
-	work->gtt_offset = intel_fb_gtt_offset(fb, primary->state->rotation);
-	work->gtt_offset += intel_crtc->dspaddr_offset;
+	work->old_vma = to_intel_plane_state(primary->state)->vma;
+	to_intel_plane_state(primary->state)->vma = vma;
+
+	work->gtt_offset = i915_ggtt_offset(vma) + intel_crtc->dspaddr_offset;
 	work->rotation = crtc->primary->state->rotation;
 
 	/*
@@ -12290,7 +12293,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
cleanup_request:
 	i915_add_request_no_flush(request);
cleanup_unpin:
-	intel_unpin_fb_obj(fb, crtc->primary->state->rotation);
+	to_intel_plane_state(primary->state)->vma = work->old_vma;
+	intel_unpin_fb_vma(vma);
cleanup_pending:
 	atomic_dec(&intel_crtc->unpin_work_count);
unlock:
@@ -14515,8 +14519,14 @@ intel_atomic_commit_ready(struct i915_sw_fence *fence,
 		break;
 
 	case FENCE_FREE:
-		drm_atomic_state_put(&state->base);
-		break;
+		{
+			struct intel_atomic_helper *helper =
+				&to_i915(state->base.dev)->atomic_helper;
+
+			if (llist_add(&state->freed, &helper->free_list))
+				schedule_work(&helper->free_work);
+			break;
+		}
 	}
 
 	return NOTIFY_DONE;
@@ -14777,6 +14787,8 @@ intel_prepare_plane_fb(struct drm_plane *plane,
 			DRM_DEBUG_KMS("failed to pin object\n");
 			return PTR_ERR(vma);
 		}
+
+		to_intel_plane_state(new_state)->vma = vma;
 	}
 
 	return 0;
@@ -14795,19 +14807,12 @@ void
 intel_cleanup_plane_fb(struct drm_plane *plane,
		       struct drm_plane_state *old_state)
 {
-	struct drm_i915_private *dev_priv = to_i915(plane->dev);
-	struct intel_plane_state *old_intel_state;
-	struct drm_i915_gem_object *old_obj = intel_fb_obj(old_state->fb);
-	struct drm_i915_gem_object *obj = intel_fb_obj(plane->state->fb);
-
-	old_intel_state = to_intel_plane_state(old_state);
-
-	if (!obj && !old_obj)
-		return;
+	struct i915_vma *vma;
 
-	if (old_obj && (plane->type != DRM_PLANE_TYPE_CURSOR ||
-	    !INTEL_INFO(dev_priv)->cursor_needs_physical))
-		intel_unpin_fb_obj(old_state->fb, old_state->rotation);
+	/* Should only be called after a successful intel_prepare_plane_fb()! */
+	vma = fetch_and_zero(&to_intel_plane_state(old_state)->vma);
+	if (vma)
+		intel_unpin_fb_vma(vma);
 }
 
 int
@@ -15149,7 +15154,7 @@ intel_update_cursor_plane(struct drm_plane *plane,
 	if (!obj)
 		addr = 0;
 	else if (!INTEL_INFO(dev_priv)->cursor_needs_physical)
-		addr = i915_gem_object_ggtt_offset(obj, NULL);
+		addr = intel_plane_ggtt_offset(state);
 	else
 		addr = obj->phys_handle->busaddr;
 
@@ -16395,6 +16400,18 @@ fail:
 	drm_modeset_acquire_fini(&ctx);
 }
 
+static void intel_atomic_helper_free_state(struct work_struct *work)
+{
+	struct drm_i915_private *dev_priv =
+		container_of(work, typeof(*dev_priv), atomic_helper.free_work);
+	struct intel_atomic_state *state, *next;
+	struct llist_node *freed;
+
+	freed = llist_del_all(&dev_priv->atomic_helper.free_list);
+	llist_for_each_entry_safe(state, next, freed, freed)
+		drm_atomic_state_put(&state->base);
+}
+
 int intel_modeset_init(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
@@ -16414,6 +16431,9 @@ int intel_modeset_init(struct drm_device *dev)
 
 	dev->mode_config.funcs = &intel_mode_funcs;
 
+	INIT_WORK(&dev_priv->atomic_helper.free_work,
+		  intel_atomic_helper_free_state);
+
 	intel_init_quirks(dev);
 
 	intel_init_pm(dev_priv);
@@ -17027,47 +17047,19 @@ void intel_display_resume(struct drm_device *dev)
 
 	if (ret)
 		DRM_ERROR("Restoring old state failed with %i\n", ret);
-	drm_atomic_state_put(state);
+	if (state)
+		drm_atomic_state_put(state);
 }
 
 void intel_modeset_gem_init(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct drm_crtc *c;
-	struct drm_i915_gem_object *obj;
 
 	intel_init_gt_powersave(dev_priv);
 
 	intel_modeset_init_hw(dev);
 
 	intel_setup_overlay(dev_priv);
-
-	/*
-	 * Make sure any fbs we allocated at startup are properly
-	 * pinned & fenced.  When we do the allocation it's too early
-	 * for this.
-	 */
-	for_each_crtc(dev, c) {
-		struct i915_vma *vma;
-
-		obj = intel_fb_obj(c->primary->fb);
-		if (obj == NULL)
-			continue;
-
-		mutex_lock(&dev->struct_mutex);
-		vma = intel_pin_and_fence_fb_obj(c->primary->fb,
-						 c->primary->state->rotation);
-		mutex_unlock(&dev->struct_mutex);
-		if (IS_ERR(vma)) {
-			DRM_ERROR("failed to pin boot fb on pipe %d\n",
-				  to_intel_crtc(c)->pipe);
-			drm_framebuffer_unreference(c->primary->fb);
-			c->primary->fb = NULL;
-			c->primary->crtc = c->primary->state->crtc = NULL;
-			update_state_fb(c->primary);
-			c->state->plane_mask &= ~(1 << drm_plane_index(c->primary));
-		}
-	}
 }
 
 int intel_connector_register(struct drm_connector *connector)
@@ -17097,6 +17089,9 @@ void intel_modeset_cleanup(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
 
+	flush_work(&dev_priv->atomic_helper.free_work);
+	WARN_ON(!llist_empty(&dev_priv->atomic_helper.free_list));
+
 	intel_disable_gt_powersave(dev_priv);
 
 	/*
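The atomic_helper machinery above defers drm_atomic_state_put() to a worker: contexts that cannot sleep push states onto a lock-free llist and the work function drains the whole list in one batch. A simplified userspace sketch of the same produce/drain split; a mutex-protected singly linked list stands in for the kernel's lock-free llist and workqueue:

#include <pthread.h>
#include <stdlib.h>

struct state {
	struct state *next;
	/* ... payload ... */
};

static pthread_mutex_t free_lock = PTHREAD_MUTEX_INITIALIZER;
static struct state *free_list;

/* Called from a context that must not block on teardown: just queue
 * the node (the kernel would schedule_work() here). */
static void state_put_deferred(struct state *s)
{
	pthread_mutex_lock(&free_lock);
	s->next = free_list;
	free_list = s;
	pthread_mutex_unlock(&free_lock);
}

/* The work function: detach the whole list, then free at leisure. */
static void free_work(void)
{
	pthread_mutex_lock(&free_lock);
	struct state *batch = free_list;
	free_list = NULL;
	pthread_mutex_unlock(&free_lock);

	while (batch) {
		struct state *next = batch->next;
		free(batch);
		batch = next;
	}
}

int main(void)
{
	state_put_deferred(calloc(1, sizeof(struct state)));
	state_put_deferred(calloc(1, sizeof(struct state)));
	free_work();	/* flushed before "unload", as in the hunk */
	return 0;
}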

+ 9 - 2
drivers/gpu/drm/i915/intel_drv.h

@@ -370,11 +370,14 @@ struct intel_atomic_state {
 	struct skl_wm_values wm_results;
 
 	struct i915_sw_fence commit_ready;
+
+	struct llist_node freed;
 };
 
 struct intel_plane_state {
 	struct drm_plane_state base;
 	struct drm_rect clip;
+	struct i915_vma *vma;
 
 	struct {
 		u32 offset;
@@ -1044,6 +1047,7 @@ struct intel_flip_work {
 	struct work_struct mmio_work;
 
 	struct drm_crtc *crtc;
+	struct i915_vma *old_vma;
 	struct drm_framebuffer *old_fb;
 	struct drm_i915_gem_object *pending_flip_obj;
 	struct drm_pending_vblank_event *event;
@@ -1271,7 +1275,7 @@ void intel_release_load_detect_pipe(struct drm_connector *connector,
				    struct drm_modeset_acquire_ctx *ctx);
 struct i915_vma *
 intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, unsigned int rotation);
-void intel_unpin_fb_obj(struct drm_framebuffer *fb, unsigned int rotation);
+void intel_unpin_fb_vma(struct i915_vma *vma);
 struct drm_framebuffer *
 __intel_framebuffer_create(struct drm_device *dev,
			   struct drm_mode_fb_cmd2 *mode_cmd,
@@ -1360,7 +1364,10 @@ void intel_mode_from_pipe_config(struct drm_display_mode *mode,
 int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state);
 int skl_max_scale(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state);
 
-u32 intel_fb_gtt_offset(struct drm_framebuffer *fb, unsigned int rotation);
+static inline u32 intel_plane_ggtt_offset(const struct intel_plane_state *state)
+{
+	return i915_ggtt_offset(state->vma);
+}
 
 u32 skl_plane_ctl_format(uint32_t pixel_format);
 u32 skl_plane_ctl_tiling(uint64_t fb_modifier);

+ 20 - 32
drivers/gpu/drm/i915/intel_fbc.c

@@ -173,7 +173,7 @@ static void i8xx_fbc_activate(struct drm_i915_private *dev_priv)
 	if (IS_I945GM(dev_priv))
 		fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
 	fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
-	fbc_ctl |= params->fb.fence_reg;
+	fbc_ctl |= params->vma->fence->id;
 	I915_WRITE(FBC_CONTROL, fbc_ctl);
 }
 
@@ -193,8 +193,8 @@ static void g4x_fbc_activate(struct drm_i915_private *dev_priv)
 	else
 		dpfc_ctl |= DPFC_CTL_LIMIT_1X;
 
-	if (params->fb.fence_reg != I915_FENCE_REG_NONE) {
-		dpfc_ctl |= DPFC_CTL_FENCE_EN | params->fb.fence_reg;
+	if (params->vma->fence) {
+		dpfc_ctl |= DPFC_CTL_FENCE_EN | params->vma->fence->id;
 		I915_WRITE(DPFC_FENCE_YOFF, params->crtc.fence_y_offset);
 	} else {
 		I915_WRITE(DPFC_FENCE_YOFF, 0);
@@ -251,13 +251,14 @@ static void ilk_fbc_activate(struct drm_i915_private *dev_priv)
 		break;
 	}
 
-	if (params->fb.fence_reg != I915_FENCE_REG_NONE) {
+	if (params->vma->fence) {
 		dpfc_ctl |= DPFC_CTL_FENCE_EN;
 		if (IS_GEN5(dev_priv))
-			dpfc_ctl |= params->fb.fence_reg;
+			dpfc_ctl |= params->vma->fence->id;
 		if (IS_GEN6(dev_priv)) {
 			I915_WRITE(SNB_DPFC_CTL_SA,
-				   SNB_CPU_FENCE_ENABLE | params->fb.fence_reg);
+				   SNB_CPU_FENCE_ENABLE |
+				   params->vma->fence->id);
 			I915_WRITE(DPFC_CPU_FENCE_OFFSET,
				   params->crtc.fence_y_offset);
 		}
@@ -269,7 +270,8 @@ static void ilk_fbc_activate(struct drm_i915_private *dev_priv)
 	}
 
 	I915_WRITE(ILK_DPFC_FENCE_YOFF, params->crtc.fence_y_offset);
-	I915_WRITE(ILK_FBC_RT_BASE, params->fb.ggtt_offset | ILK_FBC_RT_VALID);
+	I915_WRITE(ILK_FBC_RT_BASE,
+		   i915_ggtt_offset(params->vma) | ILK_FBC_RT_VALID);
 	/* enable it... */
 	I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
 
@@ -319,10 +321,11 @@ static void gen7_fbc_activate(struct drm_i915_private *dev_priv)
 		break;
 	}
 
-	if (params->fb.fence_reg != I915_FENCE_REG_NONE) {
+	if (params->vma->fence) {
 		dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN;
 		I915_WRITE(SNB_DPFC_CTL_SA,
-			   SNB_CPU_FENCE_ENABLE | params->fb.fence_reg);
+			   SNB_CPU_FENCE_ENABLE |
+			   params->vma->fence->id);
 		I915_WRITE(DPFC_CPU_FENCE_OFFSET, params->crtc.fence_y_offset);
 	} else {
 		I915_WRITE(SNB_DPFC_CTL_SA,0);
@@ -727,14 +730,6 @@ static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc)
 	return effective_w <= max_w && effective_h <= max_h;
 }
 
-/* XXX replace me when we have VMA tracking for intel_plane_state */
-static int get_fence_id(struct drm_framebuffer *fb)
-{
-	struct i915_vma *vma = i915_gem_object_to_ggtt(intel_fb_obj(fb), NULL);
-
-	return vma && vma->fence ? vma->fence->id : I915_FENCE_REG_NONE;
-}
-
 static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
					 struct intel_crtc_state *crtc_state,
					 struct intel_plane_state *plane_state)
@@ -743,7 +738,8 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
 	struct intel_fbc *fbc = &dev_priv->fbc;
 	struct intel_fbc_state_cache *cache = &fbc->state_cache;
 	struct drm_framebuffer *fb = plane_state->base.fb;
-	struct drm_i915_gem_object *obj;
+
+	cache->vma = NULL;
 
 	cache->crtc.mode_flags = crtc_state->base.adjusted_mode.flags;
 	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
@@ -758,16 +754,10 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
 	if (!cache->plane.visible)
 		return;
 
-	obj = intel_fb_obj(fb);
-
-	/* FIXME: We lack the proper locking here, so only run this on the
-	 * platforms that need. */
-	if (IS_GEN(dev_priv, 5, 6))
-		cache->fb.ilk_ggtt_offset = i915_gem_object_ggtt_offset(obj, NULL);
 	cache->fb.pixel_format = fb->pixel_format;
 	cache->fb.stride = fb->pitches[0];
-	cache->fb.fence_reg = get_fence_id(fb);
-	cache->fb.tiling_mode = i915_gem_object_get_tiling(obj);
+
+	cache->vma = plane_state->vma;
 }
 
 static bool intel_fbc_can_activate(struct intel_crtc *crtc)
@@ -784,7 +774,7 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
 		return false;
 	}
 
-	if (!cache->plane.visible) {
+	if (!cache->vma) {
 		fbc->no_fbc_reason = "primary plane not visible";
 		return false;
 	}
@@ -807,8 +797,7 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
 	 * so have no fence associated with it) due to aperture constaints
 	 * at the time of pinning.
 	 */
-	if (cache->fb.tiling_mode != I915_TILING_X ||
-	    cache->fb.fence_reg == I915_FENCE_REG_NONE) {
+	if (!cache->vma->fence) {
 		fbc->no_fbc_reason = "framebuffer not tiled or fenced";
 		return false;
 	}
@@ -888,17 +877,16 @@ static void intel_fbc_get_reg_params(struct intel_crtc *crtc,
 	 * zero. */
 	memset(params, 0, sizeof(*params));
 
+	params->vma = cache->vma;
+
 	params->crtc.pipe = crtc->pipe;
 	params->crtc.plane = crtc->plane;
 	params->crtc.fence_y_offset = get_crtc_fence_y_offset(crtc);
 
 	params->fb.pixel_format = cache->fb.pixel_format;
 	params->fb.stride = cache->fb.stride;
-	params->fb.fence_reg = cache->fb.fence_reg;
 
 	params->cfb_size = intel_fbc_calculate_cfb_size(dev_priv, cache);
-
-	params->fb.ggtt_offset = cache->fb.ilk_ggtt_offset;
 }
 
 static bool intel_fbc_reg_params_equal(struct intel_fbc_reg_params *params1,

+ 5 - 2
drivers/gpu/drm/i915/intel_fbdev.c

@@ -284,7 +284,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
out_destroy_fbi:
 	drm_fb_helper_release_fbi(helper);
out_unpin:
-	intel_unpin_fb_obj(&ifbdev->fb->base, DRM_ROTATE_0);
+	intel_unpin_fb_vma(vma);
out_unlock:
 	mutex_unlock(&dev->struct_mutex);
 	return ret;
@@ -549,7 +549,7 @@ static void intel_fbdev_destroy(struct intel_fbdev *ifbdev)
 
 	if (ifbdev->fb) {
 		mutex_lock(&ifbdev->helper.dev->struct_mutex);
-		intel_unpin_fb_obj(&ifbdev->fb->base, DRM_ROTATE_0);
+		intel_unpin_fb_vma(ifbdev->vma);
 		mutex_unlock(&ifbdev->helper.dev->struct_mutex);
 
 		drm_framebuffer_remove(&ifbdev->fb->base);
@@ -742,6 +742,9 @@ void intel_fbdev_initial_config_async(struct drm_device *dev)
 {
 	struct intel_fbdev *ifbdev = to_i915(dev)->fbdev;
 
+	if (!ifbdev)
+		return;
+
 	ifbdev->cookie = async_schedule(intel_fbdev_initial_config, ifbdev);
 }
 

+ 2 - 2
drivers/gpu/drm/i915/intel_hotplug.c

@@ -180,7 +180,7 @@ static void intel_hpd_irq_storm_disable(struct drm_i915_private *dev_priv)
 
 	/* Enable polling and queue hotplug re-enabling. */
 	if (hpd_disabled) {
-		drm_kms_helper_poll_enable(dev);
+		drm_kms_helper_poll_enable_locked(dev);
 		mod_delayed_work(system_wq, &dev_priv->hotplug.reenable_work,
				 msecs_to_jiffies(HPD_STORM_REENABLE_DELAY));
 	}
@@ -511,7 +511,7 @@ static void i915_hpd_poll_init_work(struct work_struct *work)
 	}
 
 	if (enabled)
-		drm_kms_helper_poll_enable(dev);
+		drm_kms_helper_poll_enable_locked(dev);
 
 	mutex_unlock(&dev->mode_config.mutex);
 

+ 4 - 4
drivers/gpu/drm/i915/intel_sprite.c

@@ -273,7 +273,7 @@ skl_update_plane(struct drm_plane *drm_plane,
 
 	I915_WRITE(PLANE_CTL(pipe, plane), plane_ctl);
 	I915_WRITE(PLANE_SURF(pipe, plane),
-		   intel_fb_gtt_offset(fb, rotation) + surf_addr);
+		   intel_plane_ggtt_offset(plane_state) + surf_addr);
 	POSTING_READ(PLANE_SURF(pipe, plane));
 }
 
@@ -458,7 +458,7 @@ vlv_update_plane(struct drm_plane *dplane,
 	I915_WRITE(SPSIZE(pipe, plane), (crtc_h << 16) | crtc_w);
 	I915_WRITE(SPCNTR(pipe, plane), sprctl);
 	I915_WRITE(SPSURF(pipe, plane),
-		   intel_fb_gtt_offset(fb, rotation) + sprsurf_offset);
+		   intel_plane_ggtt_offset(plane_state) + sprsurf_offset);
 	POSTING_READ(SPSURF(pipe, plane));
 }
 
@@ -594,7 +594,7 @@ ivb_update_plane(struct drm_plane *plane,
 		I915_WRITE(SPRSCALE(pipe), sprscale);
 	I915_WRITE(SPRCTL(pipe), sprctl);
 	I915_WRITE(SPRSURF(pipe),
-		   intel_fb_gtt_offset(fb, rotation) + sprsurf_offset);
+		   intel_plane_ggtt_offset(plane_state) + sprsurf_offset);
 	POSTING_READ(SPRSURF(pipe));
 }
 
@@ -721,7 +721,7 @@ ilk_update_plane(struct drm_plane *plane,
 	I915_WRITE(DVSSCALE(pipe), dvsscale);
 	I915_WRITE(DVSCNTR(pipe), dvscntr);
 	I915_WRITE(DVSSURF(pipe),
-		   intel_fb_gtt_offset(fb, rotation) + dvssurf_offset);
+		   intel_plane_ggtt_offset(plane_state) + dvssurf_offset);
 	POSTING_READ(DVSSURF(pipe));
 }
 

+ 2 - 1
drivers/gpu/drm/nouveau/dispnv04/hw.c

@@ -222,6 +222,7 @@ nouveau_hw_get_clock(struct drm_device *dev, enum nvbios_pll_type plltype)
 		uint32_t mpllP;
 
 		pci_read_config_dword(pci_get_bus_and_slot(0, 3), 0x6c, &mpllP);
+		mpllP = (mpllP >> 8) & 0xf;
 		if (!mpllP)
 			mpllP = 4;
 
@@ -232,7 +233,7 @@ nouveau_hw_get_clock(struct drm_device *dev, enum nvbios_pll_type plltype)
 		uint32_t clock;
 
 		pci_read_config_dword(pci_get_bus_and_slot(0, 5), 0x4c, &clock);
-		return clock;
+		return clock / 1000;
 	}
 
 	ret = nouveau_hw_get_pllvals(dev, plltype, &pllvals);
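Two small fixes in this hunk: the divider value lives in a bitfield of the config word, so the old code compared the whole raw word against zero and the fallback to 4 could never fire, and the other path needed a unit conversion on the returned clock. A standalone illustration; the register layout (bits 11:8) and the kHz-to-MHz scaling are assumptions made for the example, not a statement about the actual hardware:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t reg = 0x00000200;		/* example raw config word */

	/* Extract the field first, then test it; testing `reg` itself
	 * would almost never be zero even when the field is. */
	uint32_t mpllP = (reg >> 8) & 0xf;	/* bits 11:8 */
	if (!mpllP)
		mpllP = 4;			/* fallback now reachable */

	uint32_t clock_khz = 400000;
	printf("P=%u clock=%u\n", mpllP, clock_khz / 1000);
	return 0;
}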

+ 2 - 1
drivers/gpu/drm/nouveau/nouveau_display.c

@@ -411,7 +411,8 @@ nouveau_display_init(struct drm_device *dev)
 		return ret;
 
 	/* enable polling for external displays */
-	drm_kms_helper_poll_enable(dev);
+	if (!dev->mode_config.poll_enabled)
+		drm_kms_helper_poll_enable(dev);
 
 	/* enable hotplug interrupts */
 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {

+ 4 - 1
drivers/gpu/drm/nouveau/nouveau_drm.c

@@ -773,7 +773,10 @@ nouveau_pmops_runtime_resume(struct device *dev)
 	pci_set_master(pdev);
 
 	ret = nouveau_do_resume(drm_dev, true);
-	drm_kms_helper_poll_enable(drm_dev);
+
+	if (!drm_dev->mode_config.poll_enabled)
+		drm_kms_helper_poll_enable(drm_dev);
+
 	/* do magic */
 	nvif_mask(&device->object, 0x088488, (1 << 25), (1 << 25));
 	vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_ON);

+ 2 - 0
drivers/gpu/drm/nouveau/nouveau_drv.h

@@ -165,6 +165,8 @@ struct nouveau_drm {
 	struct backlight_device *backlight;
 	struct list_head bl_connectors;
 	struct work_struct hpd_work;
+	struct work_struct fbcon_work;
+	int fbcon_new_state;
 #ifdef CONFIG_ACPI
 	struct notifier_block acpi_nb;
 #endif

+ 34 - 9
drivers/gpu/drm/nouveau/nouveau_fbcon.c

@@ -470,19 +470,43 @@ static const struct drm_fb_helper_funcs nouveau_fbcon_helper_funcs = {
 	.fb_probe = nouveau_fbcon_create,
 };
 
+static void
+nouveau_fbcon_set_suspend_work(struct work_struct *work)
+{
+	struct nouveau_drm *drm = container_of(work, typeof(*drm), fbcon_work);
+	int state = READ_ONCE(drm->fbcon_new_state);
+
+	if (state == FBINFO_STATE_RUNNING)
+		pm_runtime_get_sync(drm->dev->dev);
+
+	console_lock();
+	if (state == FBINFO_STATE_RUNNING)
+		nouveau_fbcon_accel_restore(drm->dev);
+	drm_fb_helper_set_suspend(&drm->fbcon->helper, state);
+	if (state != FBINFO_STATE_RUNNING)
+		nouveau_fbcon_accel_save_disable(drm->dev);
+	console_unlock();
+
+	if (state == FBINFO_STATE_RUNNING) {
+		pm_runtime_mark_last_busy(drm->dev->dev);
+		pm_runtime_put_sync(drm->dev->dev);
+	}
+}
+
 void
 nouveau_fbcon_set_suspend(struct drm_device *dev, int state)
 {
 	struct nouveau_drm *drm = nouveau_drm(dev);
-	if (drm->fbcon) {
-		console_lock();
-		if (state == FBINFO_STATE_RUNNING)
-			nouveau_fbcon_accel_restore(dev);
-		drm_fb_helper_set_suspend(&drm->fbcon->helper, state);
-		if (state != FBINFO_STATE_RUNNING)
-			nouveau_fbcon_accel_save_disable(dev);
-		console_unlock();
-	}
+
+	if (!drm->fbcon)
+		return;
+
+	drm->fbcon_new_state = state;
+	/* Since runtime resume can happen as a result of a sysfs operation,
+	 * it's possible we already have the console locked. So handle fbcon
+	 * init/deinit from a separate work thread
+	 */
+	schedule_work(&drm->fbcon_work);
 }
 
 int
@@ -502,6 +526,7 @@ nouveau_fbcon_init(struct drm_device *dev)
 		return -ENOMEM;
 
 	drm->fbcon = fbcon;
+	INIT_WORK(&drm->fbcon_work, nouveau_fbcon_set_suspend_work);
 
 	drm_fb_helper_prepare(dev, &fbcon->helper, &nouveau_fbcon_helper_funcs);
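Handing the suspend/resume handling to a work item avoids taking console_lock from a context that may already hold it: the caller only records the requested state and pokes the worker. A rough userspace analogue of that handoff, where a thread stands in for schedule_work() and the names are illustrative:

#include <pthread.h>
#include <stdio.h>

static int fbcon_new_state;
static pthread_mutex_t console_lock = PTHREAD_MUTEX_INITIALIZER;

static void *fbcon_worker(void *arg)
{
	(void)arg;
	int state = __atomic_load_n(&fbcon_new_state, __ATOMIC_RELAXED);

	pthread_mutex_lock(&console_lock);	/* safe to block here */
	printf("applying fbcon state %d\n", state);
	pthread_mutex_unlock(&console_lock);
	return NULL;
}

/* May be reached while the caller already holds a contended lock;
 * it therefore never touches console_lock itself. */
static void fbcon_set_suspend(int state)
{
	pthread_t t;

	__atomic_store_n(&fbcon_new_state, state, __ATOMIC_RELAXED);
	pthread_create(&t, NULL, fbcon_worker, NULL);
	pthread_join(&t, NULL);	/* the kernel worker runs asynchronously */
}

int main(void)
{
	fbcon_set_suspend(1);
	return 0;
}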

+ 1 - 0
drivers/gpu/drm/nouveau/nouveau_fence.h

@@ -99,6 +99,7 @@ struct nv84_fence_priv {
 	struct nouveau_bo *bo;
 	struct nouveau_bo *bo_gart;
 	u32 *suspend;
+	struct mutex mutex;
 };
 
 int  nv84_fence_context_new(struct nouveau_channel *);

+ 1 - 1
drivers/gpu/drm/nouveau/nouveau_led.h

@@ -42,7 +42,7 @@ nouveau_led(struct drm_device *dev)
 }
 
 /* nouveau_led.c */
-#if IS_ENABLED(CONFIG_LEDS_CLASS)
+#if IS_REACHABLE(CONFIG_LEDS_CLASS)
 int  nouveau_led_init(struct drm_device *dev);
 void nouveau_led_suspend(struct drm_device *dev);
 void nouveau_led_resume(struct drm_device *dev);

+ 2 - 1
drivers/gpu/drm/nouveau/nouveau_usif.c

@@ -313,7 +313,8 @@ usif_ioctl(struct drm_file *filp, void __user *user, u32 argc)
 	if (!(ret = nvif_unpack(-ENOSYS, &data, &size, argv->v0, 0, 0, true))) {
 		/* block access to objects not created via this interface */
 		owner = argv->v0.owner;
-		if (argv->v0.object == 0ULL)
+		if (argv->v0.object == 0ULL &&
+		    argv->v0.type != NVIF_IOCTL_V0_DEL)
 			argv->v0.owner = NVDRM_OBJECT_ANY; /* except client */
 		else
 			argv->v0.owner = NVDRM_OBJECT_USIF;

+ 6 - 0
drivers/gpu/drm/nouveau/nv50_display.c

@@ -4052,6 +4052,11 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
 		}
 	}
 
+	for_each_crtc_in_state(state, crtc, crtc_state, i) {
+		if (crtc->state->event)
+			drm_crtc_vblank_get(crtc);
+	}
+
 	/* Update plane(s). */
 	for_each_plane_in_state(state, plane, plane_state, i) {
 		struct nv50_wndw_atom *asyw = nv50_wndw_atom(plane->state);
@@ -4101,6 +4106,7 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
 			drm_crtc_send_vblank_event(crtc, crtc->state->event);
 			spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
 			crtc->state->event = NULL;
+			drm_crtc_vblank_put(crtc);
 		}
 	}
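The nv50 hunk pairs a drm_crtc_vblank_get() for every CRTC that will emit an event with a drm_crtc_vblank_put() once the event is sent, so the vblank interrupt stays referenced exactly as long as an event is outstanding. Reduced to its skeleton, with a plain counter standing in for the kernel's reference count:

#include <stdio.h>

static int vblank_refs;

static void vblank_get(void) { vblank_refs++; }
static void vblank_put(void) { vblank_refs--; }

int main(void)
{
	int crtcs_with_event = 2;

	for (int i = 0; i < crtcs_with_event; i++)
		vblank_get();		/* before the plane updates */

	for (int i = 0; i < crtcs_with_event; i++) {
		/* ... send the vblank event for this CRTC ... */
		vblank_put();		/* after the event is sent */
	}

	printf("balanced: %d\n", vblank_refs == 0);
	return 0;
}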

+ 6 - 0
drivers/gpu/drm/nouveau/nv84_fence.c

@@ -107,8 +107,10 @@ nv84_fence_context_del(struct nouveau_channel *chan)
 	struct nv84_fence_chan *fctx = chan->fence;
 
 	nouveau_bo_wr32(priv->bo, chan->chid * 16 / 4, fctx->base.sequence);
+	mutex_lock(&priv->mutex);
 	nouveau_bo_vma_del(priv->bo, &fctx->vma_gart);
 	nouveau_bo_vma_del(priv->bo, &fctx->vma);
+	mutex_unlock(&priv->mutex);
 	nouveau_fence_context_del(&fctx->base);
 	chan->fence = NULL;
 	nouveau_fence_context_free(&fctx->base);
@@ -134,11 +136,13 @@ nv84_fence_context_new(struct nouveau_channel *chan)
 	fctx->base.sync32 = nv84_fence_sync32;
 	fctx->base.sequence = nv84_fence_read(chan);
 
+	mutex_lock(&priv->mutex);
 	ret = nouveau_bo_vma_add(priv->bo, cli->vm, &fctx->vma);
 	if (ret == 0) {
 		ret = nouveau_bo_vma_add(priv->bo_gart, cli->vm,
					&fctx->vma_gart);
 	}
+	mutex_unlock(&priv->mutex);
 
 	if (ret)
 		nv84_fence_context_del(chan);
@@ -212,6 +216,8 @@ nv84_fence_create(struct nouveau_drm *drm)
 	priv->base.context_base = dma_fence_context_alloc(priv->base.contexts);
 	priv->base.uevent = true;
 
+	mutex_init(&priv->mutex);
+
 	/* Use VRAM if there is any ; otherwise fallback to system memory */
 	domain = drm->device.info.ram_size != 0 ? TTM_PL_FLAG_VRAM :
			 /*

Some files were not shown because too many files changed in this diff