
Back merge tag 'v4.4-rc4' into drm-next

We've picked up a few conflicts and it would be nice
to resolve them before we move onwards.
Dave Airlie, 9 years ago
parent
commit
e876b41ab0
100 files changed, 1018 additions, 547 deletions
  1. Documentation/devicetree/bindings/net/marvell-armada-370-neta.txt (+6 -0)
  2. MAINTAINERS (+7 -5)
  3. Makefile (+1 -1)
  4. arch/arm/boot/dts/armada-38x.dtsi (+1 -0)
  5. arch/arm/include/asm/kvm_emulate.h (+12 -0)
  6. arch/arm/kvm/mmio.c (+3 -2)
  7. arch/arm/kvm/mmu.c (+2 -2)
  8. arch/arm/kvm/psci.c (+10 -10)
  9. arch/arm64/include/asm/kvm_emulate.h (+13 -5)
  10. arch/arm64/kvm/handle_exit.c (+1 -1)
  11. arch/arm64/kvm/sys_regs.c (+60 -63)
  12. arch/arm64/kvm/sys_regs.h (+4 -4)
  13. arch/arm64/kvm/sys_regs_generic_v8.c (+2 -2)
  14. arch/arm64/net/bpf_jit_comp.c (+32 -15)
  15. arch/mn10300/Kconfig (+1 -3)
  16. arch/x86/boot/boot.h (+0 -1)
  17. arch/x86/boot/video-mode.c (+2 -0)
  18. arch/x86/boot/video.c (+2 -0)
  19. arch/x86/entry/entry_64.S (+18 -1)
  20. arch/x86/include/asm/page_types.h (+9 -7)
  21. arch/x86/include/asm/pgtable_types.h (+4 -10)
  22. arch/x86/include/asm/x86_init.h (+0 -1)
  23. arch/x86/kernel/cpu/microcode/core.c (+1 -0)
  24. arch/x86/kernel/pmem.c (+12 -0)
  25. arch/x86/kernel/setup.c (+0 -2)
  26. arch/x86/kernel/signal.c (+10 -7)
  27. arch/x86/kernel/smpboot.c (+5 -4)
  28. arch/x86/mm/mpx.c (+3 -3)
  29. arch/x86/pci/bus_numa.c (+2 -11)
  30. block/blk-core.c (+7 -14)
  31. block/blk-merge.c (+3 -0)
  32. block/blk-settings.c (+16 -20)
  33. block/blk-sysfs.c (+3 -0)
  34. block/partition-generic.c (+1 -1)
  35. crypto/algif_aead.c (+2 -2)
  36. crypto/algif_skcipher.c (+3 -3)
  37. drivers/acpi/Kconfig (+2 -2)
  38. drivers/acpi/nfit.c (+47 -18)
  39. drivers/acpi/nfit.h (+2 -1)
  40. drivers/acpi/pci_root.c (+7 -0)
  41. drivers/base/power/domain.c (+1 -2)
  42. drivers/base/power/domain_governor.c (+0 -3)
  43. drivers/block/null_blk.c (+33 -61)
  44. drivers/block/rbd.c (+1 -0)
  45. drivers/cpufreq/cpufreq.c (+10 -4)
  46. drivers/crypto/nx/nx-aes-ccm.c (+1 -1)
  47. drivers/crypto/nx/nx-aes-gcm.c (+2 -1)
  48. drivers/crypto/talitos.c (+1 -1)
  49. drivers/gpio/gpio-74xx-mmio.c (+5 -2)
  50. drivers/gpio/gpio-omap.c (+0 -2)
  51. drivers/gpio/gpio-palmas.c (+2 -0)
  52. drivers/gpio/gpio-syscon.c (+5 -1)
  53. drivers/gpio/gpio-tegra.c (+56 -49)
  54. drivers/gpio/gpiolib.c (+1 -1)
  55. drivers/gpu/drm/amd/amdgpu/amdgpu.h (+3 -0)
  56. drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c (+4 -2)
  57. drivers/gpu/drm/amd/amdgpu/amdgpu_display.c (+79 -29)
  58. drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c (+3 -2)
  59. drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c (+47 -1)
  60. drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h (+5 -0)
  61. drivers/gpu/drm/amd/amdgpu/amdgpu_object.c (+1 -0)
  62. drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c (+11 -6)
  63. drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c (+18 -3)
  64. drivers/gpu/drm/amd/amdgpu/dce_v10_0.c (+4 -1)
  65. drivers/gpu/drm/amd/amdgpu/dce_v11_0.c (+4 -1)
  66. drivers/gpu/drm/amd/amdgpu/dce_v8_0.c (+4 -1)
  67. drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c (+1 -1)
  68. drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c (+1 -1)
  69. drivers/gpu/drm/amd/scheduler/gpu_scheduler.c (+3 -2)
  70. drivers/gpu/drm/drm_drv.c (+5 -0)
  71. drivers/gpu/drm/drm_fops.c (+56 -28)
  72. drivers/gpu/drm/drm_irq.c (+53 -1)
  73. drivers/gpu/drm/i915/i915_gem.c (+10 -2)
  74. drivers/gpu/drm/i915/i915_gem_fence.c (+27 -9)
  75. drivers/gpu/drm/i915/intel_display.c (+0 -1)
  76. drivers/gpu/drm/i915/intel_dp.c (+2 -1)
  77. drivers/gpu/drm/imx/imx-drm-core.c (+3 -4)
  78. drivers/gpu/drm/imx/imx-drm.h (+2 -1)
  79. drivers/gpu/drm/imx/imx-tve.c (+1 -0)
  80. drivers/gpu/drm/imx/ipuv3-crtc.c (+17 -46)
  81. drivers/gpu/drm/imx/ipuv3-plane.c (+4 -5)
  82. drivers/gpu/drm/imx/ipuv3-plane.h (+1 -1)
  83. drivers/gpu/drm/imx/parallel-display.c (+4 -0)
  84. drivers/gpu/drm/nouveau/nouveau_display.c (+11 -8)
  85. drivers/gpu/drm/radeon/cik.c (+4 -1)
  86. drivers/gpu/drm/radeon/evergreen.c (+4 -1)
  87. drivers/gpu/drm/radeon/r100.c (+11 -1)
  88. drivers/gpu/drm/radeon/r600.c (+1 -1)
  89. drivers/gpu/drm/radeon/radeon.h (+1 -1)
  90. drivers/gpu/drm/radeon/radeon_agp.c (+3 -0)
  91. drivers/gpu/drm/radeon/radeon_connectors.c (+20 -1)
  92. drivers/gpu/drm/radeon/radeon_display.c (+79 -27)
  93. drivers/gpu/drm/radeon/radeon_irq_kms.c (+4 -4)
  94. drivers/gpu/drm/radeon/radeon_kms.c (+49 -1)
  95. drivers/gpu/drm/radeon/radeon_mode.h (+5 -0)
  96. drivers/gpu/drm/radeon/radeon_pm.c (+3 -1)
  97. drivers/gpu/drm/radeon/rs600.c (+1 -1)
  98. drivers/gpu/drm/radeon/rs690.c (+10 -0)
  99. drivers/gpu/drm/radeon/si.c (+4 -1)
  100. drivers/gpu/drm/rockchip/rockchip_drm_gem.c (+1 -0)

+ 6 - 0
Documentation/devicetree/bindings/net/marvell-armada-370-neta.txt

@@ -8,6 +8,11 @@ Required properties:
 - phy-mode: See ethernet.txt file in the same directory
 - clocks: a pointer to the reference clock for this device.
 
+Optional properties:
+- tx-csum-limit: maximum mtu supported by port that allow TX checksum.
+  Value is presented in bytes. If not used, by default 1600B is set for
+  "marvell,armada-370-neta" and 9800B for others.
+
 Example:
 
 ethernet@d0070000 {
@@ -15,6 +20,7 @@ ethernet@d0070000 {
 	reg = <0xd0070000 0x2500>;
 	interrupts = <8>;
 	clocks = <&gate_clk 4>;
+	tx-csum-limit = <9800>
 	status = "okay";
 	phy = <&phy0>;
 	phy-mode = "rgmii-id";

+ 7 - 5
MAINTAINERS

@@ -318,7 +318,7 @@ M:	Zhang Rui <rui.zhang@intel.com>
 L:	linux-acpi@vger.kernel.org
 W:	https://01.org/linux-acpi
 S:	Supported
-F:	drivers/acpi/video.c
+F:	drivers/acpi/acpi_video.c
 
 ACPI WMI DRIVER
 L:	platform-driver-x86@vger.kernel.org
@@ -1847,7 +1847,7 @@ S:	Supported
 F:	drivers/net/wireless/ath/ath6kl/
 
 WILOCITY WIL6210 WIRELESS DRIVER
-M:	Vladimir Kondratiev <qca_vkondrat@qca.qualcomm.com>
+M:	Maya Erez <qca_merez@qca.qualcomm.com>
 L:	linux-wireless@vger.kernel.org
 L:	wil6210@qca.qualcomm.com
 S:	Supported
@@ -9427,8 +9427,10 @@ F:	include/scsi/sg.h
 
 SCSI SUBSYSTEM
 M:	"James E.J. Bottomley" <JBottomley@odin.com>
-L:	linux-scsi@vger.kernel.org
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi.git
+M:	"Martin K. Petersen" <martin.petersen@oracle.com>
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/mkp/scsi.git
+L:	linux-scsi@vger.kernel.org
 S:	Maintained
 F:	drivers/scsi/
 F:	include/scsi/
@@ -10903,9 +10905,9 @@ S:	Maintained
 F:	drivers/media/tuners/tua9001*
 
 TULIP NETWORK DRIVERS
-M:	Grant Grundler <grundler@parisc-linux.org>
 L:	netdev@vger.kernel.org
-S:	Maintained
+L:	linux-parisc@vger.kernel.org
+S:	Orphan
 F:	drivers/net/ethernet/dec/tulip/
 
 TUN/TAP driver

+ 1 - 1
Makefile

@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 4
 SUBLEVEL = 0
-EXTRAVERSION = -rc3
+EXTRAVERSION = -rc4
 NAME = Blurry Fish Butt
 
 # *DOCUMENTATION*

+ 1 - 0
arch/arm/boot/dts/armada-38x.dtsi

@@ -498,6 +498,7 @@
 				reg = <0x70000 0x4000>;
 				interrupts-extended = <&mpic 8>;
 				clocks = <&gateclk 4>;
+				tx-csum-limit = <9800>;
 				status = "disabled";
 			};
 

+ 12 - 0
arch/arm/include/asm/kvm_emulate.h

@@ -28,6 +28,18 @@
 unsigned long *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num);
 unsigned long *vcpu_spsr(struct kvm_vcpu *vcpu);
 
+static inline unsigned long vcpu_get_reg(struct kvm_vcpu *vcpu,
+					 u8 reg_num)
+{
+	return *vcpu_reg(vcpu, reg_num);
+}
+
+static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
+				unsigned long val)
+{
+	*vcpu_reg(vcpu, reg_num) = val;
+}
+
 bool kvm_condition_valid(struct kvm_vcpu *vcpu);
 void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr);
 void kvm_inject_undefined(struct kvm_vcpu *vcpu);

+ 3 - 2
arch/arm/kvm/mmio.c

@@ -115,7 +115,7 @@ int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
 		trace_kvm_mmio(KVM_TRACE_MMIO_READ, len, run->mmio.phys_addr,
 			       data);
 		data = vcpu_data_host_to_guest(vcpu, data, len);
-		*vcpu_reg(vcpu, vcpu->arch.mmio_decode.rt) = data;
+		vcpu_set_reg(vcpu, vcpu->arch.mmio_decode.rt, data);
 	}
 
 	return 0;
@@ -186,7 +186,8 @@ int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
 	rt = vcpu->arch.mmio_decode.rt;
 
 	if (is_write) {
-		data = vcpu_data_guest_to_host(vcpu, *vcpu_reg(vcpu, rt), len);
+		data = vcpu_data_guest_to_host(vcpu, vcpu_get_reg(vcpu, rt),
+					       len);
 
 		trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, len, fault_ipa, data);
 		mmio_write_buf(data_buf, len, data);

+ 2 - 2
arch/arm/kvm/mmu.c

@@ -218,7 +218,7 @@ static void unmap_ptes(struct kvm *kvm, pmd_t *pmd,
 			kvm_tlb_flush_vmid_ipa(kvm, addr);
 
 			/* No need to invalidate the cache for device mappings */
-			if (!kvm_is_device_pfn(__phys_to_pfn(addr)))
+			if (!kvm_is_device_pfn(pte_pfn(old_pte)))
 				kvm_flush_dcache_pte(old_pte);
 
 			put_page(virt_to_page(pte));
@@ -310,7 +310,7 @@ static void stage2_flush_ptes(struct kvm *kvm, pmd_t *pmd,
 
 	pte = pte_offset_kernel(pmd, addr);
 	do {
-		if (!pte_none(*pte) && !kvm_is_device_pfn(__phys_to_pfn(addr)))
+		if (!pte_none(*pte) && !kvm_is_device_pfn(pte_pfn(*pte)))
			kvm_flush_dcache_pte(*pte);
 	} while (pte++, addr += PAGE_SIZE, addr != end);
 }

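A note on the mmu.c fix above: addr in both loops is a guest IPA, so the old __phys_to_pfn(addr) tested whether the guest address looked like a device pfn, while the flush decision has to be made on the host page frame the PTE actually maps, which is what pte_pfn() extracts. A standalone sketch of the difference, using a toy PTE layout (4 KiB pages, physical address in the high bits) that is purely illustrative:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

/* Toy PTE: physical address in the high bits, flags in the low 12 bits. */
static uint64_t pte_pfn(uint64_t pte)
{
	return pte >> PAGE_SHIFT;
}

int main(void)
{
	uint64_t ipa = 0x40001000;          /* guest address being walked */
	uint64_t pte = 0x89abc000ull | 0x3; /* maps host page frame 0x89abc */

	/* old predicate: pfn derived from the guest address - wrong frame */
	printf("guest-derived pfn: %#llx\n",
	       (unsigned long long)(ipa >> PAGE_SHIFT));
	/* new predicate: pfn the PTE actually maps */
	printf("pte-derived pfn:   %#llx\n",
	       (unsigned long long)pte_pfn(pte));
	return 0;
}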
+ 10 - 10
arch/arm/kvm/psci.c

@@ -75,7 +75,7 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
 	unsigned long context_id;
 	phys_addr_t target_pc;
 
-	cpu_id = *vcpu_reg(source_vcpu, 1) & MPIDR_HWID_BITMASK;
+	cpu_id = vcpu_get_reg(source_vcpu, 1) & MPIDR_HWID_BITMASK;
 	if (vcpu_mode_is_32bit(source_vcpu))
 		cpu_id &= ~((u32) 0);
 
@@ -94,8 +94,8 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
 			return PSCI_RET_INVALID_PARAMS;
 	}
 
-	target_pc = *vcpu_reg(source_vcpu, 2);
-	context_id = *vcpu_reg(source_vcpu, 3);
+	target_pc = vcpu_get_reg(source_vcpu, 2);
+	context_id = vcpu_get_reg(source_vcpu, 3);
 
 	kvm_reset_vcpu(vcpu);
 
@@ -114,7 +114,7 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
 	 * NOTE: We always update r0 (or x0) because for PSCI v0.1
 	 * the general puspose registers are undefined upon CPU_ON.
 	 */
-	*vcpu_reg(vcpu, 0) = context_id;
+	vcpu_set_reg(vcpu, 0, context_id);
 	vcpu->arch.power_off = false;
 	smp_mb();		/* Make sure the above is visible */
 
@@ -134,8 +134,8 @@ static unsigned long kvm_psci_vcpu_affinity_info(struct kvm_vcpu *vcpu)
 	struct kvm *kvm = vcpu->kvm;
 	struct kvm_vcpu *tmp;
 
-	target_affinity = *vcpu_reg(vcpu, 1);
-	lowest_affinity_level = *vcpu_reg(vcpu, 2);
+	target_affinity = vcpu_get_reg(vcpu, 1);
+	lowest_affinity_level = vcpu_get_reg(vcpu, 2);
 
 	/* Determine target affinity mask */
 	target_affinity_mask = psci_affinity_mask(lowest_affinity_level);
@@ -209,7 +209,7 @@ int kvm_psci_version(struct kvm_vcpu *vcpu)
 static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
 {
 	int ret = 1;
-	unsigned long psci_fn = *vcpu_reg(vcpu, 0) & ~((u32) 0);
+	unsigned long psci_fn = vcpu_get_reg(vcpu, 0) & ~((u32) 0);
 	unsigned long val;
 
 	switch (psci_fn) {
@@ -273,13 +273,13 @@ static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
 		break;
 	}
 
-	*vcpu_reg(vcpu, 0) = val;
+	vcpu_set_reg(vcpu, 0, val);
 	return ret;
 }
 
 static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
 {
-	unsigned long psci_fn = *vcpu_reg(vcpu, 0) & ~((u32) 0);
+	unsigned long psci_fn = vcpu_get_reg(vcpu, 0) & ~((u32) 0);
 	unsigned long val;
 
 	switch (psci_fn) {
@@ -295,7 +295,7 @@ static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
 		break;
 	}
 
-	*vcpu_reg(vcpu, 0) = val;
+	vcpu_set_reg(vcpu, 0, val);
 	return 1;
 }
 

+ 13 - 5
arch/arm64/include/asm/kvm_emulate.h

@@ -100,13 +100,21 @@ static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
 }
 
 /*
- * vcpu_reg should always be passed a register number coming from a
- * read of ESR_EL2. Otherwise, it may give the wrong result on AArch32
- * with banked registers.
+ * vcpu_get_reg and vcpu_set_reg should always be passed a register number
+ * coming from a read of ESR_EL2. Otherwise, it may give the wrong result on
+ * AArch32 with banked registers.
  */
-static inline unsigned long *vcpu_reg(const struct kvm_vcpu *vcpu, u8 reg_num)
+static inline unsigned long vcpu_get_reg(const struct kvm_vcpu *vcpu,
+					 u8 reg_num)
 {
-	return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.regs[reg_num];
+	return (reg_num == 31) ? 0 : vcpu_gp_regs(vcpu)->regs.regs[reg_num];
+}
+
+static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
+				unsigned long val)
+{
+	if (reg_num != 31)
+		vcpu_gp_regs(vcpu)->regs.regs[reg_num] = val;
 }
 
 /* Get vcpu SPSR for current mode */

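The arm64 accessors above do more than wrap an array access: register number 31 in a trapped instruction encoding names the zero register (XZR/WZR), so a read must yield 0 and a write must be discarded rather than land in the register array, which only holds x0-x30. A minimal standalone sketch of that rule, with regs[] standing in for vcpu_gp_regs(vcpu)->regs.regs:

#include <stdio.h>

static unsigned long regs[31];	/* x0..x30; there is no x31 slot */

static unsigned long get_reg(unsigned char reg_num)
{
	return (reg_num == 31) ? 0 : regs[reg_num];
}

static void set_reg(unsigned char reg_num, unsigned long val)
{
	if (reg_num != 31)
		regs[reg_num] = val;
}

int main(void)
{
	set_reg(0, 0xdead);
	set_reg(31, 0xbeef);	/* silently dropped: writes to XZR vanish */
	/* prints x0=dead xzr=0 */
	printf("x0=%lx xzr=%lx\n", get_reg(0), get_reg(31));
	return 0;
}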
+ 1 - 1
arch/arm64/kvm/handle_exit.c

@@ -37,7 +37,7 @@ static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
 	int ret;
 
-	trace_kvm_hvc_arm64(*vcpu_pc(vcpu), *vcpu_reg(vcpu, 0),
+	trace_kvm_hvc_arm64(*vcpu_pc(vcpu), vcpu_get_reg(vcpu, 0),
 			    kvm_vcpu_hvc_get_imm(vcpu));
 
 	ret = kvm_psci_call(vcpu);

+ 60 - 63
arch/arm64/kvm/sys_regs.c

@@ -78,7 +78,7 @@ static u32 get_ccsidr(u32 csselr)
  * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
  */
 static bool access_dcsw(struct kvm_vcpu *vcpu,
-			const struct sys_reg_params *p,
+			struct sys_reg_params *p,
 			const struct sys_reg_desc *r)
 {
 	if (!p->is_write)
@@ -94,21 +94,19 @@ static bool access_dcsw(struct kvm_vcpu *vcpu,
  * sys_regs and leave it in complete control of the caches.
  */
 static bool access_vm_reg(struct kvm_vcpu *vcpu,
-			  const struct sys_reg_params *p,
+			  struct sys_reg_params *p,
 			  const struct sys_reg_desc *r)
 {
-	unsigned long val;
 	bool was_enabled = vcpu_has_cache_enabled(vcpu);
 
 	BUG_ON(!p->is_write);
 
-	val = *vcpu_reg(vcpu, p->Rt);
 	if (!p->is_aarch32) {
-		vcpu_sys_reg(vcpu, r->reg) = val;
+		vcpu_sys_reg(vcpu, r->reg) = p->regval;
 	} else {
 		if (!p->is_32bit)
-			vcpu_cp15_64_high(vcpu, r->reg) = val >> 32;
-		vcpu_cp15_64_low(vcpu, r->reg) = val & 0xffffffffUL;
+			vcpu_cp15_64_high(vcpu, r->reg) = upper_32_bits(p->regval);
+		vcpu_cp15_64_low(vcpu, r->reg) = lower_32_bits(p->regval);
 	}
 
 	kvm_toggle_cache(vcpu, was_enabled);
@@ -122,22 +120,19 @@ static bool access_vm_reg(struct kvm_vcpu *vcpu,
  * for both AArch64 and AArch32 accesses.
  */
 static bool access_gic_sgi(struct kvm_vcpu *vcpu,
-			   const struct sys_reg_params *p,
+			   struct sys_reg_params *p,
 			   const struct sys_reg_desc *r)
 {
-	u64 val;
-
 	if (!p->is_write)
 		return read_from_write_only(vcpu, p);
 
-	val = *vcpu_reg(vcpu, p->Rt);
-	vgic_v3_dispatch_sgi(vcpu, val);
+	vgic_v3_dispatch_sgi(vcpu, p->regval);
 
 	return true;
 }
 
 static bool trap_raz_wi(struct kvm_vcpu *vcpu,
-			const struct sys_reg_params *p,
+			struct sys_reg_params *p,
 			const struct sys_reg_desc *r)
 {
 	if (p->is_write)
@@ -147,19 +142,19 @@ static bool trap_raz_wi(struct kvm_vcpu *vcpu,
 }
 
 static bool trap_oslsr_el1(struct kvm_vcpu *vcpu,
-			   const struct sys_reg_params *p,
+			   struct sys_reg_params *p,
 			   const struct sys_reg_desc *r)
 {
 	if (p->is_write) {
 		return ignore_write(vcpu, p);
 	} else {
-		*vcpu_reg(vcpu, p->Rt) = (1 << 3);
+		p->regval = (1 << 3);
 		return true;
 	}
 }
 
 static bool trap_dbgauthstatus_el1(struct kvm_vcpu *vcpu,
-				   const struct sys_reg_params *p,
+				   struct sys_reg_params *p,
 				   const struct sys_reg_desc *r)
 {
 	if (p->is_write) {
@@ -167,7 +162,7 @@ static bool trap_dbgauthstatus_el1(struct kvm_vcpu *vcpu,
 	} else {
 		u32 val;
 		asm volatile("mrs %0, dbgauthstatus_el1" : "=r" (val));
-		*vcpu_reg(vcpu, p->Rt) = val;
+		p->regval = val;
 		return true;
 	}
 }
@@ -200,17 +195,17 @@ static bool trap_dbgauthstatus_el1(struct kvm_vcpu *vcpu,
  *   now use the debug registers.
  */
 static bool trap_debug_regs(struct kvm_vcpu *vcpu,
-			    const struct sys_reg_params *p,
+			    struct sys_reg_params *p,
 			    const struct sys_reg_desc *r)
 {
 	if (p->is_write) {
-		vcpu_sys_reg(vcpu, r->reg) = *vcpu_reg(vcpu, p->Rt);
+		vcpu_sys_reg(vcpu, r->reg) = p->regval;
 		vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
 	} else {
-		*vcpu_reg(vcpu, p->Rt) = vcpu_sys_reg(vcpu, r->reg);
+		p->regval = vcpu_sys_reg(vcpu, r->reg);
 	}
 
-	trace_trap_reg(__func__, r->reg, p->is_write, *vcpu_reg(vcpu, p->Rt));
+	trace_trap_reg(__func__, r->reg, p->is_write, p->regval);
 
 	return true;
 }
@@ -225,10 +220,10 @@ static bool trap_debug_regs(struct kvm_vcpu *vcpu,
  * hyp.S code switches between host and guest values in future.
  */
 static inline void reg_to_dbg(struct kvm_vcpu *vcpu,
-			      const struct sys_reg_params *p,
+			      struct sys_reg_params *p,
 			      u64 *dbg_reg)
 {
-	u64 val = *vcpu_reg(vcpu, p->Rt);
+	u64 val = p->regval;
 
 	if (p->is_32bit) {
 		val &= 0xffffffffUL;
@@ -240,19 +235,16 @@ static inline void reg_to_dbg(struct kvm_vcpu *vcpu,
 }
 
 static inline void dbg_to_reg(struct kvm_vcpu *vcpu,
-			      const struct sys_reg_params *p,
+			      struct sys_reg_params *p,
 			      u64 *dbg_reg)
 {
-	u64 val = *dbg_reg;
-
+	p->regval = *dbg_reg;
 	if (p->is_32bit)
-		val &= 0xffffffffUL;
-
-	*vcpu_reg(vcpu, p->Rt) = val;
+		p->regval &= 0xffffffffUL;
 }
 
 static inline bool trap_bvr(struct kvm_vcpu *vcpu,
-			    const struct sys_reg_params *p,
+			    struct sys_reg_params *p,
 			    const struct sys_reg_desc *rd)
 {
 	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
@@ -294,7 +286,7 @@ static inline void reset_bvr(struct kvm_vcpu *vcpu,
 }
 
 static inline bool trap_bcr(struct kvm_vcpu *vcpu,
-			    const struct sys_reg_params *p,
+			    struct sys_reg_params *p,
 			    const struct sys_reg_desc *rd)
 {
 	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];
@@ -337,7 +329,7 @@ static inline void reset_bcr(struct kvm_vcpu *vcpu,
 }
 
 static inline bool trap_wvr(struct kvm_vcpu *vcpu,
-			    const struct sys_reg_params *p,
+			    struct sys_reg_params *p,
 			    const struct sys_reg_desc *rd)
 {
 	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];
@@ -380,7 +372,7 @@ static inline void reset_wvr(struct kvm_vcpu *vcpu,
 }
 
 static inline bool trap_wcr(struct kvm_vcpu *vcpu,
-			    const struct sys_reg_params *p,
+			    struct sys_reg_params *p,
 			    const struct sys_reg_desc *rd)
 {
 	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];
@@ -687,7 +679,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
 };
 
 static bool trap_dbgidr(struct kvm_vcpu *vcpu,
-			const struct sys_reg_params *p,
+			struct sys_reg_params *p,
 			const struct sys_reg_desc *r)
 {
 	if (p->is_write) {
@@ -697,23 +689,23 @@ static bool trap_dbgidr(struct kvm_vcpu *vcpu,
 		u64 pfr = read_system_reg(SYS_ID_AA64PFR0_EL1);
 		u32 el3 = !!cpuid_feature_extract_field(pfr, ID_AA64PFR0_EL3_SHIFT);
 
-		*vcpu_reg(vcpu, p->Rt) = ((((dfr >> ID_AA64DFR0_WRPS_SHIFT) & 0xf) << 28) |
-					  (((dfr >> ID_AA64DFR0_BRPS_SHIFT) & 0xf) << 24) |
-					  (((dfr >> ID_AA64DFR0_CTX_CMPS_SHIFT) & 0xf) << 20) |
-					  (6 << 16) | (el3 << 14) | (el3 << 12));
+		p->regval = ((((dfr >> ID_AA64DFR0_WRPS_SHIFT) & 0xf) << 28) |
+			     (((dfr >> ID_AA64DFR0_BRPS_SHIFT) & 0xf) << 24) |
+			     (((dfr >> ID_AA64DFR0_CTX_CMPS_SHIFT) & 0xf) << 20)
+			     | (6 << 16) | (el3 << 14) | (el3 << 12));
 		return true;
 	}
 }
 
 static bool trap_debug32(struct kvm_vcpu *vcpu,
-			 const struct sys_reg_params *p,
+			 struct sys_reg_params *p,
 			 const struct sys_reg_desc *r)
 {
 	if (p->is_write) {
-		vcpu_cp14(vcpu, r->reg) = *vcpu_reg(vcpu, p->Rt);
+		vcpu_cp14(vcpu, r->reg) = p->regval;
 		vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
 	} else {
-		*vcpu_reg(vcpu, p->Rt) = vcpu_cp14(vcpu, r->reg);
+		p->regval = vcpu_cp14(vcpu, r->reg);
 	}
 
 	return true;
@@ -731,7 +723,7 @@ static bool trap_debug32(struct kvm_vcpu *vcpu,
  */
 
 static inline bool trap_xvr(struct kvm_vcpu *vcpu,
-			    const struct sys_reg_params *p,
+			    struct sys_reg_params *p,
 			    const struct sys_reg_desc *rd)
 {
 	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
@@ -740,12 +732,12 @@ static inline bool trap_xvr(struct kvm_vcpu *vcpu,
 		u64 val = *dbg_reg;
 
 		val &= 0xffffffffUL;
-		val |= *vcpu_reg(vcpu, p->Rt) << 32;
+		val |= p->regval << 32;
 		*dbg_reg = val;
 
 		vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
 	} else {
-		*vcpu_reg(vcpu, p->Rt) = *dbg_reg >> 32;
+		p->regval = *dbg_reg >> 32;
 	}
 
 	trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);
@@ -991,7 +983,7 @@ int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run)
  * Return 0 if the access has been handled, and -1 if not.
  */
 static int emulate_cp(struct kvm_vcpu *vcpu,
-		      const struct sys_reg_params *params,
+		      struct sys_reg_params *params,
 		      const struct sys_reg_desc *table,
 		      size_t num)
 {
@@ -1062,12 +1054,12 @@ static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
 {
 	struct sys_reg_params params;
 	u32 hsr = kvm_vcpu_get_hsr(vcpu);
+	int Rt = (hsr >> 5) & 0xf;
 	int Rt2 = (hsr >> 10) & 0xf;
 
 	params.is_aarch32 = true;
 	params.is_32bit = false;
 	params.CRm = (hsr >> 1) & 0xf;
-	params.Rt = (hsr >> 5) & 0xf;
 	params.is_write = ((hsr & 1) == 0);
 
 	params.Op0 = 0;
@@ -1076,15 +1068,12 @@ static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
 	params.CRn = 0;
 
 	/*
-	 * Massive hack here. Store Rt2 in the top 32bits so we only
-	 * have one register to deal with. As we use the same trap
+	 * Make a 64-bit value out of Rt and Rt2. As we use the same trap
	 * backends between AArch32 and AArch64, we get away with it.
	 */
 	if (params.is_write) {
-		u64 val = *vcpu_reg(vcpu, params.Rt);
-		val &= 0xffffffff;
-		val |= *vcpu_reg(vcpu, Rt2) << 32;
-		*vcpu_reg(vcpu, params.Rt) = val;
+		params.regval = vcpu_get_reg(vcpu, Rt) & 0xffffffff;
+		params.regval |= vcpu_get_reg(vcpu, Rt2) << 32;
 	}
 
 	if (!emulate_cp(vcpu, &params, target_specific, nr_specific))
@@ -1095,11 +1084,10 @@ static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
 	unhandled_cp_access(vcpu, &params);
 
 out:
-	/* Do the opposite hack for the read side */
+	/* Split up the value between registers for the read side */
 	if (!params.is_write) {
-		u64 val = *vcpu_reg(vcpu, params.Rt);
-		val >>= 32;
-		*vcpu_reg(vcpu, Rt2) = val;
+		vcpu_set_reg(vcpu, Rt, lower_32_bits(params.regval));
+		vcpu_set_reg(vcpu, Rt2, upper_32_bits(params.regval));
 	}
 
 	return 1;
@@ -1118,21 +1106,24 @@ static int kvm_handle_cp_32(struct kvm_vcpu *vcpu,
 {
 	struct sys_reg_params params;
 	u32 hsr = kvm_vcpu_get_hsr(vcpu);
+	int Rt  = (hsr >> 5) & 0xf;
 
 	params.is_aarch32 = true;
 	params.is_32bit = true;
 	params.CRm = (hsr >> 1) & 0xf;
-	params.Rt  = (hsr >> 5) & 0xf;
+	params.regval = vcpu_get_reg(vcpu, Rt);
 	params.is_write = ((hsr & 1) == 0);
 	params.CRn = (hsr >> 10) & 0xf;
 	params.Op0 = 0;
 	params.Op1 = (hsr >> 14) & 0x7;
 	params.Op2 = (hsr >> 17) & 0x7;
 
-	if (!emulate_cp(vcpu, &params, target_specific, nr_specific))
-		return 1;
-	if (!emulate_cp(vcpu, &params, global, nr_global))
+	if (!emulate_cp(vcpu, &params, target_specific, nr_specific) ||
+	    !emulate_cp(vcpu, &params, global, nr_global)) {
+		if (!params.is_write)
+			vcpu_set_reg(vcpu, Rt, params.regval);
 		return 1;
+	}
 
 	unhandled_cp_access(vcpu, &params);
 	return 1;
@@ -1175,7 +1166,7 @@ int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
 }
 
 static int emulate_sys_reg(struct kvm_vcpu *vcpu,
-			   const struct sys_reg_params *params)
+			   struct sys_reg_params *params)
 {
 	size_t num;
 	const struct sys_reg_desc *table, *r;
@@ -1230,6 +1221,8 @@ int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
 	struct sys_reg_params params;
 	unsigned long esr = kvm_vcpu_get_hsr(vcpu);
+	int Rt = (esr >> 5) & 0x1f;
+	int ret;
 
 	trace_kvm_handle_sys_reg(esr);
 
@@ -1240,10 +1233,14 @@ int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	params.CRn = (esr >> 10) & 0xf;
 	params.CRm = (esr >> 1) & 0xf;
 	params.Op2 = (esr >> 17) & 0x7;
-	params.Rt = (esr >> 5) & 0x1f;
+	params.regval = vcpu_get_reg(vcpu, Rt);
 	params.is_write = !(esr & 1);
 
-	return emulate_sys_reg(vcpu, &params);
+	ret = emulate_sys_reg(vcpu, &params);
+
+	if (!params.is_write)
+		vcpu_set_reg(vcpu, Rt, params.regval);
+	return ret;
 }
 
 /******************************************************************************

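The thread running through the sys_regs.c rework: trap handlers now exchange data through params.regval instead of dereferencing vcpu_reg(vcpu, p->Rt), and the entry points load the register before emulation and write it back after a read. For 64-bit cp15 accesses, two 32-bit guest registers are packed into that one value. A standalone sketch of the packing and splitting, with lower_32_bits/upper_32_bits restated to mirror the kernel helpers of the same names:

#include <stdint.h>
#include <stdio.h>

#define lower_32_bits(v) ((uint32_t)(v))
#define upper_32_bits(v) ((uint32_t)((v) >> 32))

int main(void)
{
	uint64_t rt = 0x11223344, rt2 = 0x55667788;

	/* write path: make a 64-bit value out of Rt and Rt2 */
	uint64_t regval = (rt & 0xffffffff) | (rt2 << 32);

	/* read path: split the value back between the two registers */
	printf("regval=%016llx Rt=%08x Rt2=%08x\n",
	       (unsigned long long)regval,
	       lower_32_bits(regval), upper_32_bits(regval));
	return 0;
}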
+ 4 - 4
arch/arm64/kvm/sys_regs.h

@@ -28,7 +28,7 @@ struct sys_reg_params {
 	u8	CRn;
 	u8	CRm;
 	u8	Op2;
-	u8	Rt;
+	u64	regval;
 	bool	is_write;
 	bool	is_aarch32;
 	bool	is_32bit;	/* Only valid if is_aarch32 is true */
@@ -44,7 +44,7 @@ struct sys_reg_desc {
 
 	/* Trapped access from guest, if non-NULL. */
 	bool (*access)(struct kvm_vcpu *,
-		       const struct sys_reg_params *,
+		       struct sys_reg_params *,
 		       const struct sys_reg_desc *);
 
 	/* Initialization for vcpu. */
@@ -77,9 +77,9 @@ static inline bool ignore_write(struct kvm_vcpu *vcpu,
 }
 
 static inline bool read_zero(struct kvm_vcpu *vcpu,
-			     const struct sys_reg_params *p)
+			     struct sys_reg_params *p)
 {
-	*vcpu_reg(vcpu, p->Rt) = 0;
+	p->regval = 0;
 	return true;
 }
 

+ 2 - 2
arch/arm64/kvm/sys_regs_generic_v8.c

@@ -31,13 +31,13 @@
 #include "sys_regs.h"
 
 static bool access_actlr(struct kvm_vcpu *vcpu,
-			 const struct sys_reg_params *p,
+			 struct sys_reg_params *p,
 			 const struct sys_reg_desc *r)
 {
 	if (p->is_write)
 		return ignore_write(vcpu, p);
 
-	*vcpu_reg(vcpu, p->Rt) = vcpu_sys_reg(vcpu, ACTLR_EL1);
+	p->regval = vcpu_sys_reg(vcpu, ACTLR_EL1);
 	return true;
 }
 

+ 32 - 15
arch/arm64/net/bpf_jit_comp.c

@@ -139,6 +139,12 @@ static inline int epilogue_offset(const struct jit_ctx *ctx)
 /* Stack must be multiples of 16B */
 #define STACK_ALIGN(sz) (((sz) + 15) & ~15)
 
+#define _STACK_SIZE \
+	(MAX_BPF_STACK \
+	 + 4 /* extra for skb_copy_bits buffer */)
+
+#define STACK_SIZE STACK_ALIGN(_STACK_SIZE)
+
 static void build_prologue(struct jit_ctx *ctx)
 {
 	const u8 r6 = bpf2a64[BPF_REG_6];
@@ -150,10 +156,6 @@ static void build_prologue(struct jit_ctx *ctx)
 	const u8 rx = bpf2a64[BPF_REG_X];
 	const u8 tmp1 = bpf2a64[TMP_REG_1];
 	const u8 tmp2 = bpf2a64[TMP_REG_2];
-	int stack_size = MAX_BPF_STACK;
-
-	stack_size += 4; /* extra for skb_copy_bits buffer */
-	stack_size = STACK_ALIGN(stack_size);
 
 	/*
 	 * BPF prog stack layout
@@ -165,12 +167,13 @@ static void build_prologue(struct jit_ctx *ctx)
 	 *                        | ... | callee saved registers
 	 *                        +-----+
 	 *                        |     | x25/x26
-	 * BPF fp register => -80:+-----+
+	 * BPF fp register => -80:+-----+ <= (BPF_FP)
 	 *                        |     |
 	 *                        | ... | BPF prog stack
 	 *                        |     |
-	 *                        |     |
-	 * current A64_SP =>      +-----+
+	 *                        +-----+ <= (BPF_FP - MAX_BPF_STACK)
+	 *                        |RSVD | JIT scratchpad
+	 * current A64_SP =>      +-----+ <= (BPF_FP - STACK_SIZE)
 	 *                        |     |
 	 *                        | ... | Function call stack
 	 *                        |     |
@@ -196,7 +199,7 @@ static void build_prologue(struct jit_ctx *ctx)
 	emit(A64_MOV(1, fp, A64_SP), ctx);
 
 	/* Set up function call stack */
-	emit(A64_SUB_I(1, A64_SP, A64_SP, stack_size), ctx);
+	emit(A64_SUB_I(1, A64_SP, A64_SP, STACK_SIZE), ctx);
 
 	/* Clear registers A and X */
 	emit_a64_mov_i64(ra, 0, ctx);
@@ -213,13 +216,9 @@ static void build_epilogue(struct jit_ctx *ctx)
 	const u8 fp = bpf2a64[BPF_REG_FP];
 	const u8 tmp1 = bpf2a64[TMP_REG_1];
 	const u8 tmp2 = bpf2a64[TMP_REG_2];
-	int stack_size = MAX_BPF_STACK;
-
-	stack_size += 4; /* extra for skb_copy_bits buffer */
-	stack_size = STACK_ALIGN(stack_size);
 
 	/* We're done with BPF stack */
-	emit(A64_ADD_I(1, A64_SP, A64_SP, stack_size), ctx);
+	emit(A64_ADD_I(1, A64_SP, A64_SP, STACK_SIZE), ctx);
 
 	/* Restore fs (x25) and x26 */
 	emit(A64_POP(fp, A64_R(26), A64_SP), ctx);
@@ -591,7 +590,25 @@ emit_cond_jmp:
 	case BPF_ST | BPF_MEM | BPF_H:
 	case BPF_ST | BPF_MEM | BPF_B:
 	case BPF_ST | BPF_MEM | BPF_DW:
-		goto notyet;
+		/* Load imm to a register then store it */
+		ctx->tmp_used = 1;
+		emit_a64_mov_i(1, tmp2, off, ctx);
+		emit_a64_mov_i(1, tmp, imm, ctx);
+		switch (BPF_SIZE(code)) {
+		case BPF_W:
+			emit(A64_STR32(tmp, dst, tmp2), ctx);
+			break;
+		case BPF_H:
+			emit(A64_STRH(tmp, dst, tmp2), ctx);
+			break;
+		case BPF_B:
+			emit(A64_STRB(tmp, dst, tmp2), ctx);
+			break;
+		case BPF_DW:
+			emit(A64_STR64(tmp, dst, tmp2), ctx);
+			break;
+		}
+		break;
 
 	/* STX: *(size *)(dst + off) = src */
 	case BPF_STX | BPF_MEM | BPF_W:
@@ -658,7 +675,7 @@ emit_cond_jmp:
 			return -EINVAL;
 		}
 		emit_a64_mov_i64(r3, size, ctx);
-		emit(A64_ADD_I(1, r4, fp, MAX_BPF_STACK), ctx);
+		emit(A64_SUB_I(1, r4, fp, STACK_SIZE), ctx);
 		emit_a64_mov_i64(r5, (unsigned long)bpf_load_pointer, ctx);
 		emit(A64_PUSH(A64_FP, A64_LR, A64_SP), ctx);
 		emit(A64_MOV(1, A64_FP, A64_SP), ctx);

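The bpf_jit_comp.c change hoists the per-function stack_size computation into a single STACK_SIZE macro so the prologue, the epilogue and the skb helper call all agree on one value. A quick standalone check of the arithmetic; MAX_BPF_STACK is 512 in this tree, restated here as an assumption:

#include <stdio.h>

#define STACK_ALIGN(sz)	(((sz) + 15) & ~15)	/* SP must stay 16-byte aligned */
#define MAX_BPF_STACK	512			/* assumption: value in this tree */
#define _STACK_SIZE	(MAX_BPF_STACK + 4)	/* + skb_copy_bits scratch */
#define STACK_SIZE	STACK_ALIGN(_STACK_SIZE)

int main(void)
{
	printf("%d\n", STACK_SIZE);	/* 516 rounded up to 528 */
	return 0;
}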
+ 1 - 3
arch/mn10300/Kconfig

@@ -1,6 +1,7 @@
 config MN10300
 	def_bool y
 	select HAVE_OPROFILE
+	select HAVE_UID16
 	select GENERIC_IRQ_SHOW
 	select ARCH_WANT_IPC_PARSE_VERSION
 	select HAVE_ARCH_TRACEHOOK
@@ -37,9 +38,6 @@ config HIGHMEM
 config NUMA
 	def_bool n
 
-config UID16
-	def_bool y
-
 config RWSEM_GENERIC_SPINLOCK
 	def_bool y
 

+ 0 - 1
arch/x86/boot/boot.h

@@ -23,7 +23,6 @@
 #include <stdarg.h>
 #include <linux/types.h>
 #include <linux/edd.h>
-#include <asm/boot.h>
 #include <asm/setup.h>
 #include "bitops.h"
 #include "ctype.h"

+ 2 - 0
arch/x86/boot/video-mode.c

@@ -19,6 +19,8 @@
 #include "video.h"
 #include "vesa.h"
 
+#include <uapi/asm/boot.h>
+
 /*
  * Common variables
  */

+ 2 - 0
arch/x86/boot/video.c

@@ -13,6 +13,8 @@
  * Select video mode
  */
 
+#include <uapi/asm/boot.h>
+
 #include "boot.h"
 #include "video.h"
 #include "vesa.h"

+ 18 - 1
arch/x86/entry/entry_64.S

@@ -509,6 +509,17 @@ END(irq_entries_start)
	 * tracking that we're in kernel mode.
	 */
	SWAPGS
+
+	/*
+	 * We need to tell lockdep that IRQs are off.  We can't do this until
+	 * we fix gsbase, and we should do it before enter_from_user_mode
+	 * (which can take locks).  Since TRACE_IRQS_OFF idempotent,
+	 * the simplest way to handle it is to just call it twice if
+	 * we enter from user mode.  There's no reason to optimize this since
+	 * TRACE_IRQS_OFF is a no-op if lockdep is off.
+	 */
+	TRACE_IRQS_OFF
+
 #ifdef CONFIG_CONTEXT_TRACKING
	call enter_from_user_mode
 #endif
@@ -1049,12 +1060,18 @@ ENTRY(error_entry)
	SWAPGS
 
 .Lerror_entry_from_usermode_after_swapgs:
+	/*
+	 * We need to tell lockdep that IRQs are off.  We can't do this until
+	 * we fix gsbase, and we should do it before enter_from_user_mode
+	 * (which can take locks).
+	 */
+	TRACE_IRQS_OFF
 #ifdef CONFIG_CONTEXT_TRACKING
	call enter_from_user_mode
 #endif
+	ret
 
 .Lerror_entry_done:
-
	TRACE_IRQS_OFF
	ret
 

+ 9 - 7
arch/x86/include/asm/page_types.h

@@ -9,19 +9,21 @@
 #define PAGE_SIZE	(_AC(1,UL) << PAGE_SHIFT)
 #define PAGE_MASK	(~(PAGE_SIZE-1))
 
+#define PMD_PAGE_SIZE		(_AC(1, UL) << PMD_SHIFT)
+#define PMD_PAGE_MASK		(~(PMD_PAGE_SIZE-1))
+
+#define PUD_PAGE_SIZE		(_AC(1, UL) << PUD_SHIFT)
+#define PUD_PAGE_MASK		(~(PUD_PAGE_SIZE-1))
+
 #define __PHYSICAL_MASK		((phys_addr_t)((1ULL << __PHYSICAL_MASK_SHIFT) - 1))
 #define __VIRTUAL_MASK		((1UL << __VIRTUAL_MASK_SHIFT) - 1)
 
-/* Cast PAGE_MASK to a signed type so that it is sign-extended if
+/* Cast *PAGE_MASK to a signed type so that it is sign-extended if
    virtual addresses are 32-bits but physical addresses are larger
    (ie, 32-bit PAE). */
 #define PHYSICAL_PAGE_MASK	(((signed long)PAGE_MASK) & __PHYSICAL_MASK)
-
-#define PMD_PAGE_SIZE		(_AC(1, UL) << PMD_SHIFT)
-#define PMD_PAGE_MASK		(~(PMD_PAGE_SIZE-1))
-
-#define PUD_PAGE_SIZE		(_AC(1, UL) << PUD_SHIFT)
-#define PUD_PAGE_MASK		(~(PUD_PAGE_SIZE-1))
+#define PHYSICAL_PMD_PAGE_MASK	(((signed long)PMD_PAGE_MASK) & __PHYSICAL_MASK)
+#define PHYSICAL_PUD_PAGE_MASK	(((signed long)PUD_PAGE_MASK) & __PHYSICAL_MASK)
 
 #define HPAGE_SHIFT		PMD_SHIFT
 #define HPAGE_SIZE		(_AC(1,UL) << HPAGE_SHIFT)

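The page_types.h reshuffle exists so that the new PHYSICAL_PMD_PAGE_MASK and PHYSICAL_PUD_PAGE_MASK get the same signed-cast treatment as PHYSICAL_PAGE_MASK. The cast matters on 32-bit PAE, where the mask is built in 32 bits but applied to a wider phys_addr_t: zero extension would clear the high physical bits. A standalone sketch with illustrative values (4 KiB page, 52-bit physical mask):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t page_mask32 = ~(4096u - 1);	/* 0xfffff000 */
	uint64_t phys_mask = (1ull << 52) - 1;

	/* zero-extended: high physical bits are wrongly cleared */
	uint64_t bad = (uint64_t)page_mask32 & phys_mask;
	/* sign-extended first, as the (signed long) cast does */
	uint64_t good = (uint64_t)(int64_t)(int32_t)page_mask32 & phys_mask;

	printf("bad=%016llx good=%016llx\n",
	       (unsigned long long)bad, (unsigned long long)good);
	return 0;
}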
+ 4 - 10
arch/x86/include/asm/pgtable_types.h

@@ -279,17 +279,14 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
 static inline pudval_t pud_pfn_mask(pud_t pud)
 {
 	if (native_pud_val(pud) & _PAGE_PSE)
-		return PUD_PAGE_MASK & PHYSICAL_PAGE_MASK;
+		return PHYSICAL_PUD_PAGE_MASK;
 	else
 		return PTE_PFN_MASK;
 }
 
 static inline pudval_t pud_flags_mask(pud_t pud)
 {
-	if (native_pud_val(pud) & _PAGE_PSE)
-		return ~(PUD_PAGE_MASK & (pudval_t)PHYSICAL_PAGE_MASK);
-	else
-		return ~PTE_PFN_MASK;
+	return ~pud_pfn_mask(pud);
 }
 
 static inline pudval_t pud_flags(pud_t pud)
@@ -300,17 +297,14 @@ static inline pudval_t pud_flags(pud_t pud)
 static inline pmdval_t pmd_pfn_mask(pmd_t pmd)
 {
 	if (native_pmd_val(pmd) & _PAGE_PSE)
-		return PMD_PAGE_MASK & PHYSICAL_PAGE_MASK;
+		return PHYSICAL_PMD_PAGE_MASK;
 	else
 		return PTE_PFN_MASK;
 }
 
 static inline pmdval_t pmd_flags_mask(pmd_t pmd)
 {
-	if (native_pmd_val(pmd) & _PAGE_PSE)
-		return ~(PMD_PAGE_MASK & (pmdval_t)PHYSICAL_PAGE_MASK);
-	else
-		return ~PTE_PFN_MASK;
+	return ~pmd_pfn_mask(pmd);
 }
 
 static inline pmdval_t pmd_flags(pmd_t pmd)

+ 0 - 1
arch/x86/include/asm/x86_init.h

@@ -1,7 +1,6 @@
 #ifndef _ASM_X86_PLATFORM_H
 #define _ASM_X86_PLATFORM_H
 
-#include <asm/pgtable_types.h>
 #include <asm/bootparam.h>
 
 struct mpc_bus;

+ 1 - 0
arch/x86/kernel/cpu/microcode/core.c

@@ -698,3 +698,4 @@ int __init microcode_init(void)
 	return error;
 
 }
+late_initcall(microcode_init);

+ 12 - 0
arch/x86/kernel/pmem.c

@@ -4,10 +4,22 @@
  */
 #include <linux/platform_device.h>
 #include <linux/module.h>
+#include <linux/ioport.h>
+
+static int found(u64 start, u64 end, void *data)
+{
+	return 1;
+}
 
 static __init int register_e820_pmem(void)
 {
+	char *pmem = "Persistent Memory (legacy)";
 	struct platform_device *pdev;
+	int rc;
+
+	rc = walk_iomem_res(pmem, IORESOURCE_MEM, 0, -1, NULL, found);
+	if (rc <= 0)
+		return 0;
 
 	/*
 	 * See drivers/nvdimm/e820.c for the implementation, this is

+ 0 - 2
arch/x86/kernel/setup.c

@@ -1250,8 +1250,6 @@ void __init setup_arch(char **cmdline_p)
 	if (efi_enabled(EFI_BOOT))
 		efi_apply_memmap_quirks();
 #endif
-
-	microcode_init();
 }
 
 #ifdef CONFIG_X86_32

+ 10 - 7
arch/x86/kernel/signal.c

@@ -690,12 +690,15 @@ handle_signal(struct ksignal *ksig, struct pt_regs *regs)
 	signal_setup_done(failed, ksig, stepping);
 }
 
-#ifdef CONFIG_X86_32
-#define NR_restart_syscall	__NR_restart_syscall
-#else /* !CONFIG_X86_32 */
-#define NR_restart_syscall	\
-	test_thread_flag(TIF_IA32) ? __NR_ia32_restart_syscall : __NR_restart_syscall
-#endif /* CONFIG_X86_32 */
+static inline unsigned long get_nr_restart_syscall(const struct pt_regs *regs)
+{
+#if defined(CONFIG_X86_32) || !defined(CONFIG_X86_64)
+	return __NR_restart_syscall;
+#else /* !CONFIG_X86_32 && CONFIG_X86_64 */
+	return test_thread_flag(TIF_IA32) ? __NR_ia32_restart_syscall :
+		__NR_restart_syscall | (regs->orig_ax & __X32_SYSCALL_BIT);
+#endif /* CONFIG_X86_32 || !CONFIG_X86_64 */
+}
 
 /*
  * Note that 'init' is a special process: it doesn't get signals it doesn't
@@ -724,7 +727,7 @@ void do_signal(struct pt_regs *regs)
 			break;
 
 		case -ERESTART_RESTARTBLOCK:
-			regs->ax = NR_restart_syscall;
+			regs->ax = get_nr_restart_syscall(regs);
 			regs->ip -= 2;
 			break;
 		}

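The signal.c fix turns the NR_restart_syscall macro into a function because the correct restart number now depends on the interrupted syscall: x32 syscalls carry __X32_SYSCALL_BIT in the syscall number held in orig_ax, and that bit has to survive into the restart. A standalone sketch; the constants mirror arch/x86 (219 is __NR_restart_syscall on x86-64) but are restated here as assumptions:

#include <stdio.h>

#define __X32_SYSCALL_BIT	0x40000000	/* assumption: arch/x86 value */
#define __NR_restart_syscall	219		/* assumption: x86-64 value */

static unsigned long restart_nr(unsigned long orig_ax)
{
	/* keep the x32 marker bit from the original syscall number */
	return __NR_restart_syscall | (orig_ax & __X32_SYSCALL_BIT);
}

int main(void)
{
	printf("64-bit: %lu\n", restart_nr(219));
	printf("x32:    %#lx\n", restart_nr(219 | __X32_SYSCALL_BIT));
	return 0;
}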
+ 5 - 4
arch/x86/kernel/smpboot.c

@@ -509,7 +509,7 @@ void __inquire_remote_apic(int apicid)
  */
 #define UDELAY_10MS_DEFAULT 10000
 
-static unsigned int init_udelay = INT_MAX;
+static unsigned int init_udelay = UINT_MAX;
 
 static int __init cpu_init_udelay(char *str)
 {
@@ -522,14 +522,15 @@ early_param("cpu_init_udelay", cpu_init_udelay);
 static void __init smp_quirk_init_udelay(void)
 {
 	/* if cmdline changed it from default, leave it alone */
-	if (init_udelay != INT_MAX)
+	if (init_udelay != UINT_MAX)
 		return;
 
 	/* if modern processor, use no delay */
 	if (((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) && (boot_cpu_data.x86 == 6)) ||
-	    ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) && (boot_cpu_data.x86 >= 0xF)))
+	    ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) && (boot_cpu_data.x86 >= 0xF))) {
 		init_udelay = 0;
-
+		return;
+	}
 	/* else, use legacy delay */
 	init_udelay = UDELAY_10MS_DEFAULT;
 }

+ 3 - 3
arch/x86/mm/mpx.c

@@ -101,19 +101,19 @@ static int get_reg_offset(struct insn *insn, struct pt_regs *regs,
 	switch (type) {
 	case REG_TYPE_RM:
 		regno = X86_MODRM_RM(insn->modrm.value);
-		if (X86_REX_B(insn->rex_prefix.value) == 1)
+		if (X86_REX_B(insn->rex_prefix.value))
 			regno += 8;
 		break;
 
 	case REG_TYPE_INDEX:
 		regno = X86_SIB_INDEX(insn->sib.value);
-		if (X86_REX_X(insn->rex_prefix.value) == 1)
+		if (X86_REX_X(insn->rex_prefix.value))
 			regno += 8;
 		break;
 
 	case REG_TYPE_BASE:
 		regno = X86_SIB_BASE(insn->sib.value);
-		if (X86_REX_B(insn->rex_prefix.value) == 1)
+		if (X86_REX_B(insn->rex_prefix.value))
 			regno += 8;
 		break;
 

+ 2 - 11
arch/x86/pci/bus_numa.c

@@ -50,18 +50,9 @@ void x86_pci_root_bus_resources(int bus, struct list_head *resources)
 	if (!found)
 		pci_add_resource(resources, &info->busn);
 
-	list_for_each_entry(root_res, &info->resources, list) {
-		struct resource *res;
-		struct resource *root;
+	list_for_each_entry(root_res, &info->resources, list)
+		pci_add_resource(resources, &root_res->res);
 
-		res = &root_res->res;
-		pci_add_resource(resources, res);
-		if (res->flags & IORESOURCE_IO)
-			root = &ioport_resource;
-		else
-			root = &iomem_resource;
-		insert_resource(root, res);
-	}
 	return;
 
 default_resources:

+ 7 - 14
block/blk-core.c

@@ -2114,7 +2114,8 @@ blk_qc_t submit_bio(int rw, struct bio *bio)
 EXPORT_SYMBOL(submit_bio);
 
 /**
- * blk_rq_check_limits - Helper function to check a request for the queue limit
+ * blk_cloned_rq_check_limits - Helper function to check a cloned request
+ *                              for new the queue limits
  * @q:  the queue
  * @rq: the request being checked
  *
@@ -2125,20 +2126,13 @@ EXPORT_SYMBOL(submit_bio);
  *    after it is inserted to @q, it should be checked against @q before
  *    the insertion using this generic function.
  *
- *    This function should also be useful for request stacking drivers
- *    in some cases below, so export this function.
  *    Request stacking drivers like request-based dm may change the queue
- *    limits while requests are in the queue (e.g. dm's table swapping).
- *    Such request stacking drivers should check those requests against
- *    the new queue limits again when they dispatch those requests,
- *    although such checkings are also done against the old queue limits
- *    when submitting requests.
+ *    limits when retrying requests on other queues. Those requests need
+ *    to be checked against the new queue limits again during dispatch.
  */
-int blk_rq_check_limits(struct request_queue *q, struct request *rq)
+static int blk_cloned_rq_check_limits(struct request_queue *q,
+				      struct request *rq)
 {
-	if (!rq_mergeable(rq))
-		return 0;
-
 	if (blk_rq_sectors(rq) > blk_queue_get_max_sectors(q, rq->cmd_flags)) {
 		printk(KERN_ERR "%s: over max size limit.\n", __func__);
 		return -EIO;
@@ -2158,7 +2152,6 @@ int blk_rq_check_limits(struct request_queue *q, struct request *rq)
 
 	return 0;
 }
-EXPORT_SYMBOL_GPL(blk_rq_check_limits);
 
 /**
  * blk_insert_cloned_request - Helper for stacking drivers to submit a request
@@ -2170,7 +2163,7 @@ int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
 	unsigned long flags;
 	int where = ELEVATOR_INSERT_BACK;
 
-	if (blk_rq_check_limits(q, rq))
+	if (blk_cloned_rq_check_limits(q, rq))
 		return -EIO;
 
 	if (rq->rq_disk &&

+ 3 - 0
block/blk-merge.c

@@ -103,6 +103,9 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
 			bvprv = bv;
 			bvprvp = &bvprv;
 			sectors += bv.bv_len >> 9;
+
+			if (nsegs == 1 && seg_size > front_seg_size)
+				front_seg_size = seg_size;
 			continue;
 		}
 new_segment:

+ 16 - 20
block/blk-settings.c

@@ -91,7 +91,8 @@ void blk_set_default_limits(struct queue_limits *lim)
 	lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
 	lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
 	lim->virt_boundary_mask = 0;
 	lim->virt_boundary_mask = 0;
 	lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
 	lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
-	lim->max_sectors = lim->max_hw_sectors = BLK_SAFE_MAX_SECTORS;
+	lim->max_sectors = lim->max_dev_sectors = lim->max_hw_sectors =
+		BLK_SAFE_MAX_SECTORS;
 	lim->chunk_sectors = 0;
 	lim->chunk_sectors = 0;
 	lim->max_write_same_sectors = 0;
 	lim->max_write_same_sectors = 0;
 	lim->max_discard_sectors = 0;
 	lim->max_discard_sectors = 0;
@@ -127,6 +128,7 @@ void blk_set_stacking_limits(struct queue_limits *lim)
 	lim->max_hw_sectors = UINT_MAX;
 	lim->max_hw_sectors = UINT_MAX;
 	lim->max_segment_size = UINT_MAX;
 	lim->max_segment_size = UINT_MAX;
 	lim->max_sectors = UINT_MAX;
 	lim->max_sectors = UINT_MAX;
+	lim->max_dev_sectors = UINT_MAX;
 	lim->max_write_same_sectors = UINT_MAX;
 	lim->max_write_same_sectors = UINT_MAX;
 }
 }
 EXPORT_SYMBOL(blk_set_stacking_limits);
 EXPORT_SYMBOL(blk_set_stacking_limits);
@@ -214,8 +216,8 @@ void blk_queue_bounce_limit(struct request_queue *q, u64 max_addr)
 EXPORT_SYMBOL(blk_queue_bounce_limit);
 EXPORT_SYMBOL(blk_queue_bounce_limit);
 
 
 /**
 /**
- * blk_limits_max_hw_sectors - set hard and soft limit of max sectors for request
- * @limits: the queue limits
+ * blk_queue_max_hw_sectors - set max sectors for a request for this queue
+ * @q:  the request queue for the device
  * @max_hw_sectors:  max hardware sectors in the usual 512b unit
  * @max_hw_sectors:  max hardware sectors in the usual 512b unit
  *
  *
  * Description:
  * Description:
@@ -224,13 +226,19 @@ EXPORT_SYMBOL(blk_queue_bounce_limit);
  *    the device driver based upon the capabilities of the I/O
  *    the device driver based upon the capabilities of the I/O
  *    controller.
  *    controller.
  *
  *
+ *    max_dev_sectors is a hard limit imposed by the storage device for
+ *    READ/WRITE requests. It is set by the disk driver.
+ *
  *    max_sectors is a soft limit imposed by the block layer for
  *    max_sectors is a soft limit imposed by the block layer for
  *    filesystem type requests.  This value can be overridden on a
  *    filesystem type requests.  This value can be overridden on a
  *    per-device basis in /sys/block/<device>/queue/max_sectors_kb.
  *    per-device basis in /sys/block/<device>/queue/max_sectors_kb.
  *    The soft limit can not exceed max_hw_sectors.
  *    The soft limit can not exceed max_hw_sectors.
  **/
  **/
-void blk_limits_max_hw_sectors(struct queue_limits *limits, unsigned int max_hw_sectors)
+void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
 {
 {
+	struct queue_limits *limits = &q->limits;
+	unsigned int max_sectors;
+
 	if ((max_hw_sectors << 9) < PAGE_CACHE_SIZE) {
 		max_hw_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
 		printk(KERN_INFO "%s: set to minimum %d\n",
@@ -238,22 +246,9 @@ void blk_limits_max_hw_sectors(struct queue_limits *limits, unsigned int max_hw_
 	}
 
 	limits->max_hw_sectors = max_hw_sectors;
-	limits->max_sectors = min_t(unsigned int, max_hw_sectors,
-				    BLK_DEF_MAX_SECTORS);
-}
-EXPORT_SYMBOL(blk_limits_max_hw_sectors);
-
-/**
- * blk_queue_max_hw_sectors - set max sectors for a request for this queue
- * @q:  the request queue for the device
- * @max_hw_sectors:  max hardware sectors in the usual 512b unit
- *
- * Description:
- *    See description for blk_limits_max_hw_sectors().
- **/
-void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
-{
-	blk_limits_max_hw_sectors(&q->limits, max_hw_sectors);
+	max_sectors = min_not_zero(max_hw_sectors, limits->max_dev_sectors);
+	max_sectors = min_t(unsigned int, max_sectors, BLK_DEF_MAX_SECTORS);
+	limits->max_sectors = max_sectors;
 }
 EXPORT_SYMBOL(blk_queue_max_hw_sectors);
 
@@ -527,6 +522,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
 
 	t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
 	t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
+	t->max_dev_sectors = min_not_zero(t->max_dev_sectors, b->max_dev_sectors);
 	t->max_write_same_sectors = min(t->max_write_same_sectors,
 					b->max_write_same_sectors);
 	t->bounce_pfn = min_not_zero(t->bounce_pfn, b->bounce_pfn);

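Worth noting for the blk-settings hunks above: min_not_zero() treats zero as "no limit set", so a device that never reports max_dev_sectors simply drops out of the calculation. A self-contained sketch of the capping order blk_queue_max_hw_sectors() now applies (this helper is a simplification, not the kernel macro):

	/* Sketch: soft limit = min(hw limit, device limit, block-layer default),
	 * where a zero device limit means "none reported". */
	static unsigned int cap_max_sectors(unsigned int max_hw_sectors,
					    unsigned int max_dev_sectors,
					    unsigned int blk_def_max_sectors)
	{
		unsigned int max = max_dev_sectors ?
			(max_hw_sectors < max_dev_sectors ?
			 max_hw_sectors : max_dev_sectors) : max_hw_sectors;

		return max < blk_def_max_sectors ? max : blk_def_max_sectors;
	}

The blk-sysfs hunk below applies the same device cap when a user writes max_sectors_kb, so sysfs cannot raise the soft limit past what the disk itself allows.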
+ 3 - 0
block/blk-sysfs.c

@@ -205,6 +205,9 @@ queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
 	if (ret < 0)
 		return ret;
 
+	max_hw_sectors_kb = min_not_zero(max_hw_sectors_kb, (unsigned long)
+					 q->limits.max_dev_sectors >> 1);
+
 	if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb)
 		return -EINVAL;
 

+ 1 - 1
block/partition-generic.c

@@ -397,7 +397,7 @@ static int drop_partitions(struct gendisk *disk, struct block_device *bdev)
 	struct hd_struct *part;
 	int res;
 
-	if (bdev->bd_part_count)
+	if (bdev->bd_part_count || bdev->bd_super)
 		return -EBUSY;
 	res = invalidate_partition(disk, 0);
 	if (res)

+ 2 - 2
crypto/algif_aead.c

@@ -125,7 +125,7 @@ static int aead_wait_for_data(struct sock *sk, unsigned flags)
 	if (flags & MSG_DONTWAIT)
 		return -EAGAIN;
 
-	set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
+	sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
 
 	for (;;) {
 		if (signal_pending(current))
@@ -139,7 +139,7 @@ static int aead_wait_for_data(struct sock *sk, unsigned flags)
 	}
 	finish_wait(sk_sleep(sk), &wait);
 
-	clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
+	sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
 
 	return err;
 }

+ 3 - 3
crypto/algif_skcipher.c

@@ -212,7 +212,7 @@ static int skcipher_wait_for_wmem(struct sock *sk, unsigned flags)
 	if (flags & MSG_DONTWAIT)
 		return -EAGAIN;
 
-	set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
+	sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
 
 	for (;;) {
 		if (signal_pending(current))
@@ -258,7 +258,7 @@ static int skcipher_wait_for_data(struct sock *sk, unsigned flags)
 		return -EAGAIN;
 	}
 
-	set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
+	sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
 
 	for (;;) {
 		if (signal_pending(current))
@@ -272,7 +272,7 @@ static int skcipher_wait_for_data(struct sock *sk, unsigned flags)
 	}
 	finish_wait(sk_sleep(sk), &wait);
 
-	clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
+	sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
 
 	return err;
 }

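The algif changes above are mechanical: the open-coded set_bit()/clear_bit() on sk->sk_socket->flags moves behind sk_set_bit()/sk_clear_bit(), and the flag constants gain the SOCKWQ_ prefix. Roughly, the wrappers have this shape (a sketch, not necessarily the exact include/net/sock.h definition):

	/* Sketch: centralize which word carries the async socket flags. */
	static inline void sk_set_bit(int nr, struct sock *sk)
	{
		set_bit(nr, &sk->sk_socket->flags);
	}

	static inline void sk_clear_bit(int nr, struct sock *sk)
	{
		clear_bit(nr, &sk->sk_socket->flags);
	}

The point of the indirection is that callers no longer hard-code where the flag word lives, so it can later be relocated (e.g. into the socket wait queue) without touching every user.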
+ 2 - 2
drivers/acpi/Kconfig

@@ -58,10 +58,10 @@ config ACPI_CCA_REQUIRED
 	bool
 
 config ACPI_DEBUGGER
-	bool "In-kernel debugger (EXPERIMENTAL)"
+	bool "AML debugger interface (EXPERIMENTAL)"
 	select ACPI_DEBUG
 	help
-	  Enable in-kernel debugging facilities: statistics, internal
+	  Enable in-kernel debugging of AML facilities: statistics, internal
 	  object dump, single step control method execution.
 	  This is still under development, currently enabling this only
 	  results in the compilation of the ACPICA debugger files.

+ 47 - 18
drivers/acpi/nfit.c

@@ -233,11 +233,12 @@ static bool add_spa(struct acpi_nfit_desc *acpi_desc,
 		struct nfit_table_prev *prev,
 		struct acpi_nfit_system_address *spa)
 {
+	size_t length = min_t(size_t, sizeof(*spa), spa->header.length);
 	struct device *dev = acpi_desc->dev;
 	struct nfit_spa *nfit_spa;
 
 	list_for_each_entry(nfit_spa, &prev->spas, list) {
-		if (memcmp(nfit_spa->spa, spa, sizeof(*spa)) == 0) {
+		if (memcmp(nfit_spa->spa, spa, length) == 0) {
 			list_move_tail(&nfit_spa->list, &acpi_desc->spas);
 			return true;
 		}
@@ -259,11 +260,12 @@ static bool add_memdev(struct acpi_nfit_desc *acpi_desc,
 		struct nfit_table_prev *prev,
 		struct acpi_nfit_memory_map *memdev)
 {
+	size_t length = min_t(size_t, sizeof(*memdev), memdev->header.length);
 	struct device *dev = acpi_desc->dev;
 	struct nfit_memdev *nfit_memdev;
 
 	list_for_each_entry(nfit_memdev, &prev->memdevs, list)
-		if (memcmp(nfit_memdev->memdev, memdev, sizeof(*memdev)) == 0) {
+		if (memcmp(nfit_memdev->memdev, memdev, length) == 0) {
 			list_move_tail(&nfit_memdev->list, &acpi_desc->memdevs);
 			return true;
 		}
@@ -284,11 +286,12 @@ static bool add_dcr(struct acpi_nfit_desc *acpi_desc,
 		struct nfit_table_prev *prev,
 		struct acpi_nfit_control_region *dcr)
 {
+	size_t length = min_t(size_t, sizeof(*dcr), dcr->header.length);
 	struct device *dev = acpi_desc->dev;
 	struct nfit_dcr *nfit_dcr;
 
 	list_for_each_entry(nfit_dcr, &prev->dcrs, list)
-		if (memcmp(nfit_dcr->dcr, dcr, sizeof(*dcr)) == 0) {
+		if (memcmp(nfit_dcr->dcr, dcr, length) == 0) {
 			list_move_tail(&nfit_dcr->list, &acpi_desc->dcrs);
 			return true;
 		}
@@ -308,11 +311,12 @@ static bool add_bdw(struct acpi_nfit_desc *acpi_desc,
 		struct nfit_table_prev *prev,
 		struct acpi_nfit_data_region *bdw)
 {
+	size_t length = min_t(size_t, sizeof(*bdw), bdw->header.length);
 	struct device *dev = acpi_desc->dev;
 	struct nfit_bdw *nfit_bdw;
 
 	list_for_each_entry(nfit_bdw, &prev->bdws, list)
-		if (memcmp(nfit_bdw->bdw, bdw, sizeof(*bdw)) == 0) {
+		if (memcmp(nfit_bdw->bdw, bdw, length) == 0) {
 			list_move_tail(&nfit_bdw->list, &acpi_desc->bdws);
 			return true;
 		}
@@ -332,11 +336,12 @@ static bool add_idt(struct acpi_nfit_desc *acpi_desc,
 		struct nfit_table_prev *prev,
 		struct acpi_nfit_interleave *idt)
 {
+	size_t length = min_t(size_t, sizeof(*idt), idt->header.length);
 	struct device *dev = acpi_desc->dev;
 	struct nfit_idt *nfit_idt;
 
 	list_for_each_entry(nfit_idt, &prev->idts, list)
-		if (memcmp(nfit_idt->idt, idt, sizeof(*idt)) == 0) {
+		if (memcmp(nfit_idt->idt, idt, length) == 0) {
 			list_move_tail(&nfit_idt->list, &acpi_desc->idts);
 			return true;
 		}
@@ -356,11 +361,12 @@ static bool add_flush(struct acpi_nfit_desc *acpi_desc,
 		struct nfit_table_prev *prev,
 		struct acpi_nfit_flush_address *flush)
 {
+	size_t length = min_t(size_t, sizeof(*flush), flush->header.length);
 	struct device *dev = acpi_desc->dev;
 	struct nfit_flush *nfit_flush;
 
 	list_for_each_entry(nfit_flush, &prev->flushes, list)
-		if (memcmp(nfit_flush->flush, flush, sizeof(*flush)) == 0) {
+		if (memcmp(nfit_flush->flush, flush, length) == 0) {
 			list_move_tail(&nfit_flush->list, &acpi_desc->flushes);
 			return true;
 		}
@@ -655,7 +661,7 @@ static ssize_t revision_show(struct device *dev,
 	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
 	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
 
-	return sprintf(buf, "%d\n", acpi_desc->nfit->header.revision);
+	return sprintf(buf, "%d\n", acpi_desc->acpi_header.revision);
 }
 static DEVICE_ATTR_RO(revision);
 
@@ -1652,7 +1658,6 @@ int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, acpi_size sz)
 
 	data = (u8 *) acpi_desc->nfit;
 	end = data + sz;
-	data += sizeof(struct acpi_table_nfit);
 	while (!IS_ERR_OR_NULL(data))
 		data = add_table(acpi_desc, &prev, data, end);
 
@@ -1748,13 +1753,29 @@ static int acpi_nfit_add(struct acpi_device *adev)
 		return PTR_ERR(acpi_desc);
 	}
 
-	acpi_desc->nfit = (struct acpi_table_nfit *) tbl;
+	/*
+	 * Save the acpi header for later and then skip it,
+	 * making nfit point to the first nfit table header.
+	 */
+	acpi_desc->acpi_header = *tbl;
+	acpi_desc->nfit = (void *) tbl + sizeof(struct acpi_table_nfit);
+	sz -= sizeof(struct acpi_table_nfit);
 
 	/* Evaluate _FIT and override with that if present */
 	status = acpi_evaluate_object(adev->handle, "_FIT", NULL, &buf);
 	if (ACPI_SUCCESS(status) && buf.length > 0) {
-		acpi_desc->nfit = (struct acpi_table_nfit *)buf.pointer;
-		sz = buf.length;
+		union acpi_object *obj;
+		/*
+		 * Adjust for the acpi_object header of the _FIT
+		 */
+		obj = buf.pointer;
+		if (obj->type == ACPI_TYPE_BUFFER) {
+			acpi_desc->nfit =
+				(struct acpi_nfit_header *)obj->buffer.pointer;
+			sz = obj->buffer.length;
+		} else
+			dev_dbg(dev, "%s invalid type %d, ignoring _FIT\n",
+				 __func__, (int) obj->type);
 	}
 
 	rc = acpi_nfit_init(acpi_desc, sz);
@@ -1777,7 +1798,8 @@ static void acpi_nfit_notify(struct acpi_device *adev, u32 event)
 {
 	struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(&adev->dev);
 	struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
-	struct acpi_table_nfit *nfit_saved;
+	struct acpi_nfit_header *nfit_saved;
+	union acpi_object *obj;
 	struct device *dev = &adev->dev;
 	acpi_status status;
 	int ret;
@@ -1808,12 +1830,19 @@ static void acpi_nfit_notify(struct acpi_device *adev, u32 event)
 	}
 
 	nfit_saved = acpi_desc->nfit;
-	acpi_desc->nfit = (struct acpi_table_nfit *)buf.pointer;
-	ret = acpi_nfit_init(acpi_desc, buf.length);
-	if (!ret) {
-		/* Merge failed, restore old nfit, and exit */
-		acpi_desc->nfit = nfit_saved;
-		dev_err(dev, "failed to merge updated NFIT\n");
+	obj = buf.pointer;
+	if (obj->type == ACPI_TYPE_BUFFER) {
+		acpi_desc->nfit =
+			(struct acpi_nfit_header *)obj->buffer.pointer;
+		ret = acpi_nfit_init(acpi_desc, obj->buffer.length);
+		if (ret) {
+			/* Merge failed, restore old nfit, and exit */
+			acpi_desc->nfit = nfit_saved;
+			dev_err(dev, "failed to merge updated NFIT\n");
+		}
+	} else {
+		/* Bad _FIT, restore old nfit */
+		dev_err(dev, "Invalid _FIT\n");
 	}
 	kfree(buf.pointer);
 

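The add_spa()/add_memdev()/add_dcr()/add_bdw()/add_idt()/add_flush() changes above all apply one rule: clamp the memcmp() to the length the firmware reported in the sub-table header, so a shorter table from an older NFIT revision is neither over-read nor spuriously treated as new. In isolation the pattern looks like this (types from the diff; the helper name is illustrative):

	/* Sketch: compare two NFIT sub-tables without reading past the
	 * firmware-reported length. */
	static bool nfit_table_matches(struct acpi_nfit_system_address *old,
				       struct acpi_nfit_system_address *new)
	{
		size_t length = min_t(size_t, sizeof(*new), new->header.length);

		return memcmp(old, new, length) == 0;
	}
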
+ 2 - 1
drivers/acpi/nfit.h

@@ -96,7 +96,8 @@ struct nfit_mem {
 
 struct acpi_nfit_desc {
 	struct nvdimm_bus_descriptor nd_desc;
-	struct acpi_table_nfit *nfit;
+	struct acpi_table_header acpi_header;
+	struct acpi_nfit_header *nfit;
 	struct mutex spa_map_mutex;
 	struct mutex init_mutex;
 	struct list_head spa_maps;

+ 7 - 0
drivers/acpi/pci_root.c

@@ -768,6 +768,13 @@ static void pci_acpi_root_add_resources(struct acpi_pci_root_info *info)
 		else
 			continue;
 
+		/*
+		 * Some legacy x86 host bridge drivers use iomem_resource and
+		 * ioport_resource as default resource pool, skip it.
+		 */
+		if (res == root)
+			continue;
+
 		conflict = insert_resource_conflict(root, res);
 		if (conflict) {
 			dev_info(&info->bridge->dev,

+ 1 - 2
drivers/base/power/domain.c

@@ -1775,10 +1775,10 @@ int genpd_dev_pm_attach(struct device *dev)
 	}
 
 	pd = of_genpd_get_from_provider(&pd_args);
+	of_node_put(pd_args.np);
 	if (IS_ERR(pd)) {
 		dev_dbg(dev, "%s() failed to find PM domain: %ld\n",
 			__func__, PTR_ERR(pd));
-		of_node_put(dev->of_node);
 		return -EPROBE_DEFER;
 	}
 
@@ -1796,7 +1796,6 @@ int genpd_dev_pm_attach(struct device *dev)
 	if (ret < 0) {
 		dev_err(dev, "failed to add to PM domain %s: %d",
 			pd->name, ret);
-		of_node_put(dev->of_node);
 		goto out;
 	}
 

+ 0 - 3
drivers/base/power/domain_governor.c

@@ -160,9 +160,6 @@ static bool default_power_down_ok(struct dev_pm_domain *pd)
 		struct gpd_timing_data *td;
 		s64 constraint_ns;
 
-		if (!pdd->dev->driver)
-			continue;
-
 		/*
 		 * Check if the device is allowed to be off long enough for the
 		 * domain to turn off and on (that's how much time it will

+ 33 - 61
drivers/block/null_blk.c

@@ -18,6 +18,7 @@ struct nullb_cmd {
 	struct bio *bio;
 	unsigned int tag;
 	struct nullb_queue *nq;
+	struct hrtimer timer;
 };
 
 struct nullb_queue {
@@ -49,17 +50,6 @@ static int null_major;
 static int nullb_indexes;
 static struct kmem_cache *ppa_cache;
 
-struct completion_queue {
-	struct llist_head list;
-	struct hrtimer timer;
-};
-
-/*
- * These are per-cpu for now, they will need to be configured by the
- * complete_queues parameter and appropriately mapped.
- */
-static DEFINE_PER_CPU(struct completion_queue, completion_queues);
-
 enum {
 	NULL_IRQ_NONE		= 0,
 	NULL_IRQ_SOFTIRQ	= 1,
@@ -142,8 +132,8 @@ static const struct kernel_param_ops null_irqmode_param_ops = {
 device_param_cb(irqmode, &null_irqmode_param_ops, &irqmode, S_IRUGO);
 MODULE_PARM_DESC(irqmode, "IRQ completion handler. 0-none, 1-softirq, 2-timer");
 
-static int completion_nsec = 10000;
-module_param(completion_nsec, int, S_IRUGO);
+static unsigned long completion_nsec = 10000;
+module_param(completion_nsec, ulong, S_IRUGO);
 MODULE_PARM_DESC(completion_nsec, "Time in ns to complete a request in hardware. Default: 10,000ns");
 
 static int hw_queue_depth = 64;
@@ -180,6 +170,8 @@ static void free_cmd(struct nullb_cmd *cmd)
 	put_tag(cmd->nq, cmd->tag);
 }
 
+static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer);
+
 static struct nullb_cmd *__alloc_cmd(struct nullb_queue *nq)
 {
 	struct nullb_cmd *cmd;
@@ -190,6 +182,11 @@ static struct nullb_cmd *__alloc_cmd(struct nullb_queue *nq)
 		cmd = &nq->cmds[tag];
 		cmd->tag = tag;
 		cmd->nq = nq;
+		if (irqmode == NULL_IRQ_TIMER) {
+			hrtimer_init(&cmd->timer, CLOCK_MONOTONIC,
+				     HRTIMER_MODE_REL);
+			cmd->timer.function = null_cmd_timer_expired;
+		}
 		return cmd;
 	}
 
@@ -220,6 +217,8 @@ static struct nullb_cmd *alloc_cmd(struct nullb_queue *nq, int can_wait)
 
 static void end_cmd(struct nullb_cmd *cmd)
 {
+	struct request_queue *q = NULL;
+
 	switch (queue_mode)  {
 	case NULL_Q_MQ:
 		blk_mq_end_request(cmd->rq, 0);
@@ -230,55 +229,37 @@ static void end_cmd(struct nullb_cmd *cmd)
 		break;
 	case NULL_Q_BIO:
 		bio_endio(cmd->bio);
-		break;
+		goto free_cmd;
 	}
 
+	if (cmd->rq)
+		q = cmd->rq->q;
+
+	/* Restart queue if needed, as we are freeing a tag */
+	if (q && !q->mq_ops && blk_queue_stopped(q)) {
+		unsigned long flags;
+
+		spin_lock_irqsave(q->queue_lock, flags);
+		if (blk_queue_stopped(q))
+			blk_start_queue(q);
+		spin_unlock_irqrestore(q->queue_lock, flags);
+	}
+free_cmd:
 	free_cmd(cmd);
 }
 
 static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer)
 {
-	struct completion_queue *cq;
-	struct llist_node *entry;
-	struct nullb_cmd *cmd;
-
-	cq = &per_cpu(completion_queues, smp_processor_id());
-
-	while ((entry = llist_del_all(&cq->list)) != NULL) {
-		entry = llist_reverse_order(entry);
-		do {
-			struct request_queue *q = NULL;
-
-			cmd = container_of(entry, struct nullb_cmd, ll_list);
-			entry = entry->next;
-			if (cmd->rq)
-				q = cmd->rq->q;
-			end_cmd(cmd);
-
-			if (q && !q->mq_ops && blk_queue_stopped(q)) {
-				spin_lock(q->queue_lock);
-				if (blk_queue_stopped(q))
-					blk_start_queue(q);
-				spin_unlock(q->queue_lock);
-			}
-		} while (entry);
-	}
+	end_cmd(container_of(timer, struct nullb_cmd, timer));
 
 	return HRTIMER_NORESTART;
 }
 
 static void null_cmd_end_timer(struct nullb_cmd *cmd)
 {
-	struct completion_queue *cq = &per_cpu(completion_queues, get_cpu());
-
-	cmd->ll_list.next = NULL;
-	if (llist_add(&cmd->ll_list, &cq->list)) {
-		ktime_t kt = ktime_set(0, completion_nsec);
-
-		hrtimer_start(&cq->timer, kt, HRTIMER_MODE_REL_PINNED);
-	}
+	ktime_t kt = ktime_set(0, completion_nsec);
 
-	put_cpu();
+	hrtimer_start(&cmd->timer, kt, HRTIMER_MODE_REL);
 }
 
 static void null_softirq_done_fn(struct request *rq)
@@ -376,6 +357,10 @@ static int null_queue_rq(struct blk_mq_hw_ctx *hctx,
 {
 	struct nullb_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
 
+	if (irqmode == NULL_IRQ_TIMER) {
+		hrtimer_init(&cmd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+		cmd->timer.function = null_cmd_timer_expired;
+	}
 	cmd->rq = bd->rq;
 	cmd->nq = hctx->driver_data;
 
@@ -813,19 +798,6 @@ static int __init null_init(void)
 
 	mutex_init(&lock);
 
-	/* Initialize a separate list for each CPU for issuing softirqs */
-	for_each_possible_cpu(i) {
-		struct completion_queue *cq = &per_cpu(completion_queues, i);
-
-		init_llist_head(&cq->list);
-
-		if (irqmode != NULL_IRQ_TIMER)
-			continue;
-
-		hrtimer_init(&cq->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
-		cq->timer.function = null_cmd_timer_expired;
-	}
-
 	null_major = register_blkdev(0, "nullb");
 	if (null_major < 0)
 		return null_major;

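After this rewrite every nullb_cmd owns a one-shot hrtimer instead of funneling completions through a per-cpu queue, so timer-mode completion collapses to init + start. A minimal standalone sketch of the lifecycle (helper names are illustrative):

	/* Sketch: per-command one-shot completion timer. */
	static enum hrtimer_restart cmd_expired(struct hrtimer *timer)
	{
		end_cmd(container_of(timer, struct nullb_cmd, timer));
		return HRTIMER_NORESTART;	/* one-shot: never re-arms */
	}

	static void arm_completion_timer(struct nullb_cmd *cmd, unsigned long nsec)
	{
		hrtimer_init(&cmd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		cmd->timer.function = cmd_expired;
		hrtimer_start(&cmd->timer, ktime_set(0, nsec), HRTIMER_MODE_REL);
	}

This also drops the llist plumbing and the HRTIMER_MODE_REL_PINNED requirement, since the timer no longer has to fire on the CPU that owns a shared completion queue.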
+ 1 - 0
drivers/block/rbd.c

@@ -3442,6 +3442,7 @@ static void rbd_queue_workfn(struct work_struct *work)
 		goto err_rq;
 	}
 	img_request->rq = rq;
+	snapc = NULL; /* img_request consumes a ref */
 
 	if (op_type == OBJ_OP_DISCARD)
 		result = rbd_img_request_fill(img_request, OBJ_REQUEST_NODATA,

+ 10 - 4
drivers/cpufreq/cpufreq.c

@@ -976,10 +976,14 @@ static int cpufreq_init_policy(struct cpufreq_policy *policy)
 
 	new_policy.governor = gov;
 
-	/* Use the default policy if its valid. */
-	if (cpufreq_driver->setpolicy)
-		cpufreq_parse_governor(gov->name, &new_policy.policy, NULL);
-
+	/* Use the default policy if there is no last_policy. */
+	if (cpufreq_driver->setpolicy) {
+		if (policy->last_policy)
+			new_policy.policy = policy->last_policy;
+		else
+			cpufreq_parse_governor(gov->name, &new_policy.policy,
+					       NULL);
+	}
 	/* set default policy */
 	return cpufreq_set_policy(policy, &new_policy);
 }
@@ -1330,6 +1334,8 @@ static void cpufreq_offline_prepare(unsigned int cpu)
 		if (has_target())
 			strncpy(policy->last_governor, policy->governor->name,
 				CPUFREQ_NAME_LEN);
+		else
+			policy->last_policy = policy->policy;
 	} else if (cpu == policy->cpu) {
 		/* Nominate new CPU */
 		policy->cpu = cpumask_any(policy->cpus);

+ 1 - 1
drivers/crypto/nx/nx-aes-ccm.c

@@ -409,7 +409,7 @@ static int ccm_nx_decrypt(struct aead_request   *req,
 		processed += to_process;
 	} while (processed < nbytes);
 
-	rc = memcmp(csbcpb->cpb.aes_ccm.out_pat_or_mac, priv->oauth_tag,
+	rc = crypto_memneq(csbcpb->cpb.aes_ccm.out_pat_or_mac, priv->oauth_tag,
 		    authsize) ? -EBADMSG : 0;
 out:
 	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);

+ 2 - 1
drivers/crypto/nx/nx-aes-gcm.c

@@ -21,6 +21,7 @@
 
 #include <crypto/internal/aead.h>
 #include <crypto/aes.h>
+#include <crypto/algapi.h>
 #include <crypto/scatterwalk.h>
 #include <linux/module.h>
 #include <linux/types.h>
@@ -418,7 +419,7 @@ mac:
 			itag, req->src, req->assoclen + nbytes,
 			crypto_aead_authsize(crypto_aead_reqtfm(req)),
 			SCATTERWALK_FROM_SG);
-		rc = memcmp(itag, otag,
+		rc = crypto_memneq(itag, otag,
 			    crypto_aead_authsize(crypto_aead_reqtfm(req))) ?
 		     -EBADMSG : 0;
 	}

+ 1 - 1
drivers/crypto/talitos.c

@@ -977,7 +977,7 @@ static void ipsec_esp_decrypt_swauth_done(struct device *dev,
 		} else
 			oicv = (char *)&edesc->link_tbl[0];
 
-		err = memcmp(oicv, icv, authsize) ? -EBADMSG : 0;
+		err = crypto_memneq(oicv, icv, authsize) ? -EBADMSG : 0;
 	}
 
 	kfree(edesc);

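The three crypto conversions above (nx-aes-ccm, nx-aes-gcm, talitos) share one motive: memcmp() returns at the first differing byte, so comparing an authentication tag with it leaks, via timing, how long the matching prefix was. crypto_memneq() runs in time independent of where the buffers differ and returns zero only when they are equal, hence the idiom:

	#include <crypto/algapi.h>	/* crypto_memneq() */

	/* Constant-time tag check: a non-zero return means "differs". */
	static int verify_auth_tag(const u8 *computed, const u8 *received,
				   unsigned int authsize)
	{
		return crypto_memneq(computed, received, authsize) ? -EBADMSG : 0;
	}
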
+ 5 - 2
drivers/gpio/gpio-74xx-mmio.c

@@ -113,13 +113,16 @@ static int mmio_74xx_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
 
 static int mmio_74xx_gpio_probe(struct platform_device *pdev)
 {
-	const struct of_device_id *of_id =
-		of_match_device(mmio_74xx_gpio_ids, &pdev->dev);
+	const struct of_device_id *of_id;
 	struct mmio_74xx_gpio_priv *priv;
 	struct resource *res;
 	void __iomem *dat;
 	int err;
 
+	of_id = of_match_device(mmio_74xx_gpio_ids, &pdev->dev);
+	if (!of_id)
+		return -ENODEV;
+
 	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
 	if (!priv)
 		return -ENOMEM;

+ 0 - 2
drivers/gpio/gpio-omap.c

@@ -1122,8 +1122,6 @@ static int omap_gpio_chip_init(struct gpio_bank *bank, struct irq_chip *irqc)
 	/* MPUIO is a bit different, reading IRQ status clears it */
 	if (bank->is_mpuio) {
 		irqc->irq_ack = dummy_irq_chip.irq_ack;
-		irqc->irq_mask = irq_gc_mask_set_bit;
-		irqc->irq_unmask = irq_gc_mask_clr_bit;
 		if (!bank->regs->wkup_en)
 			irqc->irq_set_wake = NULL;
 	}

+ 2 - 0
drivers/gpio/gpio-palmas.c

@@ -167,6 +167,8 @@ static int palmas_gpio_probe(struct platform_device *pdev)
 	const struct palmas_device_data *dev_data;
 
 	match = of_match_device(of_palmas_gpio_match, &pdev->dev);
+	if (!match)
+		return -ENODEV;
 	dev_data = match->data;
 	if (!dev_data)
 		dev_data = &palmas_dev_data;

+ 5 - 1
drivers/gpio/gpio-syscon.c

@@ -187,11 +187,15 @@ MODULE_DEVICE_TABLE(of, syscon_gpio_ids);
 static int syscon_gpio_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
-	const struct of_device_id *of_id = of_match_device(syscon_gpio_ids, dev);
+	const struct of_device_id *of_id;
 	struct syscon_gpio_priv *priv;
 	struct device_node *np = dev->of_node;
 	int ret;
 
+	of_id = of_match_device(syscon_gpio_ids, dev);
+	if (!of_id)
+		return -ENODEV;
+
 	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
 	if (!priv)
 		return -ENOMEM;

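The gpio-74xx-mmio, gpio-palmas and gpio-syscon fixes above are the same hardening: of_match_device() returns NULL when no entry of the match table applies, and each probe was dereferencing the result unchecked. The defensive shape, reduced to a sketch (driver names are placeholders):

	/* Sketch: bail out cleanly when the OF match table yields nothing. */
	static int example_gpio_probe(struct platform_device *pdev)
	{
		const struct of_device_id *of_id;

		of_id = of_match_device(example_gpio_of_match, &pdev->dev);
		if (!of_id)
			return -ENODEV;	/* may legitimately be NULL */

		/* of_id->data is only safe to use past this point */
		return 0;
	}
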
+ 56 - 49
drivers/gpio/gpio-tegra.c

@@ -375,6 +375,60 @@ static int tegra_gpio_irq_set_wake(struct irq_data *d, unsigned int enable)
 }
 #endif
 
+#ifdef	CONFIG_DEBUG_FS
+
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+static int dbg_gpio_show(struct seq_file *s, void *unused)
+{
+	int i;
+	int j;
+
+	for (i = 0; i < tegra_gpio_bank_count; i++) {
+		for (j = 0; j < 4; j++) {
+			int gpio = tegra_gpio_compose(i, j, 0);
+			seq_printf(s,
+				"%d:%d %02x %02x %02x %02x %02x %02x %06x\n",
+				i, j,
+				tegra_gpio_readl(GPIO_CNF(gpio)),
+				tegra_gpio_readl(GPIO_OE(gpio)),
+				tegra_gpio_readl(GPIO_OUT(gpio)),
+				tegra_gpio_readl(GPIO_IN(gpio)),
+				tegra_gpio_readl(GPIO_INT_STA(gpio)),
+				tegra_gpio_readl(GPIO_INT_ENB(gpio)),
+				tegra_gpio_readl(GPIO_INT_LVL(gpio)));
+		}
+	}
+	return 0;
+}
+
+static int dbg_gpio_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, dbg_gpio_show, &inode->i_private);
+}
+
+static const struct file_operations debug_fops = {
+	.open		= dbg_gpio_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+static void tegra_gpio_debuginit(void)
+{
+	(void) debugfs_create_file("tegra_gpio", S_IRUGO,
+					NULL, NULL, &debug_fops);
+}
+
+#else
+
+static inline void tegra_gpio_debuginit(void)
+{
+}
+
+#endif
+
 static struct irq_chip tegra_gpio_irq_chip = {
 	.name		= "GPIO",
 	.irq_ack	= tegra_gpio_irq_ack,
@@ -519,6 +573,8 @@ static int tegra_gpio_probe(struct platform_device *pdev)
 			spin_lock_init(&bank->lvl_lock[j]);
 	}
 
+	tegra_gpio_debuginit();
+
 	return 0;
 }
 
@@ -536,52 +592,3 @@ static int __init tegra_gpio_init(void)
 	return platform_driver_register(&tegra_gpio_driver);
 }
 postcore_initcall(tegra_gpio_init);
-
-#ifdef	CONFIG_DEBUG_FS
-
-#include <linux/debugfs.h>
-#include <linux/seq_file.h>
-
-static int dbg_gpio_show(struct seq_file *s, void *unused)
-{
-	int i;
-	int j;
-
-	for (i = 0; i < tegra_gpio_bank_count; i++) {
-		for (j = 0; j < 4; j++) {
-			int gpio = tegra_gpio_compose(i, j, 0);
-			seq_printf(s,
-				"%d:%d %02x %02x %02x %02x %02x %02x %06x\n",
-				i, j,
-				tegra_gpio_readl(GPIO_CNF(gpio)),
-				tegra_gpio_readl(GPIO_OE(gpio)),
-				tegra_gpio_readl(GPIO_OUT(gpio)),
-				tegra_gpio_readl(GPIO_IN(gpio)),
-				tegra_gpio_readl(GPIO_INT_STA(gpio)),
-				tegra_gpio_readl(GPIO_INT_ENB(gpio)),
-				tegra_gpio_readl(GPIO_INT_LVL(gpio)));
-		}
-	}
-	return 0;
-}
-
-static int dbg_gpio_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, dbg_gpio_show, &inode->i_private);
-}
-
-static const struct file_operations debug_fops = {
-	.open		= dbg_gpio_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= single_release,
-};
-
-static int __init tegra_gpio_debuginit(void)
-{
-	(void) debugfs_create_file("tegra_gpio", S_IRUGO,
-					NULL, NULL, &debug_fops);
-	return 0;
-}
-late_initcall(tegra_gpio_debuginit);
-#endif

+ 1 - 1
drivers/gpio/gpiolib.c

@@ -233,7 +233,7 @@ static struct gpio_desc *gpio_name_to_desc(const char * const name)
 		for (i = 0; i != chip->ngpio; ++i) {
 			struct gpio_desc *gpio = &chip->desc[i];
 
-			if (!gpio->name)
+			if (!gpio->name || !name)
 				continue;
 
 			if (!strcmp(gpio->name, name)) {

+ 3 - 0
drivers/gpu/drm/amd/amdgpu/amdgpu.h

@@ -539,6 +539,7 @@ struct amdgpu_bo {
 	/* Constant after initialization */
 	struct amdgpu_device		*adev;
 	struct drm_gem_object		gem_base;
+	struct amdgpu_bo		*parent;
 
 	struct ttm_bo_kmap_obj		dma_buf_vmap;
 	pid_t				pid;
@@ -955,6 +956,8 @@ struct amdgpu_vm {
 	struct amdgpu_vm_id	ids[AMDGPU_MAX_RINGS];
 	/* for interval tree */
 	spinlock_t		it_lock;
+	/* protecting freed */
+	spinlock_t		freed_lock;
 };
 
 struct amdgpu_vm_manager {

+ 4 - 2
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c

@@ -222,6 +222,8 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
 				}
 
 				p->uf.bo = gem_to_amdgpu_bo(gobj);
+				amdgpu_bo_ref(p->uf.bo);
+				drm_gem_object_unreference_unlocked(gobj);
 				p->uf.offset = fence_data->offset;
 			} else {
 				ret = -EINVAL;
@@ -487,7 +489,7 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bo
 			amdgpu_ib_free(parser->adev, &parser->ibs[i]);
 	kfree(parser->ibs);
 	if (parser->uf.bo)
-		drm_gem_object_unreference_unlocked(&parser->uf.bo->gem_base);
+		amdgpu_bo_unref(&parser->uf.bo);
 }
 
 static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p,
@@ -776,7 +778,7 @@ static int amdgpu_cs_free_job(struct amdgpu_job *job)
 			amdgpu_ib_free(job->adev, &job->ibs[i]);
 	kfree(job->ibs);
 	if (job->uf.bo)
-		drm_gem_object_unreference_unlocked(&job->uf.bo->gem_base);
+		amdgpu_bo_unref(&job->uf.bo);
 	return 0;
 }
 

+ 79 - 29
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c

@@ -73,6 +73,8 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
 	struct drm_crtc *crtc = &amdgpuCrtc->base;
 	unsigned long flags;
 	unsigned i;
+	int vpos, hpos, stat, min_udelay;
+	struct drm_vblank_crtc *vblank = &crtc->dev->vblank[work->crtc_id];
 
 	amdgpu_flip_wait_fence(adev, &work->excl);
 	for (i = 0; i < work->shared_count; ++i)
@@ -81,6 +83,41 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
 	/* We borrow the event spin lock for protecting flip_status */
 	spin_lock_irqsave(&crtc->dev->event_lock, flags);
 
+	/* If this happens to execute within the "virtually extended" vblank
+	 * interval before the start of the real vblank interval then it needs
+	 * to delay programming the mmio flip until the real vblank is entered.
+	 * This prevents completing a flip too early due to the way we fudge
+	 * our vblank counter and vblank timestamps in order to work around the
+	 * problem that the hw fires vblank interrupts before actual start of
+	 * vblank (when line buffer refilling is done for a frame). It
+	 * complements the fudging logic in amdgpu_get_crtc_scanoutpos() for
+	 * timestamping and amdgpu_get_vblank_counter_kms() for vblank counts.
+	 *
+	 * In practice this won't execute very often unless on very fast
+	 * machines because the time window for this to happen is very small.
+	 */
+	for (;;) {
+		/* GET_DISTANCE_TO_VBLANKSTART returns distance to real vblank
+		 * start in hpos, and to the "fudged earlier" vblank start in
+		 * vpos.
+		 */
+		stat = amdgpu_get_crtc_scanoutpos(adev->ddev, work->crtc_id,
+						  GET_DISTANCE_TO_VBLANKSTART,
+						  &vpos, &hpos, NULL, NULL,
+						  &crtc->hwmode);
+
+		if ((stat & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE)) !=
+		    (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE) ||
+		    !(vpos >= 0 && hpos <= 0))
+			break;
+
+		/* Sleep at least until estimated real start of hw vblank */
+		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+		min_udelay = (-hpos + 1) * max(vblank->linedur_ns / 1000, 5);
+		usleep_range(min_udelay, 2 * min_udelay);
+		spin_lock_irqsave(&crtc->dev->event_lock, flags);
+	};
+
 	/* do the flip (mmio) */
 	adev->mode_info.funcs->page_flip(adev, work->crtc_id, work->base);
 	/* set the flip status */
@@ -109,7 +146,7 @@ static void amdgpu_unpin_work_func(struct work_struct *__work)
 	} else
 		DRM_ERROR("failed to reserve buffer after flip\n");
 
-	drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base);
+	amdgpu_bo_unref(&work->old_rbo);
 	kfree(work->shared);
 	kfree(work);
 }
@@ -148,8 +185,8 @@ int amdgpu_crtc_page_flip(struct drm_crtc *crtc,
 	obj = old_amdgpu_fb->obj;
 
 	/* take a reference to the old object */
-	drm_gem_object_reference(obj);
 	work->old_rbo = gem_to_amdgpu_bo(obj);
+	amdgpu_bo_ref(work->old_rbo);
 
 	new_amdgpu_fb = to_amdgpu_framebuffer(fb);
 	obj = new_amdgpu_fb->obj;
@@ -222,7 +259,7 @@ pflip_cleanup:
 	amdgpu_bo_unreserve(new_rbo);
 
 cleanup:
-	drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base);
+	amdgpu_bo_unref(&work->old_rbo);
 	fence_put(work->excl);
 	for (i = 0; i < work->shared_count; ++i)
 		fence_put(work->shared[i]);
@@ -712,6 +749,15 @@ bool amdgpu_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
  * \param dev Device to query.
  * \param pipe Crtc to query.
  * \param flags Flags from caller (DRM_CALLED_FROM_VBLIRQ or 0).
+ *              For driver internal use only also supports these flags:
+ *
+ *              USE_REAL_VBLANKSTART to use the real start of vblank instead
+ *              of a fudged earlier start of vblank.
+ *
+ *              GET_DISTANCE_TO_VBLANKSTART to return distance to the
+ *              fudged earlier start of vblank in *vpos and the distance
+ *              to true start of vblank in *hpos.
+ *
  * \param *vpos Location where vertical scanout position should be stored.
  * \param *hpos Location where horizontal scanout position should go.
  * \param *stime Target location for timestamp taken immediately before
@@ -776,10 +822,40 @@ int amdgpu_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
 		vbl_end = 0;
 	}
 
+	/* Called from driver internal vblank counter query code? */
+	if (flags & GET_DISTANCE_TO_VBLANKSTART) {
+	    /* Caller wants distance from real vbl_start in *hpos */
+	    *hpos = *vpos - vbl_start;
+	}
+
+	/* Fudge vblank to start a few scanlines earlier to handle the
+	 * problem that vblank irqs fire a few scanlines before start
+	 * of vblank. Some driver internal callers need the true vblank
+	 * start to be used and signal this via the USE_REAL_VBLANKSTART flag.
+	 *
+	 * The cause of the "early" vblank irq is that the irq is triggered
+	 * by the line buffer logic when the line buffer read position enters
+	 * the vblank, whereas our crtc scanout position naturally lags the
+	 * line buffer read position.
+	 */
+	if (!(flags & USE_REAL_VBLANKSTART))
+		vbl_start -= adev->mode_info.crtcs[pipe]->lb_vblank_lead_lines;
+
 	/* Test scanout position against vblank region. */
 	if ((*vpos < vbl_start) && (*vpos >= vbl_end))
 		in_vbl = false;
 
+	/* In vblank? */
+	if (in_vbl)
+	    ret |= DRM_SCANOUTPOS_IN_VBLANK;
+
+	/* Called from driver internal vblank counter query code? */
+	if (flags & GET_DISTANCE_TO_VBLANKSTART) {
+		/* Caller wants distance from fudged earlier vbl_start */
+		*vpos -= vbl_start;
+		return ret;
+	}
+
 	/* Check if inside vblank area and apply corrective offsets:
 	 * vpos will then be >=0 in video scanout area, but negative
 	 * within vblank area, counting down the number of lines until
@@ -795,32 +871,6 @@ int amdgpu_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
 	/* Correct for shifted end of vbl at vbl_end. */
 	*vpos = *vpos - vbl_end;
 
-	/* In vblank? */
-	if (in_vbl)
-		ret |= DRM_SCANOUTPOS_IN_VBLANK;
-
-	/* Is vpos outside nominal vblank area, but less than
-	 * 1/100 of a frame height away from start of vblank?
-	 * If so, assume this isn't a massively delayed vblank
-	 * interrupt, but a vblank interrupt that fired a few
-	 * microseconds before true start of vblank. Compensate
-	 * by adding a full frame duration to the final timestamp.
-	 * Happens, e.g., on ATI R500, R600.
-	 *
-	 * We only do this if DRM_CALLED_FROM_VBLIRQ.
-	 */
-	if ((flags & DRM_CALLED_FROM_VBLIRQ) && !in_vbl) {
-		vbl_start = mode->crtc_vdisplay;
-		vtotal = mode->crtc_vtotal;
-
-		if (vbl_start - *vpos < vtotal / 100) {
-			*vpos -= vtotal;
-
-			/* Signal this correction as "applied". */
-			ret |= 0x8;
-		}
-	}
-
 	return ret;
 	return ret;
 }
 }
 
 

+ 3 - 2
drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c

@@ -235,8 +235,9 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
 	    AMDGPU_GEM_USERPTR_REGISTER))
 		return -EINVAL;
 
-	if (!(args->flags & AMDGPU_GEM_USERPTR_ANONONLY) ||
-		   !(args->flags & AMDGPU_GEM_USERPTR_REGISTER)) {
+	if (!(args->flags & AMDGPU_GEM_USERPTR_READONLY) && (
+	     !(args->flags & AMDGPU_GEM_USERPTR_ANONONLY) ||
+	     !(args->flags & AMDGPU_GEM_USERPTR_REGISTER))) {
 
 		/* if we want to write to it we must require anonymous
 		   memory and install a MMU notifier */

+ 47 - 1
drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c

@@ -611,13 +611,59 @@ void amdgpu_driver_preclose_kms(struct drm_device *dev,
 u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe)
 {
 	struct amdgpu_device *adev = dev->dev_private;
+	int vpos, hpos, stat;
+	u32 count;
 
 	if (pipe >= adev->mode_info.num_crtc) {
 		DRM_ERROR("Invalid crtc %u\n", pipe);
 		return -EINVAL;
 	}
 
-	return amdgpu_display_vblank_get_counter(adev, pipe);
+	/* The hw increments its frame counter at start of vsync, not at start
+	 * of vblank, as is required by DRM core vblank counter handling.
+	 * Cook the hw count here to make it appear to the caller as if it
+	 * incremented at start of vblank. We measure distance to start of
+	 * vblank in vpos. vpos therefore will be >= 0 between start of vblank
+	 * and start of vsync, so vpos >= 0 means to bump the hw frame counter
+	 * result by 1 to give the proper appearance to caller.
+	 */
+	if (adev->mode_info.crtcs[pipe]) {
+		/* Repeat readout if needed to provide stable result if
+		 * we cross start of vsync during the queries.
+		 */
+		do {
+			count = amdgpu_display_vblank_get_counter(adev, pipe);
+			/* Ask amdgpu_get_crtc_scanoutpos to return vpos as
+			 * distance to start of vblank, instead of regular
+			 * vertical scanout pos.
+			 */
+			stat = amdgpu_get_crtc_scanoutpos(
+				dev, pipe, GET_DISTANCE_TO_VBLANKSTART,
+				&vpos, &hpos, NULL, NULL,
+				&adev->mode_info.crtcs[pipe]->base.hwmode);
+		} while (count != amdgpu_display_vblank_get_counter(adev, pipe));
+
+		if (((stat & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE)) !=
+		    (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE))) {
+			DRM_DEBUG_VBL("Query failed! stat %d\n", stat);
+		} else {
+			DRM_DEBUG_VBL("crtc %d: dist from vblank start %d\n",
+				      pipe, vpos);
+
+			/* Bump counter if we are at >= leading edge of vblank,
+			 * but before vsync where vpos would turn negative and
+			 * the hw counter really increments.
+			 */
+			if (vpos >= 0)
+				count++;
+		}
+	} else {
+		/* Fallback to use value as is. */
+		count = amdgpu_display_vblank_get_counter(adev, pipe);
+		DRM_DEBUG_VBL("NULL mode info! Returned count may be wrong.\n");
+	}
+
+	return count;
 }
 
 /**

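The readout loop added to amdgpu_get_vblank_counter_kms() above is a seqlock-style stabilization: sample the hw counter, sample the scanout position, then resample the counter and retry if it moved, so the position provably belongs to one counter value. The skeleton, using the diff's functions with illustrative locals:

	/* Sketch: pair a counter with a position race-free via double read. */
	u32 count;
	int vpos, hpos, stat;

	do {
		count = amdgpu_display_vblank_get_counter(adev, pipe);
		stat = amdgpu_get_crtc_scanoutpos(dev, pipe,
						  GET_DISTANCE_TO_VBLANKSTART,
						  &vpos, &hpos, NULL, NULL,
						  &hwmode);
	} while (count != amdgpu_display_vblank_get_counter(adev, pipe));

	if (vpos >= 0)	/* past vblank start, before vsync: hw hasn't bumped yet */
		count++;
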
+ 5 - 0
drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h

@@ -407,6 +407,7 @@ struct amdgpu_crtc {
 	u32 line_time;
 	u32 wm_low;
 	u32 wm_high;
+	u32 lb_vblank_lead_lines;
 	struct drm_display_mode hw_mode;
 };
 
@@ -528,6 +529,10 @@ struct amdgpu_framebuffer {
 #define ENCODER_MODE_IS_DP(em) (((em) == ATOM_ENCODER_MODE_DP) || \
 				((em) == ATOM_ENCODER_MODE_DP_MST))
 
+/* Driver internal use only flags of amdgpu_get_crtc_scanoutpos() */
+#define USE_REAL_VBLANKSTART 		(1 << 30)
+#define GET_DISTANCE_TO_VBLANKSTART	(1 << 31)
+
 void amdgpu_link_encoder_connector(struct drm_device *dev);
 
 struct drm_connector *

+ 1 - 0
drivers/gpu/drm/amd/amdgpu/amdgpu_object.c

@@ -100,6 +100,7 @@ static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
 	list_del_init(&bo->list);
 	mutex_unlock(&bo->adev->gem.mutex);
 	drm_gem_object_release(&bo->gem_base);
+	amdgpu_bo_unref(&bo->parent);
 	kfree(bo->metadata);
 	kfree(bo);
 }

+ 11 - 6
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c

@@ -587,9 +587,13 @@ static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm,
 	uint32_t flags = amdgpu_ttm_tt_pte_flags(gtt->adev, ttm, bo_mem);
 	int r;
 
-	if (gtt->userptr)
-		amdgpu_ttm_tt_pin_userptr(ttm);
-
+	if (gtt->userptr) {
+		r = amdgpu_ttm_tt_pin_userptr(ttm);
+		if (r) {
+			DRM_ERROR("failed to pin userptr\n");
+			return r;
+		}
+	}
 	gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT);
 	if (!ttm->num_pages) {
 		WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
@@ -797,11 +801,12 @@ uint32_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
 	if (mem && mem->mem_type != TTM_PL_SYSTEM)
 		flags |= AMDGPU_PTE_VALID;
 
-	if (mem && mem->mem_type == TTM_PL_TT)
+	if (mem && mem->mem_type == TTM_PL_TT) {
 		flags |= AMDGPU_PTE_SYSTEM;
 
-	if (!ttm || ttm->caching_state == tt_cached)
-		flags |= AMDGPU_PTE_SNOOPED;
+		if (ttm->caching_state == tt_cached)
+			flags |= AMDGPU_PTE_SNOOPED;
+	}
 
 	if (adev->asic_type >= CHIP_TOPAZ)
 		flags |= AMDGPU_PTE_EXECUTABLE;

+ 18 - 3
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c

@@ -885,17 +885,21 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
 	struct amdgpu_bo_va_mapping *mapping;
 	int r;
 
+	spin_lock(&vm->freed_lock);
 	while (!list_empty(&vm->freed)) {
 		mapping = list_first_entry(&vm->freed,
 			struct amdgpu_bo_va_mapping, list);
 		list_del(&mapping->list);
-
+		spin_unlock(&vm->freed_lock);
 		r = amdgpu_vm_bo_update_mapping(adev, vm, mapping, 0, 0, NULL);
 		kfree(mapping);
 		if (r)
 			return r;
 
+		spin_lock(&vm->freed_lock);
 	}
+	spin_unlock(&vm->freed_lock);
+
 	return 0;
 
 }
@@ -1079,6 +1083,11 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
 		if (r)
 			goto error_free;
 
+		/* Keep a reference to the page table to avoid freeing
+		 * them up in the wrong order.
+		 */
+		pt->parent = amdgpu_bo_ref(vm->page_directory);
+
 		r = amdgpu_vm_clear_bo(adev, pt);
 		if (r) {
 			amdgpu_bo_unref(&pt);
@@ -1150,10 +1159,13 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
 	spin_unlock(&vm->it_lock);
 	trace_amdgpu_vm_bo_unmap(bo_va, mapping);
 
-	if (valid)
+	if (valid) {
+		spin_lock(&vm->freed_lock);
 		list_add(&mapping->list, &vm->freed);
-	else
+		spin_unlock(&vm->freed_lock);
+	} else {
 		kfree(mapping);
+	}
 
 	return 0;
 }
@@ -1186,7 +1198,9 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
 		interval_tree_remove(&mapping->it, &vm->va);
 		spin_unlock(&vm->it_lock);
 		trace_amdgpu_vm_bo_unmap(bo_va, mapping);
+		spin_lock(&vm->freed_lock);
 		list_add(&mapping->list, &vm->freed);
+		spin_unlock(&vm->freed_lock);
 	}
 	list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
 		list_del(&mapping->list);
@@ -1247,6 +1261,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 	INIT_LIST_HEAD(&vm->cleared);
 	INIT_LIST_HEAD(&vm->freed);
 	spin_lock_init(&vm->it_lock);
+	spin_lock_init(&vm->freed_lock);
 	pd_size = amdgpu_vm_directory_size(adev);
 	pd_entries = amdgpu_vm_num_pdes(adev);
 

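The freed_lock usage in amdgpu_vm_clear_freed() above follows the standard drain pattern: pop one entry under the lock, drop the lock for the slow page-table update, re-acquire before the next emptiness check, so concurrent unmap paths can keep appending while the drain runs. Generic shape (the types and process() are illustrative):

	/* Sketch: drain a locked list without holding the lock across
	 * slow per-entry work. */
	spin_lock(&freed_lock);
	while (!list_empty(&freed)) {
		mapping = list_first_entry(&freed, struct mapping_type, list);
		list_del(&mapping->list);
		spin_unlock(&freed_lock);	/* slow work runs unlocked */

		process(mapping);

		spin_lock(&freed_lock);		/* retake before re-checking */
	}
	spin_unlock(&freed_lock);
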
+ 4 - 1
drivers/gpu/drm/amd/amdgpu/dce_v10_0.c

@@ -1250,7 +1250,7 @@ static void dce_v10_0_program_watermarks(struct amdgpu_device *adev,
 	u32 pixel_period;
 	u32 line_time = 0;
 	u32 latency_watermark_a = 0, latency_watermark_b = 0;
-	u32 tmp, wm_mask;
+	u32 tmp, wm_mask, lb_vblank_lead_lines = 0;
 
 	if (amdgpu_crtc->base.enabled && num_heads && mode) {
 		pixel_period = 1000000 / (u32)mode->clock;
@@ -1333,6 +1333,7 @@ static void dce_v10_0_program_watermarks(struct amdgpu_device *adev,
 		    (adev->mode_info.disp_priority == 2)) {
 			DRM_DEBUG_KMS("force priority to high\n");
 		}
+		lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay);
 	}
 
 	/* select wm A */
@@ -1357,6 +1358,8 @@ static void dce_v10_0_program_watermarks(struct amdgpu_device *adev,
 	amdgpu_crtc->line_time = line_time;
 	amdgpu_crtc->wm_high = latency_watermark_a;
 	amdgpu_crtc->wm_low = latency_watermark_b;
+	/* Save number of lines the linebuffer leads before the scanout */
+	amdgpu_crtc->lb_vblank_lead_lines = lb_vblank_lead_lines;
 }
 
 /**

+ 4 - 1
drivers/gpu/drm/amd/amdgpu/dce_v11_0.c

@@ -1238,7 +1238,7 @@ static void dce_v11_0_program_watermarks(struct amdgpu_device *adev,
 	u32 pixel_period;
 	u32 line_time = 0;
 	u32 latency_watermark_a = 0, latency_watermark_b = 0;
-	u32 tmp, wm_mask;
+	u32 tmp, wm_mask, lb_vblank_lead_lines = 0;
 
 	if (amdgpu_crtc->base.enabled && num_heads && mode) {
 		pixel_period = 1000000 / (u32)mode->clock;
@@ -1321,6 +1321,7 @@ static void dce_v11_0_program_watermarks(struct amdgpu_device *adev,
 		    (adev->mode_info.disp_priority == 2)) {
 			DRM_DEBUG_KMS("force priority to high\n");
 		}
+		lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay);
 	}
 
 	/* select wm A */
@@ -1345,6 +1346,8 @@ static void dce_v11_0_program_watermarks(struct amdgpu_device *adev,
 	amdgpu_crtc->line_time = line_time;
 	amdgpu_crtc->wm_high = latency_watermark_a;
 	amdgpu_crtc->wm_low = latency_watermark_b;
+	/* Save number of lines the linebuffer leads before the scanout */
+	amdgpu_crtc->lb_vblank_lead_lines = lb_vblank_lead_lines;
 }
 
 /**

+ 4 - 1
drivers/gpu/drm/amd/amdgpu/dce_v8_0.c

@@ -1193,7 +1193,7 @@ static void dce_v8_0_program_watermarks(struct amdgpu_device *adev,
 	u32 pixel_period;
 	u32 line_time = 0;
 	u32 latency_watermark_a = 0, latency_watermark_b = 0;
-	u32 tmp, wm_mask;
+	u32 tmp, wm_mask, lb_vblank_lead_lines = 0;
 
 	if (amdgpu_crtc->base.enabled && num_heads && mode) {
 		pixel_period = 1000000 / (u32)mode->clock;
@@ -1276,6 +1276,7 @@ static void dce_v8_0_program_watermarks(struct amdgpu_device *adev,
 		    (adev->mode_info.disp_priority == 2)) {
 			DRM_DEBUG_KMS("force priority to high\n");
 		}
+		lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay);
 	}
 
 	/* select wm A */
@@ -1302,6 +1303,8 @@ static void dce_v8_0_program_watermarks(struct amdgpu_device *adev,
 	amdgpu_crtc->line_time = line_time;
 	amdgpu_crtc->wm_high = latency_watermark_a;
 	amdgpu_crtc->wm_low = latency_watermark_b;
+	/* Save number of lines the linebuffer leads before the scanout */
+	amdgpu_crtc->lb_vblank_lead_lines = lb_vblank_lead_lines;
 }
 
 /**

+ 1 - 1
drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c

@@ -513,7 +513,7 @@ static int gmc_v7_0_gart_enable(struct amdgpu_device *adev)
 	WREG32(mmVM_L2_CNTL3, tmp);
 	/* setup context0 */
 	WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gtt_start >> 12);
-	WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, (adev->mc.gtt_end >> 12) - 1);
+	WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gtt_end >> 12);
 	WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12);
 	WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
 			(u32)(adev->dummy_page.addr >> 12));

+ 1 - 1
drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c

@@ -657,7 +657,7 @@ static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
 	WREG32(mmVM_L2_CNTL4, tmp);
 	/* setup context0 */
 	WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gtt_start >> 12);
-	WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, (adev->mc.gtt_end >> 12) - 1);
+	WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gtt_end >> 12);
 	WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12);
 	WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
 			(u32)(adev->dummy_page.addr >> 12));
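
Both GMC hunks drop the same stray "- 1": assuming adev->mc.gtt_end already holds the inclusive last byte of the GART aperture (which is how the matching radeon code treats it), shifting by the 4 KiB page order yields the inclusive last page frame number directly, and the extra decrement left the final page outside the context0 mapping. A small sketch of the conversion, with a hypothetical aperture:

#include <stdint.h>
#include <stdio.h>

#define GPU_PAGE_SHIFT 12 /* 4 KiB GART pages */

int main(void)
{
	uint64_t gtt_start = 0x0;
	uint64_t gtt_end = 0x3fffff; /* inclusive last byte of a 4 MiB aperture */

	uint64_t first_pfn = gtt_start >> GPU_PAGE_SHIFT; /* 0x000 */
	uint64_t last_pfn = gtt_end >> GPU_PAGE_SHIFT;    /* 0x3ff */

	/* the removed "- 1" would have programmed 0x3fe, one page short */
	printf("context0 pages: 0x%llx..0x%llx\n",
	       (unsigned long long)first_pfn, (unsigned long long)last_pfn);
	return 0;
}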

+ 3 - 2
drivers/gpu/drm/amd/scheduler/gpu_scheduler.c

@@ -288,6 +288,7 @@ amd_sched_entity_pop_job(struct amd_sched_entity *entity)
  */
 static bool amd_sched_entity_in(struct amd_sched_job *sched_job)
 {
+	struct amd_gpu_scheduler *sched = sched_job->sched;
 	struct amd_sched_entity *entity = sched_job->s_entity;
 	bool added, first = false;
 
@@ -302,7 +303,7 @@ static bool amd_sched_entity_in(struct amd_sched_job *sched_job)
 
 	/* first job wakes up scheduler */
 	if (first)
-		amd_sched_wakeup(sched_job->sched);
+		amd_sched_wakeup(sched);
 
 	return added;
 }
@@ -318,9 +319,9 @@ void amd_sched_entity_push_job(struct amd_sched_job *sched_job)
 {
 	struct amd_sched_entity *entity = sched_job->s_entity;
 
+	trace_amd_sched_job(sched_job);
 	wait_event(entity->sched->job_scheduled,
 		   amd_sched_entity_in(sched_job));
-	trace_amd_sched_job(sched_job);
 }
 
 /**
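
Both scheduler changes follow one lifetime rule: once amd_sched_entity_in() hands the job to the scheduler, another thread may process and free it, so everything the function still needs afterwards (the back-pointer used for the wakeup, the tracepoint arguments) must be read before the hand-off. A hedged userspace sketch of the pattern, independent of the scheduler types:

#include <stdio.h>
#include <stdlib.h>

struct queue { int dummy; };

struct job {
	struct queue *owner; /* needed for the wakeup after publishing */
	int id;
};

/* hands ownership to a consumer; the job may be freed from here on */
static void publish(struct job *j) { free(j); }

static void wake(struct queue *q) { (void)q; }

static void push_job(struct job *j)
{
	struct queue *owner = j->owner; /* cache before the hand-off */

	printf("trace job %d before publish\n", j->id); /* trace first, too */
	publish(j);  /* touching j after this call would be a use-after-free */
	wake(owner); /* safe: uses the cached pointer */
}

int main(void)
{
	static struct queue q;
	struct job *j = malloc(sizeof(*j));

	if (!j)
		return 1;
	j->owner = &q;
	j->id = 1;
	push_job(j);
	return 0;
}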

+ 5 - 0
drivers/gpu/drm/drm_drv.c

@@ -160,6 +160,11 @@ int drm_setmaster_ioctl(struct drm_device *dev, void *data,
 		goto out_unlock;
 	}
 
+	if (!file_priv->allowed_master) {
+		ret = drm_new_set_master(dev, file_priv);
+		goto out_unlock;
+	}
+
 	file_priv->minor->master = drm_master_get(file_priv->master);
 	file_priv->is_master = 1;
 	if (dev->driver->master_set) {

+ 56 - 28
drivers/gpu/drm/drm_fops.c

@@ -125,6 +125,60 @@ static int drm_cpu_valid(void)
 	return 1;
 }
 
+/**
+ * drm_new_set_master - Allocate a new master object and become master for the
+ * associated master realm.
+ *
+ * @dev: The associated device.
+ * @fpriv: File private identifying the client.
+ *
+ * This function must be called with dev->master_mutex held.
+ * Returns negative error code on failure. Zero on success.
+ */
+int drm_new_set_master(struct drm_device *dev, struct drm_file *fpriv)
+{
+	struct drm_master *old_master;
+	int ret;
+
+	lockdep_assert_held_once(&dev->master_mutex);
+
+	/* create a new master */
+	fpriv->minor->master = drm_master_create(fpriv->minor);
+	if (!fpriv->minor->master)
+		return -ENOMEM;
+
+	/* take another reference for the copy in the local file priv */
+	old_master = fpriv->master;
+	fpriv->master = drm_master_get(fpriv->minor->master);
+
+	if (dev->driver->master_create) {
+		ret = dev->driver->master_create(dev, fpriv->master);
+		if (ret)
+			goto out_err;
+	}
+	if (dev->driver->master_set) {
+		ret = dev->driver->master_set(dev, fpriv, true);
+		if (ret)
+			goto out_err;
+	}
+
+	fpriv->is_master = 1;
+	fpriv->allowed_master = 1;
+	fpriv->authenticated = 1;
+	if (old_master)
+		drm_master_put(&old_master);
+
+	return 0;
+
+out_err:
+	/* drop both references and restore old master on failure */
+	drm_master_put(&fpriv->minor->master);
+	drm_master_put(&fpriv->master);
+	fpriv->master = old_master;
+
+	return ret;
+}
+
 /**
  * Called whenever a process opens /dev/drm.
  *
@@ -191,35 +245,9 @@ static int drm_open_helper(struct file *filp, struct drm_minor *minor)
 	mutex_lock(&dev->master_mutex);
 	if (drm_is_primary_client(priv) && !priv->minor->master) {
 		/* create a new master */
-		priv->minor->master = drm_master_create(priv->minor);
-		if (!priv->minor->master) {
-			ret = -ENOMEM;
+		ret = drm_new_set_master(dev, priv);
+		if (ret)
 			goto out_close;
-		}
-
-		priv->is_master = 1;
-		/* take another reference for the copy in the local file priv */
-		priv->master = drm_master_get(priv->minor->master);
-		priv->authenticated = 1;
-
-		if (dev->driver->master_create) {
-			ret = dev->driver->master_create(dev, priv->master);
-			if (ret) {
-				/* drop both references if this fails */
-				drm_master_put(&priv->minor->master);
-				drm_master_put(&priv->master);
-				goto out_close;
-			}
-		}
-		if (dev->driver->master_set) {
-			ret = dev->driver->master_set(dev, priv, true);
-			if (ret) {
-				/* drop both references if this fails */
-				drm_master_put(&priv->minor->master);
-				drm_master_put(&priv->master);
-				goto out_close;
-			}
-		}
 	} else if (drm_is_primary_client(priv)) {
 		/* get a reference to the master */
 		priv->master = drm_master_get(priv->minor->master);
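
The new helper consolidates what drm_open_helper() used to open-code, and its error path is slightly stronger than the old one: instead of only dropping the freshly taken references, it also restores the previous master pointer. A minimal sketch of that install-then-roll-back shape, stripped of the DRM types (all names here are hypothetical):

#include <stdio.h>

struct ref { int value; };

static struct ref *ref_get(struct ref *r) { return r; } /* take a reference */
static void ref_put(struct ref **r) { *r = NULL; }      /* drop and clear */

/* driver hook that may reject the new owner */
static int setup(struct ref *r) { return r->value < 0 ? -1 : 0; }

static int become_owner(struct ref *fresh, struct ref **slot)
{
	struct ref *old = *slot; /* remember the previous owner */
	int ret;

	*slot = ref_get(fresh);  /* optimistically install the new one */
	ret = setup(fresh);
	if (ret) {
		ref_put(slot);   /* undo the install... */
		*slot = old;     /* ...and restore the old reference */
		return ret;
	}
	if (old)
		ref_put(&old);   /* success: release the old reference */
	return 0;
}

int main(void)
{
	struct ref good = { 1 }, bad = { -1 }, *slot = NULL;

	printf("good: %d\n", become_owner(&good, &slot)); /* 0 */
	printf("bad:  %d\n", become_owner(&bad, &slot));  /* -1, slot intact */
	return 0;
}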

+ 53 - 1
drivers/gpu/drm/drm_irq.c

@@ -980,7 +980,8 @@ static void send_vblank_event(struct drm_device *dev,
 		struct drm_pending_vblank_event *e,
 		unsigned long seq, struct timeval *now)
 {
-	WARN_ON_SMP(!spin_is_locked(&dev->event_lock));
+	assert_spin_locked(&dev->event_lock);
+
 	e->event.sequence = seq;
 	e->event.tv_sec = now->tv_sec;
 	e->event.tv_usec = now->tv_usec;
@@ -992,6 +993,57 @@ static void send_vblank_event(struct drm_device *dev,
 					 e->event.sequence);
 }
 
+/**
+ * drm_arm_vblank_event - arm vblank event after pageflip
+ * @dev: DRM device
+ * @pipe: CRTC index
+ * @e: the event to prepare to send
+ *
+ * A lot of drivers need to generate vblank events for the very next vblank
+ * interrupt. For example when the page flip interrupt happens when the page
+ * flip gets armed, but not when it actually executes within the next vblank
+ * period. This helper function implements exactly the required vblank arming
+ * behaviour.
+ *
+ * Caller must hold event lock. Caller must also hold a vblank reference for
+ * the event @e, which will be dropped when the next vblank arrives.
+ *
+ * This is the legacy version of drm_crtc_arm_vblank_event().
+ */
+void drm_arm_vblank_event(struct drm_device *dev, unsigned int pipe,
+			  struct drm_pending_vblank_event *e)
+{
+	assert_spin_locked(&dev->event_lock);
+
+	e->pipe = pipe;
+	e->event.sequence = drm_vblank_count(dev, pipe);
+	list_add_tail(&e->base.link, &dev->vblank_event_list);
+}
+EXPORT_SYMBOL(drm_arm_vblank_event);
+
+/**
+ * drm_crtc_arm_vblank_event - arm vblank event after pageflip
+ * @crtc: the source CRTC of the vblank event
+ * @e: the event to send
+ *
+ * A lot of drivers need to generate vblank events for the very next vblank
+ * interrupt. For example when the page flip interrupt happens when the page
+ * flip gets armed, but not when it actually executes within the next vblank
+ * period. This helper function implements exactly the required vblank arming
+ * behaviour.
+ *
+ * Caller must hold event lock. Caller must also hold a vblank reference for
+ * the event @e, which will be dropped when the next vblank arrives.
+ *
+ * This is the native KMS version of drm_arm_vblank_event().
+ */
+void drm_crtc_arm_vblank_event(struct drm_crtc *crtc,
+			       struct drm_pending_vblank_event *e)
+{
+	drm_arm_vblank_event(crtc->dev, drm_crtc_index(crtc), e);
+}
+EXPORT_SYMBOL(drm_crtc_arm_vblank_event);
+
 /**
  * drm_send_vblank_event - helper to send vblank event after pageflip
  * @dev: DRM device
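
Given the documented contract of the two new helpers (event lock held, one vblank reference owned by the event), a driver flip path built on them would look roughly like this sketch; the mydrv_* name is hypothetical and error handling is reduced to the essentials:

#include <drm/drmP.h>

/* hedged sketch of a driver queuing its flip-completion event */
static int mydrv_queue_flip_event(struct drm_crtc *crtc,
				  struct drm_pending_vblank_event *e)
{
	struct drm_device *dev = crtc->dev;
	unsigned long flags;
	int ret;

	/* reference is consumed when the next vblank fires */
	ret = drm_crtc_vblank_get(crtc);
	if (ret)
		return ret;

	spin_lock_irqsave(&dev->event_lock, flags);
	/* event is sent at the *next* vblank, not when the flip is armed */
	drm_crtc_arm_vblank_event(crtc, e);
	spin_unlock_irqrestore(&dev->event_lock, flags);

	return 0;
}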

+ 10 - 2
drivers/gpu/drm/i915/i915_gem.c

@@ -1210,8 +1210,16 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
 	if (i915_gem_request_completed(req, true))
 		return 0;
 
-	timeout_expire = timeout ?
-		jiffies + nsecs_to_jiffies_timeout((u64)*timeout) : 0;
+	timeout_expire = 0;
+	if (timeout) {
+		if (WARN_ON(*timeout < 0))
+			return -EINVAL;
+
+		if (*timeout == 0)
+			return -ETIME;
+
+		timeout_expire = jiffies + nsecs_to_jiffies_timeout(*timeout);
+	}
 
 	if (INTEL_INFO(dev_priv)->gen >= 6)
 		gen6_rps_boost(dev_priv, rps, req->emitted_jiffies);

+ 27 - 9
drivers/gpu/drm/i915/i915_gem_fence.c

@@ -642,11 +642,10 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
 		}
 
 		/* check for L-shaped memory aka modified enhanced addressing */
-		if (IS_GEN4(dev)) {
-			uint32_t ddc2 = I915_READ(DCC2);
-
-			if (!(ddc2 & DCC2_MODIFIED_ENHANCED_DISABLE))
-				dev_priv->quirks |= QUIRK_PIN_SWIZZLED_PAGES;
+		if (IS_GEN4(dev) &&
+		    !(I915_READ(DCC2) & DCC2_MODIFIED_ENHANCED_DISABLE)) {
+			swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
+			swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
 		}
 
 		if (dcc == 0xffffffff) {
@@ -675,16 +674,35 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
 		 * matching, which was the case for the swizzling required in
 		 * the table above, or from the 1-ch value being less than
 		 * the minimum size of a rank.
+		 *
+		 * Reports indicate that the swizzling actually
+		 * varies depending upon page placement inside the
+		 * channels, i.e. we see swizzled pages where the
+		 * banks of memory are paired and unswizzled on the
+		 * uneven portion, so leave that as unknown.
 		 */
-		if (I915_READ16(C0DRB3) != I915_READ16(C1DRB3)) {
-			swizzle_x = I915_BIT_6_SWIZZLE_NONE;
-			swizzle_y = I915_BIT_6_SWIZZLE_NONE;
-		} else {
+		if (I915_READ16(C0DRB3) == I915_READ16(C1DRB3)) {
 			swizzle_x = I915_BIT_6_SWIZZLE_9_10;
 			swizzle_y = I915_BIT_6_SWIZZLE_9;
 		}
 	}
 
+	if (swizzle_x == I915_BIT_6_SWIZZLE_UNKNOWN ||
+	    swizzle_y == I915_BIT_6_SWIZZLE_UNKNOWN) {
+		/* Userspace likes to explode if it sees unknown swizzling,
+		 * so lie. We will finish the lie when reporting through
+		 * the get-tiling-ioctl by reporting the physical swizzle
+		 * mode as unknown instead.
+		 *
+		 * As we don't strictly know what the swizzling is, it may be
+		 * bit17 dependent, and so we need to also prevent the pages
+		 * from being moved.
+		 */
+		dev_priv->quirks |= QUIRK_PIN_SWIZZLED_PAGES;
+		swizzle_x = I915_BIT_6_SWIZZLE_NONE;
+		swizzle_y = I915_BIT_6_SWIZZLE_NONE;
+	}
+
 	dev_priv->mm.bit_6_swizzle_x = swizzle_x;
 	dev_priv->mm.bit_6_swizzle_y = swizzle_y;
 }

+ 0 - 1
drivers/gpu/drm/i915/intel_display.c

@@ -12582,7 +12582,6 @@ intel_pipe_config_compare(struct drm_device *dev,
 	if (INTEL_INFO(dev)->gen < 8) {
 		PIPE_CONF_CHECK_M_N(dp_m_n);
 
-		PIPE_CONF_CHECK_I(has_drrs);
 		if (current_config->has_drrs)
 			PIPE_CONF_CHECK_M_N(dp_m2_n2);
 	} else

+ 2 - 1
drivers/gpu/drm/i915/intel_dp.c

@@ -4962,7 +4962,8 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
 	enum intel_display_power_domain power_domain;
 	enum irqreturn ret = IRQ_NONE;
 
-	if (intel_dig_port->base.type != INTEL_OUTPUT_EDP)
+	if (intel_dig_port->base.type != INTEL_OUTPUT_EDP &&
+	    intel_dig_port->base.type != INTEL_OUTPUT_HDMI)
 		intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;
 
 	if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {

+ 3 - 4
drivers/gpu/drm/imx/imx-drm-core.c

@@ -64,8 +64,7 @@ static void imx_drm_driver_lastclose(struct drm_device *drm)
 {
 	struct imx_drm_device *imxdrm = drm->dev_private;
 
-	if (imxdrm->fbhelper)
-		drm_fbdev_cma_restore_mode(imxdrm->fbhelper);
+	drm_fbdev_cma_restore_mode(imxdrm->fbhelper);
 }
 
 static int imx_drm_driver_unload(struct drm_device *drm)
@@ -334,7 +333,7 @@ err_kms:
  * imx_drm_add_crtc - add a new crtc
  */
 int imx_drm_add_crtc(struct drm_device *drm, struct drm_crtc *crtc,
-		struct imx_drm_crtc **new_crtc,
+		struct imx_drm_crtc **new_crtc, struct drm_plane *primary_plane,
 		const struct imx_drm_crtc_helper_funcs *imx_drm_helper_funcs,
 		struct device_node *port)
 {
@@ -373,7 +372,7 @@ int imx_drm_add_crtc(struct drm_device *drm, struct drm_crtc *crtc,
 	drm_crtc_helper_add(crtc,
 			imx_drm_crtc->imx_drm_helper_funcs.crtc_helper_funcs);
 
-	drm_crtc_init(drm, crtc,
+	drm_crtc_init_with_planes(drm, crtc, primary_plane, NULL,
 			imx_drm_crtc->imx_drm_helper_funcs.crtc_funcs);
 
 	return 0;

+ 2 - 1
drivers/gpu/drm/imx/imx-drm.h

@@ -9,6 +9,7 @@ struct drm_display_mode;
 struct drm_encoder;
 struct drm_fbdev_cma;
 struct drm_framebuffer;
+struct drm_plane;
 struct imx_drm_crtc;
 struct platform_device;
 
@@ -24,7 +25,7 @@ struct imx_drm_crtc_helper_funcs {
 };
 
 int imx_drm_add_crtc(struct drm_device *drm, struct drm_crtc *crtc,
-		struct imx_drm_crtc **new_crtc,
+		struct imx_drm_crtc **new_crtc, struct drm_plane *primary_plane,
 		const struct imx_drm_crtc_helper_funcs *imx_helper_funcs,
 		struct device_node *port);
 int imx_drm_remove_crtc(struct imx_drm_crtc *);

+ 1 - 0
drivers/gpu/drm/imx/imx-tve.c

@@ -721,6 +721,7 @@ static const struct of_device_id imx_tve_dt_ids[] = {
 	{ .compatible = "fsl,imx53-tve", },
 	{ /* sentinel */ }
 };
+MODULE_DEVICE_TABLE(of, imx_tve_dt_ids);
 
 static struct platform_driver imx_tve_driver = {
 	.probe		= imx_tve_probe,

+ 17 - 46
drivers/gpu/drm/imx/ipuv3-crtc.c

@@ -212,7 +212,8 @@ static void ipu_crtc_handle_pageflip(struct ipu_crtc *ipu_crtc)
 
 	spin_lock_irqsave(&drm->event_lock, flags);
 	if (ipu_crtc->page_flip_event)
-		drm_send_vblank_event(drm, -1, ipu_crtc->page_flip_event);
+		drm_crtc_send_vblank_event(&ipu_crtc->base,
+					   ipu_crtc->page_flip_event);
 	ipu_crtc->page_flip_event = NULL;
 	imx_drm_crtc_vblank_put(ipu_crtc->imx_crtc);
 	spin_unlock_irqrestore(&drm->event_lock, flags);
@@ -349,7 +350,6 @@ static int ipu_crtc_init(struct ipu_crtc *ipu_crtc,
 	struct ipu_soc *ipu = dev_get_drvdata(ipu_crtc->dev->parent);
 	int dp = -EINVAL;
 	int ret;
-	int id;
 
 	ret = ipu_get_resources(ipu_crtc, pdata);
 	if (ret) {
@@ -358,18 +358,23 @@ static int ipu_crtc_init(struct ipu_crtc *ipu_crtc,
 		return ret;
 	}
 
+	if (pdata->dp >= 0)
+		dp = IPU_DP_FLOW_SYNC_BG;
+	ipu_crtc->plane[0] = ipu_plane_init(drm, ipu, pdata->dma[0], dp, 0,
+					    DRM_PLANE_TYPE_PRIMARY);
+	if (IS_ERR(ipu_crtc->plane[0])) {
+		ret = PTR_ERR(ipu_crtc->plane[0]);
+		goto err_put_resources;
+	}
+
 	ret = imx_drm_add_crtc(drm, &ipu_crtc->base, &ipu_crtc->imx_crtc,
-			&ipu_crtc_helper_funcs, ipu_crtc->dev->of_node);
+			&ipu_crtc->plane[0]->base, &ipu_crtc_helper_funcs,
+			ipu_crtc->dev->of_node);
 	if (ret) {
 		dev_err(ipu_crtc->dev, "adding crtc failed with %d.\n", ret);
 		goto err_put_resources;
 	}
 
-	if (pdata->dp >= 0)
-		dp = IPU_DP_FLOW_SYNC_BG;
-	id = imx_drm_crtc_id(ipu_crtc->imx_crtc);
-	ipu_crtc->plane[0] = ipu_plane_init(ipu_crtc->base.dev, ipu,
-					    pdata->dma[0], dp, BIT(id), true);
 	ret = ipu_plane_get_resources(ipu_crtc->plane[0]);
 	if (ret) {
 		dev_err(ipu_crtc->dev, "getting plane 0 resources failed with %d.\n",
@@ -379,10 +384,10 @@ static int ipu_crtc_init(struct ipu_crtc *ipu_crtc,
 
 	/* If this crtc is using the DP, add an overlay plane */
 	if (pdata->dp >= 0 && pdata->dma[1] > 0) {
-		ipu_crtc->plane[1] = ipu_plane_init(ipu_crtc->base.dev, ipu,
-						    pdata->dma[1],
-						    IPU_DP_FLOW_SYNC_FG,
-						    BIT(id), false);
+		ipu_crtc->plane[1] = ipu_plane_init(drm, ipu, pdata->dma[1],
+						IPU_DP_FLOW_SYNC_FG,
+						drm_crtc_mask(&ipu_crtc->base),
+						DRM_PLANE_TYPE_OVERLAY);
 		if (IS_ERR(ipu_crtc->plane[1]))
 			ipu_crtc->plane[1] = NULL;
 	}
@@ -407,28 +412,6 @@ err_put_resources:
 	return ret;
 }
 
-static struct device_node *ipu_drm_get_port_by_id(struct device_node *parent,
-						  int port_id)
-{
-	struct device_node *port;
-	int id, ret;
-
-	port = of_get_child_by_name(parent, "port");
-	while (port) {
-		ret = of_property_read_u32(port, "reg", &id);
-		if (!ret && id == port_id)
-			return port;
-
-		do {
-			port = of_get_next_child(parent, port);
-			if (!port)
-				return NULL;
-		} while (of_node_cmp(port->name, "port"));
-	}
-
-	return NULL;
-}
-
 static int ipu_drm_bind(struct device *dev, struct device *master, void *data)
 {
 	struct ipu_client_platformdata *pdata = dev->platform_data;
@@ -470,23 +453,11 @@ static const struct component_ops ipu_crtc_ops = {
 static int ipu_drm_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
-	struct ipu_client_platformdata *pdata = dev->platform_data;
 	int ret;
 
 	if (!dev->platform_data)
 		return -EINVAL;
 
-	if (!dev->of_node) {
-		/* Associate crtc device with the corresponding DI port node */
-		dev->of_node = ipu_drm_get_port_by_id(dev->parent->of_node,
-						      pdata->di + 2);
-		if (!dev->of_node) {
-			dev_err(dev, "missing port@%d node in %s\n",
-				pdata->di + 2, dev->parent->of_node->full_name);
-			return -ENODEV;
-		}
-	}
-
 	ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
 	if (ret)
 		return ret;

+ 4 - 5
drivers/gpu/drm/imx/ipuv3-plane.c

@@ -381,7 +381,7 @@ static struct drm_plane_funcs ipu_plane_funcs = {
 
 struct ipu_plane *ipu_plane_init(struct drm_device *dev, struct ipu_soc *ipu,
 				 int dma, int dp, unsigned int possible_crtcs,
-				 bool priv)
+				 enum drm_plane_type type)
 {
 	struct ipu_plane *ipu_plane;
 	int ret;
@@ -399,10 +399,9 @@ struct ipu_plane *ipu_plane_init(struct drm_device *dev, struct ipu_soc *ipu,
 	ipu_plane->dma = dma;
 	ipu_plane->dp_flow = dp;
 
-	ret = drm_plane_init(dev, &ipu_plane->base, possible_crtcs,
-			     &ipu_plane_funcs, ipu_plane_formats,
-			     ARRAY_SIZE(ipu_plane_formats),
-			     priv);
+	ret = drm_universal_plane_init(dev, &ipu_plane->base, possible_crtcs,
+				       &ipu_plane_funcs, ipu_plane_formats,
+				       ARRAY_SIZE(ipu_plane_formats), type);
 	if (ret) {
 		DRM_ERROR("failed to initialize plane\n");
 		kfree(ipu_plane);
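
This conversion follows the standard recipe for explicit plane types: build the primary plane first with drm_universal_plane_init(), then hand it to drm_crtc_init_with_planes() instead of letting drm_crtc_init() fabricate an implicit one. A hedged sketch of the ordering; the mydrv_* names and format list are hypothetical:

#include <drm/drmP.h>

static const uint32_t mydrv_formats[] = { DRM_FORMAT_XRGB8888 };

/* real drivers fill in more hooks than just the destructors */
static const struct drm_plane_funcs mydrv_plane_funcs = {
	.destroy = drm_plane_cleanup,
};
static const struct drm_crtc_funcs mydrv_crtc_funcs = {
	.destroy = drm_crtc_cleanup,
};

static int mydrv_create_crtc(struct drm_device *dev, struct drm_crtc *crtc,
			     struct drm_plane *primary)
{
	int ret;

	/* the primary plane is created explicitly and typed as such */
	ret = drm_universal_plane_init(dev, primary, 0, &mydrv_plane_funcs,
				       mydrv_formats, ARRAY_SIZE(mydrv_formats),
				       DRM_PLANE_TYPE_PRIMARY);
	if (ret)
		return ret;

	/* the CRTC adopts that plane rather than inventing a fake one */
	return drm_crtc_init_with_planes(dev, crtc, primary, NULL,
					 &mydrv_crtc_funcs);
}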

+ 1 - 1
drivers/gpu/drm/imx/ipuv3-plane.h

@@ -34,7 +34,7 @@ struct ipu_plane {
 
 struct ipu_plane *ipu_plane_init(struct drm_device *dev, struct ipu_soc *ipu,
 				 int dma, int dp, unsigned int possible_crtcs,
-				 bool priv);
+				 enum drm_plane_type type);
 
 /* Init IDMAC, DMFC, DP */
 int ipu_plane_mode_set(struct ipu_plane *plane, struct drm_crtc *crtc,

+ 4 - 0
drivers/gpu/drm/imx/parallel-display.c

@@ -54,7 +54,11 @@ static int imx_pd_connector_get_modes(struct drm_connector *connector)
 
 	if (imxpd->panel && imxpd->panel->funcs &&
 	    imxpd->panel->funcs->get_modes) {
+		struct drm_display_info *di = &connector->display_info;
+
 		num_modes = imxpd->panel->funcs->get_modes(imxpd->panel);
+		if (!imxpd->bus_format && di->num_bus_formats)
+			imxpd->bus_format = di->bus_formats[0];
 		if (num_modes > 0)
 			return num_modes;
 	}

+ 11 - 8
drivers/gpu/drm/nouveau/nouveau_display.c

@@ -829,7 +829,6 @@ nouveau_finish_page_flip(struct nouveau_channel *chan,
 	struct drm_device *dev = drm->dev;
 	struct nouveau_page_flip_state *s;
 	unsigned long flags;
-	int crtcid = -1;
 
 	spin_lock_irqsave(&dev->event_lock, flags);
 
@@ -841,15 +840,19 @@ nouveau_finish_page_flip(struct nouveau_channel *chan,
 
 	s = list_first_entry(&fctx->flip, struct nouveau_page_flip_state, head);
 	if (s->event) {
-		/* Vblank timestamps/counts are only correct on >= NV-50 */
-		if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA)
-			crtcid = s->crtc;
+		if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) {
+			drm_arm_vblank_event(dev, s->crtc, s->event);
+		} else {
+			drm_send_vblank_event(dev, s->crtc, s->event);
 
-		drm_send_vblank_event(dev, crtcid, s->event);
+			/* Give up ownership of vblank for page-flipped crtc */
+			drm_vblank_put(dev, s->crtc);
+		}
+	}
+	else {
+		/* Give up ownership of vblank for page-flipped crtc */
+		drm_vblank_put(dev, s->crtc);
 	}
-
-	/* Give up ownership of vblank for page-flipped crtc */
-	drm_vblank_put(dev, s->crtc);
 
 	list_del(&s->head);
 	if (ps)

+ 4 - 1
drivers/gpu/drm/radeon/cik.c

@@ -8472,7 +8472,7 @@ restart_ih:
 	if (queue_dp)
 		schedule_work(&rdev->dp_work);
 	if (queue_hotplug)
-		schedule_work(&rdev->hotplug_work);
+		schedule_delayed_work(&rdev->hotplug_work, 0);
 	if (queue_reset) {
 		rdev->needs_reset = true;
 		wake_up_all(&rdev->fence_queue);
@@ -9630,6 +9630,9 @@ static void dce8_program_watermarks(struct radeon_device *rdev,
 		    (rdev->disp_priority == 2)) {
 			DRM_DEBUG_KMS("force priority to high\n");
 		}
+
+		/* Save number of lines the linebuffer leads before the scanout */
+		radeon_crtc->lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay);
 	}
 
 	/* select wm A */

+ 4 - 1
drivers/gpu/drm/radeon/evergreen.c

@@ -2372,6 +2372,9 @@ static void evergreen_program_watermarks(struct radeon_device *rdev,
 		c.full = dfixed_div(c, a);
 		priority_b_mark = dfixed_trunc(c);
 		priority_b_cnt |= priority_b_mark & PRIORITY_MARK_MASK;
+
+		/* Save number of lines the linebuffer leads before the scanout */
+		radeon_crtc->lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay);
 	}
 
 	/* select wm A */
@@ -5344,7 +5347,7 @@ restart_ih:
 	if (queue_dp)
 		schedule_work(&rdev->dp_work);
 	if (queue_hotplug)
-		schedule_work(&rdev->hotplug_work);
+		schedule_delayed_work(&rdev->hotplug_work, 0);
 	if (queue_hdmi)
 		schedule_work(&rdev->audio_work);
 	if (queue_thermal && rdev->pm.dpm_enabled)

+ 11 - 1
drivers/gpu/drm/radeon/r100.c

@@ -806,7 +806,7 @@ int r100_irq_process(struct radeon_device *rdev)
 		status = r100_irq_ack(rdev);
 	}
 	if (queue_hotplug)
-		schedule_work(&rdev->hotplug_work);
+		schedule_delayed_work(&rdev->hotplug_work, 0);
 	if (rdev->msi_enabled) {
 		switch (rdev->family) {
 		case CHIP_RS400:
@@ -3217,6 +3217,9 @@ void r100_bandwidth_update(struct radeon_device *rdev)
 	uint32_t pixel_bytes1 = 0;
 	uint32_t pixel_bytes2 = 0;
 
+	/* Guess line buffer size to be 8192 pixels */
+	u32 lb_size = 8192;
+
 	if (!rdev->mode_info.mode_config_initialized)
 		return;
 
@@ -3631,6 +3634,13 @@ void r100_bandwidth_update(struct radeon_device *rdev)
 		DRM_DEBUG_KMS("GRPH2_BUFFER_CNTL from to %x\n",
 			  (unsigned int)RREG32(RADEON_GRPH2_BUFFER_CNTL));
 	}
+
+	/* Save number of lines the linebuffer leads before the scanout */
+	if (mode1)
+	    rdev->mode_info.crtcs[0]->lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode1->crtc_hdisplay);
+
+	if (mode2)
+	    rdev->mode_info.crtcs[1]->lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode2->crtc_hdisplay);
 }
 
 int r100_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)

+ 1 - 1
drivers/gpu/drm/radeon/r600.c

@@ -4276,7 +4276,7 @@ restart_ih:
 		WREG32(IH_RB_RPTR, rptr);
 	}
 	if (queue_hotplug)
-		schedule_work(&rdev->hotplug_work);
+		schedule_delayed_work(&rdev->hotplug_work, 0);
 	if (queue_hdmi)
 		schedule_work(&rdev->audio_work);
 	if (queue_thermal && rdev->pm.dpm_enabled)

+ 1 - 1
drivers/gpu/drm/radeon/radeon.h

@@ -2414,7 +2414,7 @@ struct radeon_device {
 	struct r600_ih ih; /* r6/700 interrupt ring */
 	struct radeon_rlc rlc;
 	struct radeon_mec mec;
-	struct work_struct hotplug_work;
+	struct delayed_work hotplug_work;
 	struct work_struct dp_work;
 	struct work_struct audio_work;
 	int num_crtc; /* number of crtcs */
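
The work-struct conversion is mechanical, but every call site changes shape: the handler digs itself out through the embedded .work member, an immediate kick becomes a zero delay, and the DVI retry in radeon_connectors.c gains a real timeout. A hedged sketch of the pattern (mydrv names hypothetical):

#include <linux/workqueue.h>
#include <linux/jiffies.h>

struct mydrv_device {
	struct delayed_work hotplug_work; /* was: struct work_struct */
};

static void mydrv_hotplug_func(struct work_struct *work)
{
	/* note the extra ".work": container_of unwraps the embedded member */
	struct mydrv_device *mdev = container_of(work, struct mydrv_device,
						 hotplug_work.work);
	(void)mdev;
}

static void mydrv_init(struct mydrv_device *mdev)
{
	INIT_DELAYED_WORK(&mdev->hotplug_work, mydrv_hotplug_func);

	/* immediate, equivalent to the old schedule_work() */
	schedule_delayed_work(&mdev->hotplug_work, 0);

	/* deferred retry, the new capability this conversion buys */
	schedule_delayed_work(&mdev->hotplug_work, msecs_to_jiffies(1000));
}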

+ 3 - 0
drivers/gpu/drm/radeon/radeon_agp.c

@@ -54,6 +54,9 @@ static struct radeon_agpmode_quirk radeon_agpmode_quirk_list[] = {
 	/* Intel 82855PM host bridge / Mobility 9600 M10 RV350 Needs AGPMode 1 (lp #195051) */
 	{ PCI_VENDOR_ID_INTEL, 0x3340, PCI_VENDOR_ID_ATI, 0x4e50,
 		PCI_VENDOR_ID_IBM, 0x0550, 1},
+	/* Intel 82855PM host bridge / RV250/M9 GL [Mobility FireGL 9000/Radeon 9000] needs AGPMode 1 (Thinkpad T40p) */
+	{ PCI_VENDOR_ID_INTEL, 0x3340, PCI_VENDOR_ID_ATI, 0x4c66,
+		PCI_VENDOR_ID_IBM, 0x054d, 1},
 	/* Intel 82855PM host bridge / Mobility M7 needs AGPMode 1 */
 	{ PCI_VENDOR_ID_INTEL, 0x3340, PCI_VENDOR_ID_ATI, 0x4c57,
 		PCI_VENDOR_ID_IBM, 0x0530, 1},

+ 20 - 1
drivers/gpu/drm/radeon/radeon_connectors.c

@@ -1234,13 +1234,32 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
 	if (r < 0)
 		return connector_status_disconnected;
 
+	if (radeon_connector->detected_hpd_without_ddc) {
+		force = true;
+		radeon_connector->detected_hpd_without_ddc = false;
+	}
+
 	if (!force && radeon_check_hpd_status_unchanged(connector)) {
 		ret = connector->status;
 		goto exit;
 	}
 
-	if (radeon_connector->ddc_bus)
+	if (radeon_connector->ddc_bus) {
 		dret = radeon_ddc_probe(radeon_connector, false);
+
+		/* Sometimes the pins required for the DDC probe on DVI
+		 * connectors don't make contact at the same time that the ones
+		 * for HPD do. If the DDC probe fails even though we had an HPD
+		 * signal, try again later */
+		if (!dret && !force &&
+		    connector->status != connector_status_connected) {
+			DRM_DEBUG_KMS("hpd detected without ddc, retrying in 1 second\n");
+			radeon_connector->detected_hpd_without_ddc = true;
+			schedule_delayed_work(&rdev->hotplug_work,
+					      msecs_to_jiffies(1000));
+			goto exit;
+		}
+	}
 	if (dret) {
 		radeon_connector->detected_by_load = false;
 		radeon_connector_free_edid(connector);

+ 79 - 27
drivers/gpu/drm/radeon/radeon_display.c

@@ -322,7 +322,9 @@ void radeon_crtc_handle_vblank(struct radeon_device *rdev, int crtc_id)
 	 * to complete in this vblank?
 	 */
 	if (update_pending &&
-	    (DRM_SCANOUTPOS_VALID & radeon_get_crtc_scanoutpos(rdev->ddev, crtc_id, 0,
+	    (DRM_SCANOUTPOS_VALID & radeon_get_crtc_scanoutpos(rdev->ddev,
+							       crtc_id,
+							       USE_REAL_VBLANKSTART,
 							       &vpos, &hpos, NULL, NULL,
 							       &rdev->mode_info.crtcs[crtc_id]->base.hwmode)) &&
 	    ((vpos >= (99 * rdev->mode_info.crtcs[crtc_id]->base.hwmode.crtc_vdisplay)/100) ||
@@ -401,6 +403,8 @@ static void radeon_flip_work_func(struct work_struct *__work)
 	struct drm_crtc *crtc = &radeon_crtc->base;
 	unsigned long flags;
 	int r;
+	int vpos, hpos, stat, min_udelay;
+	struct drm_vblank_crtc *vblank = &crtc->dev->vblank[work->crtc_id];
 
         down_read(&rdev->exclusive_lock);
 	if (work->fence) {
@@ -437,6 +441,41 @@ static void radeon_flip_work_func(struct work_struct *__work)
 	/* set the proper interrupt */
 	radeon_irq_kms_pflip_irq_get(rdev, radeon_crtc->crtc_id);
 
+	/* If this happens to execute within the "virtually extended" vblank
+	 * interval before the start of the real vblank interval then it needs
+	 * to delay programming the mmio flip until the real vblank is entered.
+	 * This prevents completing a flip too early due to the way we fudge
+	 * our vblank counter and vblank timestamps in order to work around the
+	 * problem that the hw fires vblank interrupts before actual start of
+	 * vblank (when line buffer refilling is done for a frame). It
+	 * complements the fudging logic in radeon_get_crtc_scanoutpos() for
+	 * timestamping and radeon_get_vblank_counter_kms() for vblank counts.
+	 *
+	 * In practice this won't execute very often unless on very fast
+	 * machines because the time window for this to happen is very small.
+	 */
+	for (;;) {
+		/* GET_DISTANCE_TO_VBLANKSTART returns distance to real vblank
+		 * start in hpos, and to the "fudged earlier" vblank start in
+		 * vpos.
+		 */
+		stat = radeon_get_crtc_scanoutpos(rdev->ddev, work->crtc_id,
+						  GET_DISTANCE_TO_VBLANKSTART,
+						  &vpos, &hpos, NULL, NULL,
+						  &crtc->hwmode);
+
+		if ((stat & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE)) !=
+		    (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE) ||
+		    !(vpos >= 0 && hpos <= 0))
+			break;
+
+		/* Sleep at least until estimated real start of hw vblank */
+		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+		min_udelay = (-hpos + 1) * max(vblank->linedur_ns / 1000, 5);
+		usleep_range(min_udelay, 2 * min_udelay);
+		spin_lock_irqsave(&crtc->dev->event_lock, flags);
+	};
+
 	/* do the flip (mmio) */
 	radeon_page_flip(rdev, radeon_crtc->crtc_id, work->base);
 
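The wait loop above sizes its sleep from the scanout position: hpos comes back as the negative number of scanlines left before the real vblank starts, and vblank->linedur_ns is the duration of one scanline, so the sleep covers the remaining lines plus one, never less than 5 us per line. A standalone sketch of that arithmetic with hypothetical 1080p60 timing:

#include <stdio.h>

static unsigned int max_u(unsigned int a, unsigned int b)
{
	return a > b ? a : b;
}

int main(void)
{
	int hpos = -3;                   /* 3 lines before real vblank start */
	unsigned int linedur_ns = 14800; /* ~14.8 us per line at 1080p60 */

	/* one line-time per remaining line (plus one), floor of 5 us each */
	unsigned int min_udelay = (-hpos + 1) * max_u(linedur_ns / 1000, 5);

	printf("usleep_range(%u, %u)\n", min_udelay, 2 * min_udelay);
	/* prints: usleep_range(56, 112) */
	return 0;
}
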
@@ -1768,6 +1807,15 @@ bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
  * \param dev Device to query.
  * \param crtc Crtc to query.
  * \param flags Flags from caller (DRM_CALLED_FROM_VBLIRQ or 0).
+ *              For driver internal use only also supports these flags:
+ *
+ *              USE_REAL_VBLANKSTART to use the real start of vblank instead
+ *              of a fudged earlier start of vblank.
+ *
+ *              GET_DISTANCE_TO_VBLANKSTART to return distance to the
+ *              fudged earlier start of vblank in *vpos and the distance
+ *              to true start of vblank in *hpos.
+ *
  * \param *vpos Location where vertical scanout position should be stored.
  * \param *hpos Location where horizontal scanout position should go.
  * \param *stime Target location for timestamp taken immediately before
@@ -1911,10 +1959,40 @@ int radeon_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
 		vbl_end = 0;
 	}
 
+	/* Called from driver internal vblank counter query code? */
+	if (flags & GET_DISTANCE_TO_VBLANKSTART) {
+	    /* Caller wants distance from real vbl_start in *hpos */
+	    *hpos = *vpos - vbl_start;
+	}
+
+	/* Fudge vblank to start a few scanlines earlier to handle the
+	 * problem that vblank irqs fire a few scanlines before start
+	 * of vblank. Some driver internal callers need the true vblank
+	 * start to be used and signal this via the USE_REAL_VBLANKSTART flag.
+	 *
+	 * The cause of the "early" vblank irq is that the irq is triggered
+	 * by the line buffer logic when the line buffer read position enters
+	 * the vblank, whereas our crtc scanout position naturally lags the
+	 * line buffer read position.
+	 */
+	if (!(flags & USE_REAL_VBLANKSTART))
+		vbl_start -= rdev->mode_info.crtcs[pipe]->lb_vblank_lead_lines;
+
 	/* Test scanout position against vblank region. */
 	if ((*vpos < vbl_start) && (*vpos >= vbl_end))
 		in_vbl = false;
 
+	/* In vblank? */
+	if (in_vbl)
+	    ret |= DRM_SCANOUTPOS_IN_VBLANK;
+
+	/* Called from driver internal vblank counter query code? */
+	if (flags & GET_DISTANCE_TO_VBLANKSTART) {
+		/* Caller wants distance from fudged earlier vbl_start */
+		*vpos -= vbl_start;
+		return ret;
+	}
+
 	/* Check if inside vblank area and apply corrective offsets:
 	 * vpos will then be >=0 in video scanout area, but negative
 	 * within vblank area, counting down the number of lines until
@@ -1930,31 +2008,5 @@ int radeon_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
 	/* Correct for shifted end of vbl at vbl_end. */
 	*vpos = *vpos - vbl_end;
 
-	/* In vblank? */
-	if (in_vbl)
-		ret |= DRM_SCANOUTPOS_IN_VBLANK;
-
-	/* Is vpos outside nominal vblank area, but less than
-	 * 1/100 of a frame height away from start of vblank?
-	 * If so, assume this isn't a massively delayed vblank
-	 * interrupt, but a vblank interrupt that fired a few
-	 * microseconds before true start of vblank. Compensate
-	 * by adding a full frame duration to the final timestamp.
-	 * Happens, e.g., on ATI R500, R600.
-	 *
-	 * We only do this if DRM_CALLED_FROM_VBLIRQ.
-	 */
-	if ((flags & DRM_CALLED_FROM_VBLIRQ) && !in_vbl) {
-		vbl_start = mode->crtc_vdisplay;
-		vtotal = mode->crtc_vtotal;
-
-		if (vbl_start - *vpos < vtotal / 100) {
-			*vpos -= vtotal;
-
-			/* Signal this correction as "applied". */
-			ret |= 0x8;
-		}
-	}
-
 	return ret;
 }

+ 4 - 4
drivers/gpu/drm/radeon/radeon_irq_kms.c

@@ -74,7 +74,7 @@ irqreturn_t radeon_driver_irq_handler_kms(int irq, void *arg)
 static void radeon_hotplug_work_func(struct work_struct *work)
 {
 	struct radeon_device *rdev = container_of(work, struct radeon_device,
-						  hotplug_work);
+						  hotplug_work.work);
 	struct drm_device *dev = rdev->ddev;
 	struct drm_mode_config *mode_config = &dev->mode_config;
 	struct drm_connector *connector;
@@ -302,7 +302,7 @@ int radeon_irq_kms_init(struct radeon_device *rdev)
 		}
 	}
 
-	INIT_WORK(&rdev->hotplug_work, radeon_hotplug_work_func);
+	INIT_DELAYED_WORK(&rdev->hotplug_work, radeon_hotplug_work_func);
 	INIT_WORK(&rdev->dp_work, radeon_dp_work_func);
 	INIT_WORK(&rdev->audio_work, r600_audio_update_hdmi);
 
@@ -310,7 +310,7 @@ int radeon_irq_kms_init(struct radeon_device *rdev)
 	r = drm_irq_install(rdev->ddev, rdev->ddev->pdev->irq);
 	if (r) {
 		rdev->irq.installed = false;
-		flush_work(&rdev->hotplug_work);
+		flush_delayed_work(&rdev->hotplug_work);
 		return r;
 	}
 
@@ -333,7 +333,7 @@ void radeon_irq_kms_fini(struct radeon_device *rdev)
 		rdev->irq.installed = false;
 		if (rdev->msi_enabled)
 			pci_disable_msi(rdev->pdev);
-		flush_work(&rdev->hotplug_work);
+		flush_delayed_work(&rdev->hotplug_work);
 	}
 }
 

+ 49 - 1
drivers/gpu/drm/radeon/radeon_kms.c

@@ -755,6 +755,8 @@ void radeon_driver_preclose_kms(struct drm_device *dev,
  */
 u32 radeon_get_vblank_counter_kms(struct drm_device *dev, int crtc)
 {
+	int vpos, hpos, stat;
+	u32 count;
 	struct radeon_device *rdev = dev->dev_private;
 
 	if (crtc < 0 || crtc >= rdev->num_crtc) {
@@ -762,7 +764,53 @@ u32 radeon_get_vblank_counter_kms(struct drm_device *dev, int crtc)
 		return -EINVAL;
 	}
 
-	return radeon_get_vblank_counter(rdev, crtc);
+	/* The hw increments its frame counter at start of vsync, not at start
+	 * of vblank, as is required by DRM core vblank counter handling.
+	 * Cook the hw count here to make it appear to the caller as if it
+	 * incremented at start of vblank. We measure distance to start of
+	 * vblank in vpos. vpos therefore will be >= 0 between start of vblank
+	 * and start of vsync, so vpos >= 0 means to bump the hw frame counter
+	 * result by 1 to give the proper appearance to caller.
+	 */
+	if (rdev->mode_info.crtcs[crtc]) {
+		/* Repeat readout if needed to provide stable result if
+		 * we cross start of vsync during the queries.
+		 */
+		do {
+			count = radeon_get_vblank_counter(rdev, crtc);
+			/* Ask radeon_get_crtc_scanoutpos to return vpos as
+			 * distance to start of vblank, instead of regular
+			 * vertical scanout pos.
+			 */
+			stat = radeon_get_crtc_scanoutpos(
+				dev, crtc, GET_DISTANCE_TO_VBLANKSTART,
+				&vpos, &hpos, NULL, NULL,
+				&rdev->mode_info.crtcs[crtc]->base.hwmode);
+		} while (count != radeon_get_vblank_counter(rdev, crtc));
+
+		if (((stat & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE)) !=
+		    (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE))) {
+			DRM_DEBUG_VBL("Query failed! stat %d\n", stat);
+		}
+		else {
+			DRM_DEBUG_VBL("crtc %d: dist from vblank start %d\n",
+				      crtc, vpos);
+
+			/* Bump counter if we are at >= leading edge of vblank,
+			 * but before vsync where vpos would turn negative and
+			 * the hw counter really increments.
+			 */
+			if (vpos >= 0)
+				count++;
+		}
+	}
+	else {
+	    /* Fallback to use value as is. */
+	    count = radeon_get_vblank_counter(rdev, crtc);
+	    DRM_DEBUG_VBL("NULL mode info! Returned count may be wrong.\n");
+	}
+
+	return count;
 }
 
 /**
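
The cooked counter above is easiest to see with numbers: the hardware bumps its frame count at start of vsync, DRM wants the bump at start of vblank, and vpos measures the distance to vblank start, staying >= 0 exactly in the window between the two. A standalone sketch (values hypothetical):

#include <stdio.h>

static unsigned int cooked_count(unsigned int hw_count, int vpos)
{
	/* between vblank start and vsync the hw counter lags by one */
	return vpos >= 0 ? hw_count + 1 : hw_count;
}

int main(void)
{
	/* mid-frame: still counting down to vblank start, no adjustment */
	printf("mid-frame:    %u\n", cooked_count(1000, -500)); /* 1000 */

	/* inside the early-vblank window: report the upcoming increment */
	printf("early vblank: %u\n", cooked_count(1000, 2));    /* 1001 */
	return 0;
}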

+ 5 - 0
drivers/gpu/drm/radeon/radeon_mode.h

@@ -367,6 +367,7 @@ struct radeon_crtc {
 	u32 line_time;
 	u32 wm_low;
 	u32 wm_high;
+	u32 lb_vblank_lead_lines;
 	struct drm_display_mode hw_mode;
 	enum radeon_output_csc output_csc;
 };
@@ -553,6 +554,7 @@ struct radeon_connector {
 	void *con_priv;
 	bool dac_load_detect;
 	bool detected_by_load; /* if the connection status was determined by load */
+	bool detected_hpd_without_ddc; /* if an HPD signal was detected on DVI, but ddc probing failed */
 	uint16_t connector_object_id;
 	struct radeon_hpd hpd;
 	struct radeon_router router;
@@ -686,6 +688,9 @@ struct atom_voltage_table
 	struct atom_voltage_table_entry entries[MAX_VOLTAGE_ENTRIES];
 };
 
+/* Driver internal use only flags of radeon_get_crtc_scanoutpos() */
+#define USE_REAL_VBLANKSTART 		(1 << 30)
+#define GET_DISTANCE_TO_VBLANKSTART	(1 << 31)
 
 extern void
 radeon_add_atom_connector(struct drm_device *dev,

+ 3 - 1
drivers/gpu/drm/radeon/radeon_pm.c

@@ -1756,7 +1756,9 @@ static bool radeon_pm_in_vbl(struct radeon_device *rdev)
 	 */
 	for (crtc = 0; (crtc < rdev->num_crtc) && in_vbl; crtc++) {
 		if (rdev->pm.active_crtcs & (1 << crtc)) {
-			vbl_status = radeon_get_crtc_scanoutpos(rdev->ddev, crtc, 0,
+			vbl_status = radeon_get_crtc_scanoutpos(rdev->ddev,
+								crtc,
+								USE_REAL_VBLANKSTART,
 								&vpos, &hpos, NULL, NULL,
 								&rdev->mode_info.crtcs[crtc]->base.hwmode);
 			if ((vbl_status & DRM_SCANOUTPOS_VALID) &&

+ 1 - 1
drivers/gpu/drm/radeon/rs600.c

@@ -813,7 +813,7 @@ int rs600_irq_process(struct radeon_device *rdev)
 		status = rs600_irq_ack(rdev);
 	}
 	if (queue_hotplug)
-		schedule_work(&rdev->hotplug_work);
+		schedule_delayed_work(&rdev->hotplug_work, 0);
 	if (queue_hdmi)
 		schedule_work(&rdev->audio_work);
 	if (rdev->msi_enabled) {

+ 10 - 0
drivers/gpu/drm/radeon/rs690.c

@@ -207,6 +207,9 @@ void rs690_line_buffer_adjust(struct radeon_device *rdev,
 {
 	u32 tmp;
 
+	/* Guess line buffer size to be 8192 pixels */
+	u32 lb_size = 8192;
+
 	/*
 	 * Line Buffer Setup
 	 * There is a single line buffer shared by both display controllers.
@@ -243,6 +246,13 @@ void rs690_line_buffer_adjust(struct radeon_device *rdev,
 		tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q;
 	}
 	WREG32(R_006520_DC_LB_MEMORY_SPLIT, tmp);
+
+	/* Save number of lines the linebuffer leads before the scanout */
+	if (mode1)
+		rdev->mode_info.crtcs[0]->lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode1->crtc_hdisplay);
+
+	if (mode2)
+		rdev->mode_info.crtcs[1]->lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode2->crtc_hdisplay);
 }
 
 struct rs690_watermark {

+ 4 - 1
drivers/gpu/drm/radeon/si.c

@@ -2376,6 +2376,9 @@ static void dce6_program_watermarks(struct radeon_device *rdev,
 		c.full = dfixed_div(c, a);
 		priority_b_mark = dfixed_trunc(c);
 		priority_b_cnt |= priority_b_mark & PRIORITY_MARK_MASK;
+
+		/* Save number of lines the linebuffer leads before the scanout */
+		radeon_crtc->lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay);
 	}
 
 	/* select wm A */
@@ -6848,7 +6851,7 @@ restart_ih:
 	if (queue_dp)
 		schedule_work(&rdev->dp_work);
 	if (queue_hotplug)
-		schedule_work(&rdev->hotplug_work);
+		schedule_delayed_work(&rdev->hotplug_work, 0);
 	if (queue_thermal && rdev->pm.dpm_enabled)
 		schedule_work(&rdev->pm.dpm.thermal.work);
 	rdev->ih.rptr = rptr;

+ 1 - 0
drivers/gpu/drm/rockchip/rockchip_drm_gem.c

@@ -67,6 +67,7 @@ static int rockchip_drm_gem_object_mmap(struct drm_gem_object *obj,
 	 * VM_PFNMAP flag that was set by drm_gem_mmap_obj()/drm_gem_mmap().
 	 */
 	vma->vm_flags &= ~VM_PFNMAP;
+	vma->vm_pgoff = 0;
 
 	ret = dma_mmap_attrs(drm->dev, vma, rk_obj->kvaddr, rk_obj->dma_addr,
 			     obj->size, &rk_obj->dma_attrs);

Too many files changed in this diff, so some files are not shown.