
Merge branch 'x86/cache' into perf/core, to pick up fixes

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Ingo Molnar authored 6 years ago
Commit dda93b4538
100 changed files with 1001 additions and 583 deletions
  1. Documentation/driver-api/fpga/fpga-mgr.rst (+5 -0)
  2. Documentation/fb/uvesafb.txt (+3 -2)
  3. Documentation/networking/ip-sysctl.txt (+1 -1)
  4. MAINTAINERS (+7 -9)
  5. Makefile (+1 -1)
  6. arch/arm/boot/dts/at91-sama5d2_ptc_ek.dts (+2 -0)
  7. arch/arm/boot/dts/bcm63138.dtsi (+8 -6)
  8. arch/arm/boot/dts/stm32mp157c.dtsi (+2 -2)
  9. arch/arm/boot/dts/sun8i-r40.dtsi (+1 -2)
  10. arch/arm/mm/ioremap.c (+1 -1)
  11. arch/arm/tools/syscall.tbl (+1 -0)
  12. arch/arm64/kvm/guest.c (+54 -1)
  13. arch/arm64/mm/hugetlbpage.c (+43 -7)
  14. arch/powerpc/kernel/process.c (+10 -0)
  15. arch/powerpc/kvm/book3s_64_mmu_radix.c (+10 -0)
  16. arch/powerpc/lib/code-patching.c (+12 -8)
  17. arch/powerpc/mm/numa.c (+3 -2)
  18. arch/riscv/kernel/setup.c (+1 -1)
  19. arch/x86/entry/vdso/Makefile (+14 -2)
  20. arch/x86/entry/vdso/vclock_gettime.c (+14 -12)
  21. arch/x86/include/asm/uv/uv.h (+6 -0)
  22. arch/x86/kernel/cpu/amd.c (+1 -1)
  23. arch/x86/kernel/cpu/intel_rdt.c (+11 -6)
  24. arch/x86/kernel/cpu/intel_rdt.h (+3 -3)
  25. arch/x86/kernel/cpu/intel_rdt_ctrlmondata.c (+10 -2)
  26. arch/x86/kernel/cpu/intel_rdt_pseudo_lock.c (+22 -10)
  27. arch/x86/kernel/cpu/intel_rdt_rdtgroup.c (+182 -28)
  28. arch/x86/kernel/tsc.c (+4 -0)
  29. arch/x86/kvm/mmu.c (+20 -4)
  30. arch/x86/kvm/vmx.c (+76 -61)
  31. arch/x86/kvm/x86.c (+1 -1)
  32. drivers/base/firmware_loader/main.c (+5 -2)
  33. drivers/base/power/main.c (+4 -1)
  34. drivers/crypto/caam/caamalg.c (+4 -4)
  35. drivers/crypto/chelsio/chcr_algo.c (+22 -10)
  36. drivers/crypto/chelsio/chcr_crypto.h (+2 -0)
  37. drivers/crypto/mxs-dcp.c (+30 -23)
  38. drivers/crypto/qat/qat_c3xxx/adf_drv.c (+3 -3)
  39. drivers/crypto/qat/qat_c3xxxvf/adf_drv.c (+3 -3)
  40. drivers/crypto/qat/qat_c62x/adf_drv.c (+3 -3)
  41. drivers/crypto/qat/qat_c62xvf/adf_drv.c (+3 -3)
  42. drivers/crypto/qat/qat_dh895xcc/adf_drv.c (+3 -3)
  43. drivers/crypto/qat/qat_dh895xccvf/adf_drv.c (+3 -3)
  44. drivers/fpga/dfl-fme-region.c (+3 -1)
  45. drivers/fpga/fpga-bridge.c (+1 -1)
  46. drivers/fpga/of-fpga-region.c (+2 -1)
  47. drivers/gpio/gpiolib.c (+1 -1)
  48. drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c (+29 -8)
  49. drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c (+8 -2)
  50. drivers/gpu/drm/drm_client.c (+26 -9)
  51. drivers/gpu/drm/drm_fb_cma_helper.c (+3 -1)
  52. drivers/gpu/drm/drm_fb_helper.c (+3 -1)
  53. drivers/gpu/drm/drm_lease.c (+3 -3)
  54. drivers/gpu/drm/exynos/exynos_drm_iommu.h (+6 -28)
  55. drivers/gpu/drm/i2c/tda9950.c (+3 -2)
  56. drivers/gpu/drm/i915/i915_gpu_error.c (+63 -25)
  57. drivers/gpu/drm/i915/i915_gpu_error.h (+1 -0)
  58. drivers/gpu/drm/i915/i915_irq.c (+12 -21)
  59. drivers/gpu/drm/i915/i915_pci.c (+0 -1)
  60. drivers/hid/hid-ids.h (+0 -1)
  61. drivers/hid/i2c-hid/i2c-hid.c (+10 -17)
  62. drivers/hid/intel-ish-hid/ipc/hw-ish.h (+1 -0)
  63. drivers/hid/intel-ish-hid/ipc/pci-ish.c (+1 -0)
  64. drivers/hv/connection.c (+5 -3)
  65. drivers/i2c/busses/i2c-designware-master.c (+3 -1)
  66. drivers/i2c/busses/i2c-isch.c (+1 -1)
  67. drivers/i2c/busses/i2c-qcom-geni.c (+18 -4)
  68. drivers/i2c/busses/i2c-scmi.c (+1 -0)
  69. drivers/iommu/amd_iommu.c (+1 -1)
  70. drivers/md/dm-cache-metadata.c (+2 -2)
  71. drivers/md/dm-cache-target.c (+7 -2)
  72. drivers/md/dm-mpath.c (+8 -6)
  73. drivers/md/dm-raid.c (+1 -1)
  74. drivers/md/dm-thin-metadata.c (+2 -4)
  75. drivers/media/v4l2-core/v4l2-event.c (+20 -18)
  76. drivers/media/v4l2-core/v4l2-fh.c (+2 -0)
  77. drivers/mmc/core/host.c (+1 -1)
  78. drivers/mmc/core/slot-gpio.c (+1 -1)
  79. drivers/mmc/host/renesas_sdhi_sys_dmac.c (+2 -1)
  80. drivers/net/bonding/bond_main.c (+37 -28)
  81. drivers/net/dsa/b53/b53_common.c (+2 -2)
  82. drivers/net/ethernet/amazon/ena/ena_netdev.c (+0 -22)
  83. drivers/net/ethernet/amd/declance.c (+6 -4)
  84. drivers/net/ethernet/broadcom/bcmsysport.c (+11 -17)
  85. drivers/net/ethernet/broadcom/bnxt/bnxt.c (+18 -9)
  86. drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c (+3 -3)
  87. drivers/net/ethernet/cadence/macb_main.c (+1 -0)
  88. drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c (+17 -0)
  89. drivers/net/ethernet/emulex/benet/be_main.c (+1 -4)
  90. drivers/net/ethernet/freescale/fec_main.c (+4 -4)
  91. drivers/net/ethernet/hisilicon/hns/hnae.c (+1 -1)
  92. drivers/net/ethernet/hisilicon/hns/hns_enet.c (+19 -29)
  93. drivers/net/ethernet/huawei/hinic/hinic_main.c (+0 -20)
  94. drivers/net/ethernet/ibm/ehea/ehea_main.c (+0 -14)
  95. drivers/net/ethernet/ibm/ibmvnic.c (+0 -16)
  96. drivers/net/ethernet/intel/ixgbe/ixgbe_main.c (+7 -5)
  97. drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c (+5 -4)
  98. drivers/net/ethernet/mellanox/mlx5/core/en.h (+1 -0)
  99. drivers/net/ethernet/mellanox/mlx5/core/en/fs.h (+2 -0)
  100. drivers/net/ethernet/mellanox/mlx5/core/en_main.c (+1 -1)

+ 5 - 0
Documentation/driver-api/fpga/fpga-mgr.rst

@@ -184,6 +184,11 @@ API for implementing a new FPGA Manager driver
 API for programming an FPGA
 ---------------------------
 
+FPGA Manager flags
+
+.. kernel-doc:: include/linux/fpga/fpga-mgr.h
+   :doc: FPGA Manager flags
+
 .. kernel-doc:: include/linux/fpga/fpga-mgr.h
    :functions: fpga_image_info
 

+ 3 - 2
Documentation/fb/uvesafb.txt

@@ -15,7 +15,8 @@ than x86.  Check the v86d documentation for a list of currently supported
 arches.
 
 v86d source code can be downloaded from the following website:
-  http://dev.gentoo.org/~spock/projects/uvesafb
+
+  https://github.com/mjanusz/v86d
 
 Please refer to the v86d documentation for detailed configuration and
 installation instructions.
@@ -177,7 +178,7 @@ from the Video BIOS if you set pixclock to 0 in fb_var_screeninfo.
 
 --
  Michal Januszewski <spock@gentoo.org>
- Last updated: 2009-03-30
+ Last updated: 2017-10-10
 
  Documentation of the uvesafb options is loosely based on vesafb.txt.
 

+ 1 - 1
Documentation/networking/ip-sysctl.txt

@@ -425,7 +425,7 @@ tcp_mtu_probing - INTEGER
 	  1 - Disabled by default, enabled when an ICMP black hole detected
 	  2 - Always enabled, use initial MSS of tcp_base_mss.
 
-tcp_probe_interval - INTEGER
+tcp_probe_interval - UNSIGNED INTEGER
 	Controls how often to start TCP Packetization-Layer Path MTU
 	Discovery reprobe. The default is reprobing every 10 minutes as
 	per RFC4821.

+ 7 - 9
MAINTAINERS

@@ -324,7 +324,6 @@ F:	Documentation/ABI/testing/sysfs-bus-acpi
 F:	Documentation/ABI/testing/configfs-acpi
 F:	drivers/pci/*acpi*
 F:	drivers/pci/*/*acpi*
-F:	drivers/pci/*/*/*acpi*
 F:	tools/power/acpi/
 
 ACPI APEI
@@ -1251,7 +1250,7 @@ N:	meson
 
 ARM/Annapurna Labs ALPINE ARCHITECTURE
 M:	Tsahee Zidenberg <tsahee@annapurnalabs.com>
-M:	Antoine Tenart <antoine.tenart@free-electrons.com>
+M:	Antoine Tenart <antoine.tenart@bootlin.com>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Maintained
 F:	arch/arm/mach-alpine/
@@ -2956,7 +2955,6 @@ F:	include/linux/bcm963xx_tag.h
 
 BROADCOM BNX2 GIGABIT ETHERNET DRIVER
 M:	Rasesh Mody <rasesh.mody@cavium.com>
-M:	Harish Patil <harish.patil@cavium.com>
 M:	Dept-GELinuxNICDev@cavium.com
 L:	netdev@vger.kernel.org
 S:	Supported
@@ -2977,6 +2975,7 @@ F:	drivers/scsi/bnx2i/
 
 BROADCOM BNX2X 10 GIGABIT ETHERNET DRIVER
 M:	Ariel Elior <ariel.elior@cavium.com>
+M:	Sudarsana Kalluru <sudarsana.kalluru@cavium.com>
 M:	everest-linux-l2@cavium.com
 L:	netdev@vger.kernel.org
 S:	Supported
@@ -5470,7 +5469,8 @@ S:	Odd Fixes
 F:	drivers/net/ethernet/agere/
 
 ETHERNET BRIDGE
-M:	Stephen Hemminger <stephen@networkplumber.org>
+M:	Roopa Prabhu <roopa@cumulusnetworks.com>
+M:	Nikolay Aleksandrov <nikolay@cumulusnetworks.com>
 L:	bridge@lists.linux-foundation.org (moderated for non-subscribers)
 L:	netdev@vger.kernel.org
 W:	http://www.linuxfoundation.org/en/Net:Bridge
@@ -8598,7 +8598,6 @@ F:	include/linux/spinlock*.h
 F:	arch/*/include/asm/spinlock*.h
 F:	include/linux/rwlock*.h
 F:	include/linux/mutex*.h
-F:	arch/*/include/asm/mutex*.h
 F:	include/linux/rwsem*.h
 F:	arch/*/include/asm/rwsem.h
 F:	include/linux/seqlock.h
@@ -10942,7 +10941,7 @@ M:	Willy Tarreau <willy@haproxy.com>
 M:	Ksenija Stanojevic <ksenija.stanojevic@gmail.com>
 S:	Odd Fixes
 F:	Documentation/auxdisplay/lcd-panel-cgram.txt
-F:	drivers/misc/panel.c
+F:	drivers/auxdisplay/panel.c
 
 PARALLEL PORT SUBSYSTEM
 M:	Sudip Mukherjee <sudipm.mukherjee@gmail.com>
@@ -11979,7 +11978,7 @@ F:	Documentation/scsi/LICENSE.qla4xxx
 F:	drivers/scsi/qla4xxx/
 
 QLOGIC QLCNIC (1/10)Gb ETHERNET DRIVER
-M:	Harish Patil <harish.patil@cavium.com>
+M:	Shahed Shaikh <Shahed.Shaikh@cavium.com>
 M:	Manish Chopra <manish.chopra@cavium.com>
 M:	Dept-GELinuxNICDev@cavium.com
 L:	netdev@vger.kernel.org
@@ -11987,7 +11986,6 @@ S:	Supported
 F:	drivers/net/ethernet/qlogic/qlcnic/
 
 QLOGIC QLGE 10Gb ETHERNET DRIVER
-M:	Harish Patil <harish.patil@cavium.com>
 M:	Manish Chopra <manish.chopra@cavium.com>
 M:	Dept-GELinuxNICDev@cavium.com
 L:	netdev@vger.kernel.org
@@ -15395,7 +15393,7 @@ S:	Maintained
 UVESAFB DRIVER
 M:	Michal Januszewski <spock@gentoo.org>
 L:	linux-fbdev@vger.kernel.org
-W:	http://dev.gentoo.org/~spock/projects/uvesafb/
+W:	https://github.com/mjanusz/v86d
 S:	Maintained
 F:	Documentation/fb/uvesafb.txt
 F:	drivers/video/fbdev/uvesafb.*

+ 1 - 1
Makefile

@@ -2,7 +2,7 @@
 VERSION = 4
 PATCHLEVEL = 19
 SUBLEVEL = 0
-EXTRAVERSION = -rc5
+EXTRAVERSION = -rc7
 NAME = Merciless Moray
 
 # *DOCUMENTATION*

+ 2 - 0
arch/arm/boot/dts/at91-sama5d2_ptc_ek.dts

@@ -11,6 +11,7 @@
 #include "sama5d2-pinfunc.h"
 #include "sama5d2-pinfunc.h"
 #include <dt-bindings/mfd/atmel-flexcom.h>
 #include <dt-bindings/mfd/atmel-flexcom.h>
 #include <dt-bindings/gpio/gpio.h>
 #include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/pinctrl/at91.h>
 
 
 / {
 / {
 	model = "Atmel SAMA5D2 PTC EK";
 	model = "Atmel SAMA5D2 PTC EK";
@@ -299,6 +300,7 @@
 							 <PIN_PA30__NWE_NANDWE>,
 							 <PIN_PA30__NWE_NANDWE>,
 							 <PIN_PB2__NRD_NANDOE>;
 							 <PIN_PB2__NRD_NANDOE>;
 						bias-pull-up;
 						bias-pull-up;
+						atmel,drive-strength = <ATMEL_PIO_DRVSTR_ME>;
 					};
 					};
 
 
 					ale_cle_rdy_cs {
 					ale_cle_rdy_cs {

+ 8 - 6
arch/arm/boot/dts/bcm63138.dtsi

@@ -106,21 +106,23 @@
 		global_timer: timer@1e200 {
 			compatible = "arm,cortex-a9-global-timer";
 			reg = <0x1e200 0x20>;
-			interrupts = <GIC_PPI 11 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_PPI 11 IRQ_TYPE_EDGE_RISING>;
 			clocks = <&axi_clk>;
 		};
 
 		local_timer: local-timer@1e600 {
 			compatible = "arm,cortex-a9-twd-timer";
 			reg = <0x1e600 0x20>;
-			interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(2) |
+						  IRQ_TYPE_EDGE_RISING)>;
 			clocks = <&axi_clk>;
 		};
 
 		twd_watchdog: watchdog@1e620 {
 			compatible = "arm,cortex-a9-twd-wdt";
 			reg = <0x1e620 0x20>;
-			interrupts = <GIC_PPI 14 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(2) |
+						  IRQ_TYPE_LEVEL_HIGH)>;
 		};
 
 		armpll: armpll {
@@ -158,7 +160,7 @@
 		serial0: serial@600 {
 			compatible = "brcm,bcm6345-uart";
 			reg = <0x600 0x1b>;
-			interrupts = <GIC_SPI 32 0>;
+			interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&periph_clk>;
 			clock-names = "periph";
 			status = "disabled";
@@ -167,7 +169,7 @@
 		serial1: serial@620 {
 			compatible = "brcm,bcm6345-uart";
 			reg = <0x620 0x1b>;
-			interrupts = <GIC_SPI 33 0>;
+			interrupts = <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&periph_clk>;
 			clock-names = "periph";
 			status = "disabled";
@@ -180,7 +182,7 @@
 			reg = <0x2000 0x600>, <0xf0 0x10>;
 			reg-names = "nand", "nand-int-base";
 			status = "disabled";
-			interrupts = <GIC_SPI 38 0>;
+			interrupts = <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>;
 			interrupt-names = "nand";
 		};
 

+ 2 - 2
arch/arm/boot/dts/stm32mp157c.dtsi

@@ -1078,8 +1078,8 @@
 			interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&rcc SPI6_K>;
 			resets = <&rcc SPI6_R>;
-			dmas = <&mdma1 34 0x0 0x40008 0x0 0x0 0>,
-			       <&mdma1 35 0x0 0x40002 0x0 0x0 0>;
+			dmas = <&mdma1 34 0x0 0x40008 0x0 0x0>,
+			       <&mdma1 35 0x0 0x40002 0x0 0x0>;
 			dma-names = "rx", "tx";
 			status = "disabled";
 		};

+ 1 - 2
arch/arm/boot/dts/sun8i-r40.dtsi

@@ -800,8 +800,7 @@
 		};
 
 		hdmi_phy: hdmi-phy@1ef0000 {
-			compatible = "allwinner,sun8i-r40-hdmi-phy",
-				     "allwinner,sun50i-a64-hdmi-phy";
+			compatible = "allwinner,sun8i-r40-hdmi-phy";
 			reg = <0x01ef0000 0x10000>;
 			clocks = <&ccu CLK_BUS_HDMI1>, <&ccu CLK_HDMI_SLOW>,
 				 <&ccu 7>, <&ccu 16>;

+ 1 - 1
arch/arm/mm/ioremap.c

@@ -473,7 +473,7 @@ void pci_ioremap_set_mem_type(int mem_type)
 
 int pci_ioremap_io(unsigned int offset, phys_addr_t phys_addr)
 {
-	BUG_ON(offset + SZ_64K > IO_SPACE_LIMIT);
+	BUG_ON(offset + SZ_64K - 1 > IO_SPACE_LIMIT);
 
 	return ioremap_page_range(PCI_IO_VIRT_BASE + offset,
 				  PCI_IO_VIRT_BASE + offset + SZ_64K,

+ 1 - 0
arch/arm/tools/syscall.tbl

@@ -413,3 +413,4 @@
 396	common	pkey_free		sys_pkey_free
 397	common	statx			sys_statx
 398	common	rseq			sys_rseq
+399	common	io_pgetevents		sys_io_pgetevents

+ 54 - 1
arch/arm64/kvm/guest.c

@@ -57,6 +57,45 @@ static u64 core_reg_offset_from_id(u64 id)
 	return id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_CORE);
 	return id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_CORE);
 }
 }
 
 
+static int validate_core_offset(const struct kvm_one_reg *reg)
+{
+	u64 off = core_reg_offset_from_id(reg->id);
+	int size;
+
+	switch (off) {
+	case KVM_REG_ARM_CORE_REG(regs.regs[0]) ...
+	     KVM_REG_ARM_CORE_REG(regs.regs[30]):
+	case KVM_REG_ARM_CORE_REG(regs.sp):
+	case KVM_REG_ARM_CORE_REG(regs.pc):
+	case KVM_REG_ARM_CORE_REG(regs.pstate):
+	case KVM_REG_ARM_CORE_REG(sp_el1):
+	case KVM_REG_ARM_CORE_REG(elr_el1):
+	case KVM_REG_ARM_CORE_REG(spsr[0]) ...
+	     KVM_REG_ARM_CORE_REG(spsr[KVM_NR_SPSR - 1]):
+		size = sizeof(__u64);
+		break;
+
+	case KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]) ...
+	     KVM_REG_ARM_CORE_REG(fp_regs.vregs[31]):
+		size = sizeof(__uint128_t);
+		break;
+
+	case KVM_REG_ARM_CORE_REG(fp_regs.fpsr):
+	case KVM_REG_ARM_CORE_REG(fp_regs.fpcr):
+		size = sizeof(__u32);
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	if (KVM_REG_SIZE(reg->id) == size &&
+	    IS_ALIGNED(off, size / sizeof(__u32)))
+		return 0;
+
+	return -EINVAL;
+}
+
 static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 {
 {
 	/*
 	/*
@@ -76,6 +115,9 @@ static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 	    (off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
 	    (off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
 		return -ENOENT;
 		return -ENOENT;
 
 
+	if (validate_core_offset(reg))
+		return -EINVAL;
+
 	if (copy_to_user(uaddr, ((u32 *)regs) + off, KVM_REG_SIZE(reg->id)))
 	if (copy_to_user(uaddr, ((u32 *)regs) + off, KVM_REG_SIZE(reg->id)))
 		return -EFAULT;
 		return -EFAULT;
 
 
@@ -98,6 +140,9 @@ static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 	    (off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
 	    (off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
 		return -ENOENT;
 		return -ENOENT;
 
 
+	if (validate_core_offset(reg))
+		return -EINVAL;
+
 	if (KVM_REG_SIZE(reg->id) > sizeof(tmp))
 	if (KVM_REG_SIZE(reg->id) > sizeof(tmp))
 		return -EINVAL;
 		return -EINVAL;
 
 
@@ -107,17 +152,25 @@ static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 	}
 	}
 
 
 	if (off == KVM_REG_ARM_CORE_REG(regs.pstate)) {
 	if (off == KVM_REG_ARM_CORE_REG(regs.pstate)) {
-		u32 mode = (*(u32 *)valp) & PSR_AA32_MODE_MASK;
+		u64 mode = (*(u64 *)valp) & PSR_AA32_MODE_MASK;
 		switch (mode) {
 		switch (mode) {
 		case PSR_AA32_MODE_USR:
 		case PSR_AA32_MODE_USR:
+			if (!system_supports_32bit_el0())
+				return -EINVAL;
+			break;
 		case PSR_AA32_MODE_FIQ:
 		case PSR_AA32_MODE_FIQ:
 		case PSR_AA32_MODE_IRQ:
 		case PSR_AA32_MODE_IRQ:
 		case PSR_AA32_MODE_SVC:
 		case PSR_AA32_MODE_SVC:
 		case PSR_AA32_MODE_ABT:
 		case PSR_AA32_MODE_ABT:
 		case PSR_AA32_MODE_UND:
 		case PSR_AA32_MODE_UND:
+			if (!vcpu_el1_is_32bit(vcpu))
+				return -EINVAL;
+			break;
 		case PSR_MODE_EL0t:
 		case PSR_MODE_EL0t:
 		case PSR_MODE_EL1t:
 		case PSR_MODE_EL1t:
 		case PSR_MODE_EL1h:
 		case PSR_MODE_EL1h:
+			if (vcpu_el1_is_32bit(vcpu))
+				return -EINVAL;
 			break;
 			break;
 		default:
 		default:
 			err = -EINVAL;
 			err = -EINVAL;

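The new validate_core_offset() check rejects KVM_{GET,SET}_ONE_REG accesses whose user-supplied size or alignment does not match the core register being addressed. For context only, this is a rough userspace sketch (not part of the patch) of how such a register id is normally built; with the fix, an id carrying the wrong KVM_REG_SIZE now fails with -EINVAL instead of being copied with a bogus width:

#include <linux/kvm.h>
#include <stddef.h>
#include <sys/ioctl.h>

/* Core registers are addressed by their 32-bit word offset into struct kvm_regs. */
#define ARM64_CORE_REG(x)	(KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | \
				 (offsetof(struct kvm_regs, x) / sizeof(__u32)))

static int set_guest_pc(int vcpu_fd, __u64 pc)
{
	struct kvm_one_reg reg = {
		.id   = ARM64_CORE_REG(regs.pc),	/* 64-bit register, 64-bit size */
		.addr = (__u64)&pc,
	};

	return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
}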
+ 43 - 7
arch/arm64/mm/hugetlbpage.c

@@ -117,11 +117,14 @@ static pte_t get_clear_flush(struct mm_struct *mm,
 
 
 		/*
 		/*
 		 * If HW_AFDBM is enabled, then the HW could turn on
 		 * If HW_AFDBM is enabled, then the HW could turn on
-		 * the dirty bit for any page in the set, so check
-		 * them all.  All hugetlb entries are already young.
+		 * the dirty or accessed bit for any page in the set,
+		 * so check them all.
 		 */
 		 */
 		if (pte_dirty(pte))
 		if (pte_dirty(pte))
 			orig_pte = pte_mkdirty(orig_pte);
 			orig_pte = pte_mkdirty(orig_pte);
+
+		if (pte_young(pte))
+			orig_pte = pte_mkyoung(orig_pte);
 	}
 	}
 
 
 	if (valid) {
 	if (valid) {
@@ -320,11 +323,40 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
 	return get_clear_flush(mm, addr, ptep, pgsize, ncontig);
 	return get_clear_flush(mm, addr, ptep, pgsize, ncontig);
 }
 }
 
 
+/*
+ * huge_ptep_set_access_flags will update access flags (dirty, accesssed)
+ * and write permission.
+ *
+ * For a contiguous huge pte range we need to check whether or not write
+ * permission has to change only on the first pte in the set. Then for
+ * all the contiguous ptes we need to check whether or not there is a
+ * discrepancy between dirty or young.
+ */
+static int __cont_access_flags_changed(pte_t *ptep, pte_t pte, int ncontig)
+{
+	int i;
+
+	if (pte_write(pte) != pte_write(huge_ptep_get(ptep)))
+		return 1;
+
+	for (i = 0; i < ncontig; i++) {
+		pte_t orig_pte = huge_ptep_get(ptep + i);
+
+		if (pte_dirty(pte) != pte_dirty(orig_pte))
+			return 1;
+
+		if (pte_young(pte) != pte_young(orig_pte))
+			return 1;
+	}
+
+	return 0;
+}
+
 int huge_ptep_set_access_flags(struct vm_area_struct *vma,
 int huge_ptep_set_access_flags(struct vm_area_struct *vma,
 			       unsigned long addr, pte_t *ptep,
 			       unsigned long addr, pte_t *ptep,
 			       pte_t pte, int dirty)
 			       pte_t pte, int dirty)
 {
 {
-	int ncontig, i, changed = 0;
+	int ncontig, i;
 	size_t pgsize = 0;
 	size_t pgsize = 0;
 	unsigned long pfn = pte_pfn(pte), dpfn;
 	unsigned long pfn = pte_pfn(pte), dpfn;
 	pgprot_t hugeprot;
 	pgprot_t hugeprot;
@@ -336,19 +368,23 @@ int huge_ptep_set_access_flags(struct vm_area_struct *vma,
 	ncontig = find_num_contig(vma->vm_mm, addr, ptep, &pgsize);
 	ncontig = find_num_contig(vma->vm_mm, addr, ptep, &pgsize);
 	dpfn = pgsize >> PAGE_SHIFT;
 	dpfn = pgsize >> PAGE_SHIFT;
 
 
+	if (!__cont_access_flags_changed(ptep, pte, ncontig))
+		return 0;
+
 	orig_pte = get_clear_flush(vma->vm_mm, addr, ptep, pgsize, ncontig);
 	orig_pte = get_clear_flush(vma->vm_mm, addr, ptep, pgsize, ncontig);
-	if (!pte_same(orig_pte, pte))
-		changed = 1;
 
 
-	/* Make sure we don't lose the dirty state */
+	/* Make sure we don't lose the dirty or young state */
 	if (pte_dirty(orig_pte))
 	if (pte_dirty(orig_pte))
 		pte = pte_mkdirty(pte);
 		pte = pte_mkdirty(pte);
 
 
+	if (pte_young(orig_pte))
+		pte = pte_mkyoung(pte);
+
 	hugeprot = pte_pgprot(pte);
 	hugeprot = pte_pgprot(pte);
 	for (i = 0; i < ncontig; i++, ptep++, addr += pgsize, pfn += dpfn)
 	for (i = 0; i < ncontig; i++, ptep++, addr += pgsize, pfn += dpfn)
 		set_pte_at(vma->vm_mm, addr, ptep, pfn_pte(pfn, hugeprot));
 		set_pte_at(vma->vm_mm, addr, ptep, pfn_pte(pfn, hugeprot));
 
 
-	return changed;
+	return 1;
 }
 }
 
 
 void huge_ptep_set_wrprotect(struct mm_struct *mm,
 void huge_ptep_set_wrprotect(struct mm_struct *mm,

+ 10 - 0
arch/powerpc/kernel/process.c

@@ -1306,6 +1306,16 @@ void show_user_instructions(struct pt_regs *regs)
 
 	pc = regs->nip - (instructions_to_print * 3 / 4 * sizeof(int));
 
+	/*
+	 * Make sure the NIP points at userspace, not kernel text/data or
+	 * elsewhere.
+	 */
+	if (!__access_ok(pc, instructions_to_print * sizeof(int), USER_DS)) {
+		pr_info("%s[%d]: Bad NIP, not dumping instructions.\n",
+			current->comm, current->pid);
+		return;
+	}
+
 	pr_info("%s[%d]: code: ", current->comm, current->pid);
 	pr_info("%s[%d]: code: ", current->comm, current->pid);
 
 
 	for (i = 0; i < instructions_to_print; i++) {
 	for (i = 0; i < instructions_to_print; i++) {

+ 10 - 0
arch/powerpc/kvm/book3s_64_mmu_radix.c

@@ -646,6 +646,16 @@ int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	 */
 	local_irq_disable();
 	ptep = __find_linux_pte(vcpu->arch.pgdir, hva, NULL, &shift);
+	/*
+	 * If the PTE disappeared temporarily due to a THP
+	 * collapse, just return and let the guest try again.
+	 */
+	if (!ptep) {
+		local_irq_enable();
+		if (page)
+			put_page(page);
+		return RESUME_GUEST;
+	}
 	pte = *ptep;
 	local_irq_enable();
 

+ 12 - 8
arch/powerpc/lib/code-patching.c

@@ -28,12 +28,6 @@ static int __patch_instruction(unsigned int *exec_addr, unsigned int instr,
 {
 {
 	int err;
 	int err;
 
 
-	/* Make sure we aren't patching a freed init section */
-	if (init_mem_is_free && init_section_contains(exec_addr, 4)) {
-		pr_debug("Skipping init section patching addr: 0x%px\n", exec_addr);
-		return 0;
-	}
-
 	__put_user_size(instr, patch_addr, 4, err);
 	__put_user_size(instr, patch_addr, 4, err);
 	if (err)
 	if (err)
 		return err;
 		return err;
@@ -148,7 +142,7 @@ static inline int unmap_patch_area(unsigned long addr)
 	return 0;
 	return 0;
 }
 }
 
 
-int patch_instruction(unsigned int *addr, unsigned int instr)
+static int do_patch_instruction(unsigned int *addr, unsigned int instr)
 {
 {
 	int err;
 	int err;
 	unsigned int *patch_addr = NULL;
 	unsigned int *patch_addr = NULL;
@@ -188,12 +182,22 @@ out:
 }
 }
 #else /* !CONFIG_STRICT_KERNEL_RWX */
 #else /* !CONFIG_STRICT_KERNEL_RWX */
 
 
-int patch_instruction(unsigned int *addr, unsigned int instr)
+static int do_patch_instruction(unsigned int *addr, unsigned int instr)
 {
 {
 	return raw_patch_instruction(addr, instr);
 	return raw_patch_instruction(addr, instr);
 }
 }
 
 
 #endif /* CONFIG_STRICT_KERNEL_RWX */
 #endif /* CONFIG_STRICT_KERNEL_RWX */
+
+int patch_instruction(unsigned int *addr, unsigned int instr)
+{
+	/* Make sure we aren't patching a freed init section */
+	if (init_mem_is_free && init_section_contains(addr, 4)) {
+		pr_debug("Skipping init section patching addr: 0x%px\n", addr);
+		return 0;
+	}
+	return do_patch_instruction(addr, instr);
+}
 NOKPROBE_SYMBOL(patch_instruction);
 NOKPROBE_SYMBOL(patch_instruction);
 
 
 int patch_branch(unsigned int *addr, unsigned long target, int flags)
 int patch_branch(unsigned int *addr, unsigned long target, int flags)

+ 3 - 2
arch/powerpc/mm/numa.c

@@ -1217,9 +1217,10 @@ int find_and_online_cpu_nid(int cpu)
 		 * Need to ensure that NODE_DATA is initialized for a node from
 		 * available memory (see memblock_alloc_try_nid). If unable to
 		 * init the node, then default to nearest node that has memory
-		 * installed.
+		 * installed. Skip onlining a node if the subsystems are not
+		 * yet initialized.
 		 */
-		if (try_online_node(new_nid))
+		if (!topology_inited || try_online_node(new_nid))
 			new_nid = first_online_node;
 #else
 		/*

+ 1 - 1
arch/riscv/kernel/setup.c

@@ -186,7 +186,7 @@ static void __init setup_bootmem(void)
 	BUG_ON(mem_size == 0);
 
 	set_max_mapnr(PFN_DOWN(mem_size));
-	max_low_pfn = pfn_base + PFN_DOWN(mem_size);
+	max_low_pfn = memblock_end_of_DRAM();
 
 #ifdef CONFIG_BLK_DEV_INITRD
 	setup_initrd();

+ 14 - 2
arch/x86/entry/vdso/Makefile

@@ -68,7 +68,13 @@ $(obj)/vdso-image-%.c: $(obj)/vdso%.so.dbg $(obj)/vdso%.so $(obj)/vdso2c FORCE
 CFL := $(PROFILING) -mcmodel=small -fPIC -O2 -fasynchronous-unwind-tables -m64 \
        $(filter -g%,$(KBUILD_CFLAGS)) $(call cc-option, -fno-stack-protector) \
        -fno-omit-frame-pointer -foptimize-sibling-calls \
-       -DDISABLE_BRANCH_PROFILING -DBUILD_VDSO $(RETPOLINE_VDSO_CFLAGS)
+       -DDISABLE_BRANCH_PROFILING -DBUILD_VDSO
+
+ifdef CONFIG_RETPOLINE
+ifneq ($(RETPOLINE_VDSO_CFLAGS),)
+  CFL += $(RETPOLINE_VDSO_CFLAGS)
+endif
+endif
 
 
 $(vobjs): KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS) $(RETPOLINE_CFLAGS),$(KBUILD_CFLAGS)) $(CFL)
 
@@ -138,7 +144,13 @@ KBUILD_CFLAGS_32 += $(call cc-option, -fno-stack-protector)
 KBUILD_CFLAGS_32 += $(call cc-option, -foptimize-sibling-calls)
 KBUILD_CFLAGS_32 += -fno-omit-frame-pointer
 KBUILD_CFLAGS_32 += -DDISABLE_BRANCH_PROFILING
-KBUILD_CFLAGS_32 += $(RETPOLINE_VDSO_CFLAGS)
+
+ifdef CONFIG_RETPOLINE
+ifneq ($(RETPOLINE_VDSO_CFLAGS),)
+  KBUILD_CFLAGS_32 += $(RETPOLINE_VDSO_CFLAGS)
+endif
+endif
+
 $(obj)/vdso32.so.dbg: KBUILD_CFLAGS = $(KBUILD_CFLAGS_32)
 
 $(obj)/vdso32.so.dbg: FORCE \

+ 14 - 12
arch/x86/entry/vdso/vclock_gettime.c

@@ -43,8 +43,9 @@ extern u8 hvclock_page
 notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
 notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
 {
 {
 	long ret;
 	long ret;
-	asm("syscall" : "=a" (ret) :
-	    "0" (__NR_clock_gettime), "D" (clock), "S" (ts) : "memory");
+	asm ("syscall" : "=a" (ret), "=m" (*ts) :
+	     "0" (__NR_clock_gettime), "D" (clock), "S" (ts) :
+	     "memory", "rcx", "r11");
 	return ret;
 	return ret;
 }
 }
 
 
@@ -52,8 +53,9 @@ notrace static long vdso_fallback_gtod(struct timeval *tv, struct timezone *tz)
 {
 {
 	long ret;
 	long ret;
 
 
-	asm("syscall" : "=a" (ret) :
-	    "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory");
+	asm ("syscall" : "=a" (ret), "=m" (*tv), "=m" (*tz) :
+	     "0" (__NR_gettimeofday), "D" (tv), "S" (tz) :
+	     "memory", "rcx", "r11");
 	return ret;
 	return ret;
 }
 }
 
 
@@ -64,13 +66,13 @@ notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
 {
 {
 	long ret;
 	long ret;
 
 
-	asm(
+	asm (
 		"mov %%ebx, %%edx \n"
 		"mov %%ebx, %%edx \n"
-		"mov %2, %%ebx \n"
+		"mov %[clock], %%ebx \n"
 		"call __kernel_vsyscall \n"
 		"call __kernel_vsyscall \n"
 		"mov %%edx, %%ebx \n"
 		"mov %%edx, %%ebx \n"
-		: "=a" (ret)
-		: "0" (__NR_clock_gettime), "g" (clock), "c" (ts)
+		: "=a" (ret), "=m" (*ts)
+		: "0" (__NR_clock_gettime), [clock] "g" (clock), "c" (ts)
 		: "memory", "edx");
 		: "memory", "edx");
 	return ret;
 	return ret;
 }
 }
@@ -79,13 +81,13 @@ notrace static long vdso_fallback_gtod(struct timeval *tv, struct timezone *tz)
 {
 {
 	long ret;
 	long ret;
 
 
-	asm(
+	asm (
 		"mov %%ebx, %%edx \n"
 		"mov %%ebx, %%edx \n"
-		"mov %2, %%ebx \n"
+		"mov %[tv], %%ebx \n"
 		"call __kernel_vsyscall \n"
 		"call __kernel_vsyscall \n"
 		"mov %%edx, %%ebx \n"
 		"mov %%edx, %%ebx \n"
-		: "=a" (ret)
-		: "0" (__NR_gettimeofday), "g" (tv), "c" (tz)
+		: "=a" (ret), "=m" (*tv), "=m" (*tz)
+		: "0" (__NR_gettimeofday), [tv] "g" (tv), "c" (tz)
 		: "memory", "edx");
 		: "memory", "edx");
 	return ret;
 	return ret;
 }
 }

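The constraint changes above encode the x86-64 SYSCALL ABI: the instruction clobbers RCX and R11, and the kernel writes through the pointer arguments, so both belong in the asm constraints rather than being left implicit. A standalone userspace sketch of the same idiom (illustration only, not taken from the patch):

#include <sys/syscall.h>
#include <time.h>

static long raw_clock_gettime(long clock_id, struct timespec *ts)
{
	long ret;

	/* "=m" (*ts): the kernel writes *ts; rcx/r11 are trashed by SYSCALL. */
	asm volatile ("syscall"
		      : "=a" (ret), "=m" (*ts)
		      : "0" (__NR_clock_gettime), "D" (clock_id), "S" (ts)
		      : "memory", "rcx", "r11");
	return ret;
}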
+ 6 - 0
arch/x86/include/asm/uv/uv.h

@@ -10,8 +10,13 @@ struct cpumask;
 struct mm_struct;
 
 #ifdef CONFIG_X86_UV
+#include <linux/efi.h>
 
 extern enum uv_system_type get_uv_system_type(void);
+static inline bool is_early_uv_system(void)
+{
+	return !((efi.uv_systab == EFI_INVALID_TABLE_ADDR) || !efi.uv_systab);
+}
 extern int is_uv_system(void);
 extern int is_uv_hubless(void);
 extern void uv_cpu_init(void);
@@ -23,6 +28,7 @@ extern const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
 #else	/* X86_UV */
 
 static inline enum uv_system_type get_uv_system_type(void) { return UV_NONE; }
+static inline bool is_early_uv_system(void)	{ return 0; }
 static inline int is_uv_system(void)	{ return 0; }
 static inline int is_uv_hubless(void)	{ return 0; }
 static inline void uv_cpu_init(void)	{ }

+ 1 - 1
arch/x86/kernel/cpu/amd.c

@@ -922,7 +922,7 @@ static void init_amd(struct cpuinfo_x86 *c)
 static unsigned int amd_size_cache(struct cpuinfo_x86 *c, unsigned int size)
 {
 	/* AMD errata T13 (order #21922) */
-	if ((c->x86 == 6)) {
+	if (c->x86 == 6) {
 		/* Duron Rev A0 */
 		if (c->x86_model == 3 && c->x86_stepping == 0)
 			size = 64;

+ 11 - 6
arch/x86/kernel/cpu/intel_rdt.c

@@ -485,9 +485,7 @@ static int domain_setup_mon_state(struct rdt_resource *r, struct rdt_domain *d)
 	size_t tsize;
 
 	if (is_llc_occupancy_enabled()) {
-		d->rmid_busy_llc = kcalloc(BITS_TO_LONGS(r->num_rmid),
-					   sizeof(unsigned long),
-					   GFP_KERNEL);
+		d->rmid_busy_llc = bitmap_zalloc(r->num_rmid, GFP_KERNEL);
 		if (!d->rmid_busy_llc)
 			return -ENOMEM;
 		INIT_DELAYED_WORK(&d->cqm_limbo, cqm_handle_limbo);
@@ -496,7 +494,7 @@ static int domain_setup_mon_state(struct rdt_resource *r, struct rdt_domain *d)
 		tsize = sizeof(*d->mbm_total);
 		d->mbm_total = kcalloc(r->num_rmid, tsize, GFP_KERNEL);
 		if (!d->mbm_total) {
-			kfree(d->rmid_busy_llc);
+			bitmap_free(d->rmid_busy_llc);
 			return -ENOMEM;
 		}
 	}
@@ -504,7 +502,7 @@ static int domain_setup_mon_state(struct rdt_resource *r, struct rdt_domain *d)
 		tsize = sizeof(*d->mbm_local);
 		d->mbm_local = kcalloc(r->num_rmid, tsize, GFP_KERNEL);
 		if (!d->mbm_local) {
-			kfree(d->rmid_busy_llc);
+			bitmap_free(d->rmid_busy_llc);
 			kfree(d->mbm_total);
 			return -ENOMEM;
 		}
@@ -610,9 +608,16 @@ static void domain_remove_cpu(int cpu, struct rdt_resource *r)
 			cancel_delayed_work(&d->cqm_limbo);
 		}
 
+		/*
+		 * rdt_domain "d" is going to be freed below, so clear
+		 * its pointer from pseudo_lock_region struct.
+		 */
+		if (d->plr)
+			d->plr->d = NULL;
+
 		kfree(d->ctrl_val);
 		kfree(d->mbps_val);
-		kfree(d->rmid_busy_llc);
+		bitmap_free(d->rmid_busy_llc);
 		kfree(d->mbm_total);
 		kfree(d->mbm_local);
 		kfree(d);

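For reference, bitmap_zalloc()/bitmap_free() used above take a bit count, so the BITS_TO_LONGS() arithmetic that the old kcalloc() call spelled out by hand now lives inside the helper. A minimal sketch of the pattern, with hypothetical struct and field names:

#include <linux/bitmap.h>
#include <linux/slab.h>

struct rmid_tracker {
	unsigned long *busy;	/* one bit per RMID */
};

static int rmid_tracker_init(struct rmid_tracker *t, unsigned int num_rmid)
{
	/* Allocates and zeroes BITS_TO_LONGS(num_rmid) longs. */
	t->busy = bitmap_zalloc(num_rmid, GFP_KERNEL);
	return t->busy ? 0 : -ENOMEM;
}

static void rmid_tracker_exit(struct rmid_tracker *t)
{
	bitmap_free(t->busy);	/* kfree()-like, safe on NULL */
}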
+ 3 - 3
arch/x86/kernel/cpu/intel_rdt.h

@@ -529,14 +529,14 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
 int rdtgroup_schemata_show(struct kernfs_open_file *of,
 			   struct seq_file *s, void *v);
 bool rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_domain *d,
-			   u32 _cbm, int closid, bool exclusive);
+			   unsigned long cbm, int closid, bool exclusive);
 unsigned int rdtgroup_cbm_to_size(struct rdt_resource *r, struct rdt_domain *d,
-				  u32 cbm);
+				  unsigned long cbm);
 enum rdtgrp_mode rdtgroup_mode_by_closid(int closid);
 int rdtgroup_tasks_assigned(struct rdtgroup *r);
 int rdtgroup_locksetup_enter(struct rdtgroup *rdtgrp);
 int rdtgroup_locksetup_exit(struct rdtgroup *rdtgrp);
-bool rdtgroup_cbm_overlaps_pseudo_locked(struct rdt_domain *d, u32 _cbm);
+bool rdtgroup_cbm_overlaps_pseudo_locked(struct rdt_domain *d, unsigned long cbm);
 bool rdtgroup_pseudo_locked_in_hierarchy(struct rdt_domain *d);
 int rdt_pseudo_lock_init(void);
 void rdt_pseudo_lock_release(void);

+ 10 - 2
arch/x86/kernel/cpu/intel_rdt_ctrlmondata.c

@@ -404,8 +404,16 @@ int rdtgroup_schemata_show(struct kernfs_open_file *of,
 			for_each_alloc_enabled_rdt_resource(r)
 				seq_printf(s, "%s:uninitialized\n", r->name);
 		} else if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
-			seq_printf(s, "%s:%d=%x\n", rdtgrp->plr->r->name,
-				   rdtgrp->plr->d->id, rdtgrp->plr->cbm);
+			if (!rdtgrp->plr->d) {
+				rdt_last_cmd_clear();
+				rdt_last_cmd_puts("Cache domain offline\n");
+				ret = -ENODEV;
+			} else {
+				seq_printf(s, "%s:%d=%x\n",
+					   rdtgrp->plr->r->name,
+					   rdtgrp->plr->d->id,
+					   rdtgrp->plr->cbm);
+			}
 		} else {
 			closid = rdtgrp->closid;
 			for_each_alloc_enabled_rdt_resource(r) {

+ 22 - 10
arch/x86/kernel/cpu/intel_rdt_pseudo_lock.c

@@ -789,25 +789,27 @@ int rdtgroup_locksetup_exit(struct rdtgroup *rdtgrp)
 /**
 /**
  * rdtgroup_cbm_overlaps_pseudo_locked - Test if CBM or portion is pseudo-locked
  * rdtgroup_cbm_overlaps_pseudo_locked - Test if CBM or portion is pseudo-locked
  * @d: RDT domain
  * @d: RDT domain
- * @_cbm: CBM to test
+ * @cbm: CBM to test
  *
  *
- * @d represents a cache instance and @_cbm a capacity bitmask that is
- * considered for it. Determine if @_cbm overlaps with any existing
+ * @d represents a cache instance and @cbm a capacity bitmask that is
+ * considered for it. Determine if @cbm overlaps with any existing
  * pseudo-locked region on @d.
  * pseudo-locked region on @d.
  *
  *
- * Return: true if @_cbm overlaps with pseudo-locked region on @d, false
+ * @cbm is unsigned long, even if only 32 bits are used, to make the
+ * bitmap functions work correctly.
+ *
+ * Return: true if @cbm overlaps with pseudo-locked region on @d, false
  * otherwise.
  * otherwise.
  */
  */
-bool rdtgroup_cbm_overlaps_pseudo_locked(struct rdt_domain *d, u32 _cbm)
+bool rdtgroup_cbm_overlaps_pseudo_locked(struct rdt_domain *d, unsigned long cbm)
 {
 {
-	unsigned long *cbm = (unsigned long *)&_cbm;
-	unsigned long *cbm_b;
 	unsigned int cbm_len;
 	unsigned int cbm_len;
+	unsigned long cbm_b;
 
 
 	if (d->plr) {
 	if (d->plr) {
 		cbm_len = d->plr->r->cache.cbm_len;
 		cbm_len = d->plr->r->cache.cbm_len;
-		cbm_b = (unsigned long *)&d->plr->cbm;
-		if (bitmap_intersects(cbm, cbm_b, cbm_len))
+		cbm_b = d->plr->cbm;
+		if (bitmap_intersects(&cbm, &cbm_b, cbm_len))
 			return true;
 			return true;
 	}
 	}
 	return false;
 	return false;
@@ -1172,6 +1174,11 @@ static int pseudo_lock_measure_cycles(struct rdtgroup *rdtgrp, int sel)
 		goto out;
 		goto out;
 	}
 	}
 
 
+	if (!plr->d) {
+		ret = -ENODEV;
+		goto out;
+	}
+
 	plr->thread_done = 0;
 	plr->thread_done = 0;
 	cpu = cpumask_first(&plr->d->cpu_mask);
 	cpu = cpumask_first(&plr->d->cpu_mask);
 	if (!cpu_online(cpu)) {
 	if (!cpu_online(cpu)) {
@@ -1236,7 +1243,7 @@ static ssize_t pseudo_lock_measure_trigger(struct file *file,
 	buf[buf_size] = '\0';
 	buf[buf_size] = '\0';
 	ret = kstrtoint(buf, 10, &sel);
 	ret = kstrtoint(buf, 10, &sel);
 	if (ret == 0) {
 	if (ret == 0) {
-		if (sel != 1)
+		if (sel != 1 && sel != 2 && sel != 3)
 			return -EINVAL;
 			return -EINVAL;
 		ret = debugfs_file_get(file->f_path.dentry);
 		ret = debugfs_file_get(file->f_path.dentry);
 		if (ret)
 		if (ret)
@@ -1492,6 +1499,11 @@ static int pseudo_lock_dev_mmap(struct file *filp, struct vm_area_struct *vma)
 
 
 	plr = rdtgrp->plr;
 	plr = rdtgrp->plr;
 
 
+	if (!plr->d) {
+		mutex_unlock(&rdtgroup_mutex);
+		return -ENODEV;
+	}
+
 	/*
 	/*
 	 * Task is required to run with affinity to the cpus associated
 	 * Task is required to run with affinity to the cpus associated
 	 * with the pseudo-locked region. If this is not the case the task
 	 * with the pseudo-locked region. If this is not the case the task

+ 182 - 28
arch/x86/kernel/cpu/intel_rdt_rdtgroup.c

@@ -268,17 +268,27 @@ static int rdtgroup_cpus_show(struct kernfs_open_file *of,
 			      struct seq_file *s, void *v)
 			      struct seq_file *s, void *v)
 {
 {
 	struct rdtgroup *rdtgrp;
 	struct rdtgroup *rdtgrp;
+	struct cpumask *mask;
 	int ret = 0;
 	int ret = 0;
 
 
 	rdtgrp = rdtgroup_kn_lock_live(of->kn);
 	rdtgrp = rdtgroup_kn_lock_live(of->kn);
 
 
 	if (rdtgrp) {
 	if (rdtgrp) {
-		if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED)
-			seq_printf(s, is_cpu_list(of) ? "%*pbl\n" : "%*pb\n",
-				   cpumask_pr_args(&rdtgrp->plr->d->cpu_mask));
-		else
+		if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
+			if (!rdtgrp->plr->d) {
+				rdt_last_cmd_clear();
+				rdt_last_cmd_puts("Cache domain offline\n");
+				ret = -ENODEV;
+			} else {
+				mask = &rdtgrp->plr->d->cpu_mask;
+				seq_printf(s, is_cpu_list(of) ?
+					   "%*pbl\n" : "%*pb\n",
+					   cpumask_pr_args(mask));
+			}
+		} else {
 			seq_printf(s, is_cpu_list(of) ? "%*pbl\n" : "%*pb\n",
 			seq_printf(s, is_cpu_list(of) ? "%*pbl\n" : "%*pb\n",
 				   cpumask_pr_args(&rdtgrp->cpu_mask));
 				   cpumask_pr_args(&rdtgrp->cpu_mask));
+		}
 	} else {
 	} else {
 		ret = -ENOENT;
 		ret = -ENOENT;
 	}
 	}
@@ -961,7 +971,78 @@ static int rdtgroup_mode_show(struct kernfs_open_file *of,
 }
 }
 
 
 /**
 /**
- * rdtgroup_cbm_overlaps - Does CBM for intended closid overlap with other
+ * rdt_cdp_peer_get - Retrieve CDP peer if it exists
+ * @r: RDT resource to which RDT domain @d belongs
+ * @d: Cache instance for which a CDP peer is requested
+ * @r_cdp: RDT resource that shares hardware with @r (RDT resource peer)
+ *         Used to return the result.
+ * @d_cdp: RDT domain that shares hardware with @d (RDT domain peer)
+ *         Used to return the result.
+ *
+ * RDT resources are managed independently and by extension the RDT domains
+ * (RDT resource instances) are managed independently also. The Code and
+ * Data Prioritization (CDP) RDT resources, while managed independently,
+ * could refer to the same underlying hardware. For example,
+ * RDT_RESOURCE_L2CODE and RDT_RESOURCE_L2DATA both refer to the L2 cache.
+ *
+ * When provided with an RDT resource @r and an instance of that RDT
+ * resource @d rdt_cdp_peer_get() will return if there is a peer RDT
+ * resource and the exact instance that shares the same hardware.
+ *
+ * Return: 0 if a CDP peer was found, <0 on error or if no CDP peer exists.
+ *         If a CDP peer was found, @r_cdp will point to the peer RDT resource
+ *         and @d_cdp will point to the peer RDT domain.
+ */
+static int rdt_cdp_peer_get(struct rdt_resource *r, struct rdt_domain *d,
+			    struct rdt_resource **r_cdp,
+			    struct rdt_domain **d_cdp)
+{
+	struct rdt_resource *_r_cdp = NULL;
+	struct rdt_domain *_d_cdp = NULL;
+	int ret = 0;
+
+	switch (r->rid) {
+	case RDT_RESOURCE_L3DATA:
+		_r_cdp = &rdt_resources_all[RDT_RESOURCE_L3CODE];
+		break;
+	case RDT_RESOURCE_L3CODE:
+		_r_cdp =  &rdt_resources_all[RDT_RESOURCE_L3DATA];
+		break;
+	case RDT_RESOURCE_L2DATA:
+		_r_cdp =  &rdt_resources_all[RDT_RESOURCE_L2CODE];
+		break;
+	case RDT_RESOURCE_L2CODE:
+		_r_cdp =  &rdt_resources_all[RDT_RESOURCE_L2DATA];
+		break;
+	default:
+		ret = -ENOENT;
+		goto out;
+	}
+
+	/*
+	 * When a new CPU comes online and CDP is enabled then the new
+	 * RDT domains (if any) associated with both CDP RDT resources
+	 * are added in the same CPU online routine while the
+	 * rdtgroup_mutex is held. It should thus not happen for one
+	 * RDT domain to exist and be associated with its RDT CDP
+	 * resource but there is no RDT domain associated with the
+	 * peer RDT CDP resource. Hence the WARN.
+	 */
+	_d_cdp = rdt_find_domain(_r_cdp, d->id, NULL);
+	if (WARN_ON(!_d_cdp)) {
+		_r_cdp = NULL;
+		ret = -EINVAL;
+	}
+
+out:
+	*r_cdp = _r_cdp;
+	*d_cdp = _d_cdp;
+
+	return ret;
+}
+
+/**
+ * __rdtgroup_cbm_overlaps - Does CBM for intended closid overlap with other
  * @r: Resource to which domain instance @d belongs.
  * @r: Resource to which domain instance @d belongs.
  * @d: The domain instance for which @closid is being tested.
  * @d: The domain instance for which @closid is being tested.
  * @cbm: Capacity bitmask being tested.
  * @cbm: Capacity bitmask being tested.
@@ -975,33 +1056,34 @@ static int rdtgroup_mode_show(struct kernfs_open_file *of,
  * is false then overlaps with any resource group or hardware entities
  * is false then overlaps with any resource group or hardware entities
  * will be considered.
  * will be considered.
  *
  *
+ * @cbm is unsigned long, even if only 32 bits are used, to make the
+ * bitmap functions work correctly.
+ *
  * Return: false if CBM does not overlap, true if it does.
  * Return: false if CBM does not overlap, true if it does.
  */
  */
-bool rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_domain *d,
-			   u32 _cbm, int closid, bool exclusive)
+static bool __rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_domain *d,
+				    unsigned long cbm, int closid, bool exclusive)
 {
 {
-	unsigned long *cbm = (unsigned long *)&_cbm;
-	unsigned long *ctrl_b;
 	enum rdtgrp_mode mode;
 	enum rdtgrp_mode mode;
+	unsigned long ctrl_b;
 	u32 *ctrl;
 	u32 *ctrl;
 	int i;
 	int i;
 
 
 	/* Check for any overlap with regions used by hardware directly */
 	/* Check for any overlap with regions used by hardware directly */
 	if (!exclusive) {
 	if (!exclusive) {
-		if (bitmap_intersects(cbm,
-				      (unsigned long *)&r->cache.shareable_bits,
-				      r->cache.cbm_len))
+		ctrl_b = r->cache.shareable_bits;
+		if (bitmap_intersects(&cbm, &ctrl_b, r->cache.cbm_len))
 			return true;
 			return true;
 	}
 	}
 
 
 	/* Check for overlap with other resource groups */
 	/* Check for overlap with other resource groups */
 	ctrl = d->ctrl_val;
 	ctrl = d->ctrl_val;
 	for (i = 0; i < closids_supported(); i++, ctrl++) {
 	for (i = 0; i < closids_supported(); i++, ctrl++) {
-		ctrl_b = (unsigned long *)ctrl;
+		ctrl_b = *ctrl;
 		mode = rdtgroup_mode_by_closid(i);
 		mode = rdtgroup_mode_by_closid(i);
 		if (closid_allocated(i) && i != closid &&
 		if (closid_allocated(i) && i != closid &&
 		    mode != RDT_MODE_PSEUDO_LOCKSETUP) {
 		    mode != RDT_MODE_PSEUDO_LOCKSETUP) {
-			if (bitmap_intersects(cbm, ctrl_b, r->cache.cbm_len)) {
+			if (bitmap_intersects(&cbm, &ctrl_b, r->cache.cbm_len)) {
 				if (exclusive) {
 				if (exclusive) {
 					if (mode == RDT_MODE_EXCLUSIVE)
 					if (mode == RDT_MODE_EXCLUSIVE)
 						return true;
 						return true;
@@ -1015,6 +1097,41 @@ bool rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_domain *d,
 	return false;
 	return false;
 }
 }
 
 
+/**
+ * rdtgroup_cbm_overlaps - Does CBM overlap with other use of hardware
+ * @r: Resource to which domain instance @d belongs.
+ * @d: The domain instance for which @closid is being tested.
+ * @cbm: Capacity bitmask being tested.
+ * @closid: Intended closid for @cbm.
+ * @exclusive: Only check if overlaps with exclusive resource groups
+ *
+ * Resources that can be allocated using a CBM can use the CBM to control
+ * the overlap of these allocations. rdtgroup_cmb_overlaps() is the test
+ * for overlap. Overlap test is not limited to the specific resource for
+ * which the CBM is intended though - when dealing with CDP resources that
+ * share the underlying hardware the overlap check should be performed on
+ * the CDP resource sharing the hardware also.
+ *
+ * Refer to description of __rdtgroup_cbm_overlaps() for the details of the
+ * overlap test.
+ *
+ * Return: true if CBM overlap detected, false if there is no overlap
+ */
+bool rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_domain *d,
+			   unsigned long cbm, int closid, bool exclusive)
+{
+	struct rdt_resource *r_cdp;
+	struct rdt_domain *d_cdp;
+
+	if (__rdtgroup_cbm_overlaps(r, d, cbm, closid, exclusive))
+		return true;
+
+	if (rdt_cdp_peer_get(r, d, &r_cdp, &d_cdp) < 0)
+		return false;
+
+	return  __rdtgroup_cbm_overlaps(r_cdp, d_cdp, cbm, closid, exclusive);
+}
+
 /**
 /**
  * rdtgroup_mode_test_exclusive - Test if this resource group can be exclusive
  * rdtgroup_mode_test_exclusive - Test if this resource group can be exclusive
  *
  *
@@ -1138,15 +1255,18 @@ out:
  * computed by first dividing the total cache size by the CBM length to
  * computed by first dividing the total cache size by the CBM length to
  * determine how many bytes each bit in the bitmask represents. The result
  * determine how many bytes each bit in the bitmask represents. The result
  * is multiplied with the number of bits set in the bitmask.
  * is multiplied with the number of bits set in the bitmask.
+ *
+ * @cbm is unsigned long, even if only 32 bits are used to make the
+ * bitmap functions work correctly.
  */
  */
 unsigned int rdtgroup_cbm_to_size(struct rdt_resource *r,
 unsigned int rdtgroup_cbm_to_size(struct rdt_resource *r,
-				  struct rdt_domain *d, u32 cbm)
+				  struct rdt_domain *d, unsigned long cbm)
 {
 {
 	struct cpu_cacheinfo *ci;
 	struct cpu_cacheinfo *ci;
 	unsigned int size = 0;
 	unsigned int size = 0;
 	int num_b, i;
 	int num_b, i;
 
 
-	num_b = bitmap_weight((unsigned long *)&cbm, r->cache.cbm_len);
+	num_b = bitmap_weight(&cbm, r->cache.cbm_len);
 	ci = get_cpu_cacheinfo(cpumask_any(&d->cpu_mask));
 	ci = get_cpu_cacheinfo(cpumask_any(&d->cpu_mask));
 	for (i = 0; i < ci->num_leaves; i++) {
 	for (i = 0; i < ci->num_leaves; i++) {
 		if (ci->info_list[i].level == r->cache_level) {
 		if (ci->info_list[i].level == r->cache_level) {
@@ -1172,6 +1292,7 @@ static int rdtgroup_size_show(struct kernfs_open_file *of,
 	struct rdt_resource *r;
 	struct rdt_resource *r;
 	struct rdt_domain *d;
 	struct rdt_domain *d;
 	unsigned int size;
 	unsigned int size;
+	int ret = 0;
 	bool sep;
 	bool sep;
 	u32 ctrl;
 	u32 ctrl;
 
 
@@ -1182,11 +1303,18 @@ static int rdtgroup_size_show(struct kernfs_open_file *of,
 	}
 	}
 
 
 	if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
 	if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
-		seq_printf(s, "%*s:", max_name_width, rdtgrp->plr->r->name);
-		size = rdtgroup_cbm_to_size(rdtgrp->plr->r,
-					    rdtgrp->plr->d,
-					    rdtgrp->plr->cbm);
-		seq_printf(s, "%d=%u\n", rdtgrp->plr->d->id, size);
+		if (!rdtgrp->plr->d) {
+			rdt_last_cmd_clear();
+			rdt_last_cmd_puts("Cache domain offline\n");
+			ret = -ENODEV;
+		} else {
+			seq_printf(s, "%*s:", max_name_width,
+				   rdtgrp->plr->r->name);
+			size = rdtgroup_cbm_to_size(rdtgrp->plr->r,
+						    rdtgrp->plr->d,
+						    rdtgrp->plr->cbm);
+			seq_printf(s, "%d=%u\n", rdtgrp->plr->d->id, size);
+		}
 		goto out;
 		goto out;
 	}
 	}
 
 
@@ -1216,7 +1344,7 @@ static int rdtgroup_size_show(struct kernfs_open_file *of,
 out:
 out:
 	rdtgroup_kn_unlock(of->kn);
 	rdtgroup_kn_unlock(of->kn);
 
 
-	return 0;
+	return ret;
 }
 }
 
 
 /* rdtgroup information files for one cache resource. */
 /* rdtgroup information files for one cache resource. */
@@ -2350,13 +2478,16 @@ static void cbm_ensure_valid(u32 *_val, struct rdt_resource *r)
  */
  */
 static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp)
 static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp)
 {
 {
+	struct rdt_resource *r_cdp = NULL;
+	struct rdt_domain *d_cdp = NULL;
 	u32 used_b = 0, unused_b = 0;
 	u32 used_b = 0, unused_b = 0;
 	u32 closid = rdtgrp->closid;
 	u32 closid = rdtgrp->closid;
 	struct rdt_resource *r;
 	struct rdt_resource *r;
+	unsigned long tmp_cbm;
 	enum rdtgrp_mode mode;
 	enum rdtgrp_mode mode;
 	struct rdt_domain *d;
 	struct rdt_domain *d;
+	u32 peer_ctl, *ctrl;
 	int i, ret;
 	int i, ret;
-	u32 *ctrl;
 
 
 	for_each_alloc_enabled_rdt_resource(r) {
 	for_each_alloc_enabled_rdt_resource(r) {
 		/*
 		/*
@@ -2366,6 +2497,7 @@ static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp)
 		if (r->rid == RDT_RESOURCE_MBA)
 		if (r->rid == RDT_RESOURCE_MBA)
 			continue;
 			continue;
 		list_for_each_entry(d, &r->domains, list) {
 		list_for_each_entry(d, &r->domains, list) {
+			rdt_cdp_peer_get(r, d, &r_cdp, &d_cdp);
 			d->have_new_ctrl = false;
 			d->have_new_ctrl = false;
 			d->new_ctrl = r->cache.shareable_bits;
 			d->new_ctrl = r->cache.shareable_bits;
 			used_b = r->cache.shareable_bits;
 			used_b = r->cache.shareable_bits;
@@ -2375,9 +2507,19 @@ static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp)
 					mode = rdtgroup_mode_by_closid(i);
 					mode = rdtgroup_mode_by_closid(i);
 					if (mode == RDT_MODE_PSEUDO_LOCKSETUP)
 					if (mode == RDT_MODE_PSEUDO_LOCKSETUP)
 						break;
 						break;
-					used_b |= *ctrl;
+					/*
+					 * If CDP is active include peer
+					 * domain's usage to ensure there
+					 * is no overlap with an exclusive
+					 * group.
+					 */
+					if (d_cdp)
+						peer_ctl = d_cdp->ctrl_val[i];
+					else
+						peer_ctl = 0;
+					used_b |= *ctrl | peer_ctl;
 					if (mode == RDT_MODE_SHAREABLE)
 					if (mode == RDT_MODE_SHAREABLE)
-						d->new_ctrl |= *ctrl;
+						d->new_ctrl |= *ctrl | peer_ctl;
 				}
 				}
 			}
 			}
 			if (d->plr && d->plr->cbm > 0)
 			if (d->plr && d->plr->cbm > 0)
@@ -2390,9 +2532,14 @@ static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp)
 			 * modify the CBM based on system availability.
 			 * modify the CBM based on system availability.
 			 */
 			 */
 			cbm_ensure_valid(&d->new_ctrl, r);
 			cbm_ensure_valid(&d->new_ctrl, r);
-			if (bitmap_weight((unsigned long *) &d->new_ctrl,
-					  r->cache.cbm_len) <
-					r->cache.min_cbm_bits) {
+			/*
+			 * Assign the u32 CBM to an unsigned long to ensure
+			 * that bitmap_weight() does not access out-of-bound
+			 * memory.
+			 */
+			tmp_cbm = d->new_ctrl;
+			if (bitmap_weight(&tmp_cbm, r->cache.cbm_len) <
+			    r->cache.min_cbm_bits) {
 				rdt_last_cmd_printf("no space on %s:%d\n",
 				rdt_last_cmd_printf("no space on %s:%d\n",
 						    r->name, d->id);
 						    r->name, d->id);
 				return -ENOSPC;
 				return -ENOSPC;
@@ -2795,6 +2942,13 @@ static int rdtgroup_show_options(struct seq_file *seq, struct kernfs_root *kf)
 {
 {
 	if (rdt_resources_all[RDT_RESOURCE_L3DATA].alloc_enabled)
 	if (rdt_resources_all[RDT_RESOURCE_L3DATA].alloc_enabled)
 		seq_puts(seq, ",cdp");
 		seq_puts(seq, ",cdp");
+
+	if (rdt_resources_all[RDT_RESOURCE_L2DATA].alloc_enabled)
+		seq_puts(seq, ",cdpl2");
+
+	if (is_mba_sc(&rdt_resources_all[RDT_RESOURCE_MBA]))
+		seq_puts(seq, ",mba_MBps");
+
 	return 0;
 	return 0;
 }
 }
 
 

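A pattern that recurs through the rdtgroup and pseudo-lock changes above is copying a 32-bit CBM into a local unsigned long before calling the bitmap helpers, which operate on arrays of unsigned long and would otherwise read past the u32 on 64-bit builds. Condensed into a sketch (hypothetical helper name):

#include <linux/bitmap.h>
#include <linux/types.h>

static bool cbm_overlaps(u32 a, u32 b, unsigned int cbm_len)
{
	unsigned long _a = a, _b = b;

	/* bitmap_intersects() expects unsigned long words, not bare u32s. */
	return bitmap_intersects(&_a, &_b, cbm_len);
}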
+ 4 - 0
arch/x86/kernel/tsc.c

@@ -26,6 +26,7 @@
 #include <asm/apic.h>
 #include <asm/apic.h>
 #include <asm/intel-family.h>
 #include <asm/i8259.h>
+#include <asm/uv/uv.h>
 
 unsigned int __read_mostly cpu_khz;	/* TSC clocks / usec, not used here */
 EXPORT_SYMBOL(cpu_khz);
@@ -1433,6 +1434,9 @@ void __init tsc_early_init(void)
 {
 	if (!boot_cpu_has(X86_FEATURE_TSC))
 		return;
+	/* Don't change UV TSC multi-chassis synchronization */
+	if (is_early_uv_system())
+		return;
 	if (!determine_cpu_tsc_frequencies(true))
 		return;
 	loops_per_jiffy = get_loops_per_jiffy();

+ 20 - 4
arch/x86/kvm/mmu.c

@@ -249,6 +249,17 @@ static u64 __read_mostly shadow_nonpresent_or_rsvd_mask;
  */
 static const u64 shadow_nonpresent_or_rsvd_mask_len = 5;
 
+/*
+ * In some cases, we need to preserve the GFN of a non-present or reserved
+ * SPTE when we usurp the upper five bits of the physical address space to
+ * defend against L1TF, e.g. for MMIO SPTEs.  To preserve the GFN, we'll
+ * shift bits of the GFN that overlap with shadow_nonpresent_or_rsvd_mask
+ * left into the reserved bits, i.e. the GFN in the SPTE will be split into
+ * high and low parts.  This mask covers the lower bits of the GFN.
+ */
+static u64 __read_mostly shadow_nonpresent_or_rsvd_lower_gfn_mask;
+
+
 static void mmu_spte_set(u64 *sptep, u64 spte);
 static union kvm_mmu_page_role
 kvm_mmu_calc_root_page_role(struct kvm_vcpu *vcpu);
@@ -357,9 +368,7 @@ static bool is_mmio_spte(u64 spte)
 
 static gfn_t get_mmio_spte_gfn(u64 spte)
 {
-	u64 mask = generation_mmio_spte_mask(MMIO_GEN_MASK) | shadow_mmio_mask |
-		   shadow_nonpresent_or_rsvd_mask;
-	u64 gpa = spte & ~mask;
+	u64 gpa = spte & shadow_nonpresent_or_rsvd_lower_gfn_mask;
 
 	gpa |= (spte >> shadow_nonpresent_or_rsvd_mask_len)
 	       & shadow_nonpresent_or_rsvd_mask;
@@ -423,6 +432,8 @@ EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes);
 
 static void kvm_mmu_reset_all_pte_masks(void)
 {
+	u8 low_phys_bits;
+
 	shadow_user_mask = 0;
 	shadow_accessed_mask = 0;
 	shadow_dirty_mask = 0;
@@ -437,12 +448,17 @@ static void kvm_mmu_reset_all_pte_masks(void)
 	 * appropriate mask to guard against L1TF attacks. Otherwise, it is
 	 * assumed that the CPU is not vulnerable to L1TF.
 	 */
+	low_phys_bits = boot_cpu_data.x86_phys_bits;
 	if (boot_cpu_data.x86_phys_bits <
-	    52 - shadow_nonpresent_or_rsvd_mask_len)
+	    52 - shadow_nonpresent_or_rsvd_mask_len) {
 		shadow_nonpresent_or_rsvd_mask =
 			rsvd_bits(boot_cpu_data.x86_phys_bits -
 				  shadow_nonpresent_or_rsvd_mask_len,
 				  boot_cpu_data.x86_phys_bits - 1);
+		low_phys_bits -= shadow_nonpresent_or_rsvd_mask_len;
+	}
+	shadow_nonpresent_or_rsvd_lower_gfn_mask =
+		GENMASK_ULL(low_phys_bits - 1, PAGE_SHIFT);
 }
 
 static int is_cpuid_PSE36(void)

+ 76 - 61
arch/x86/kvm/vmx.c

@@ -121,7 +121,6 @@ module_param_named(pml, enable_pml, bool, S_IRUGO);
 
 #define MSR_BITMAP_MODE_X2APIC		1
 #define MSR_BITMAP_MODE_X2APIC_APICV	2
-#define MSR_BITMAP_MODE_LM		4
 
 #define KVM_VMX_TSC_MULTIPLIER_MAX     0xffffffffffffffffULL
 
@@ -857,6 +856,7 @@ struct nested_vmx {
 
 	/* to migrate it to L2 if VM_ENTRY_LOAD_DEBUG_CONTROLS is off */
 	u64 vmcs01_debugctl;
+	u64 vmcs01_guest_bndcfgs;
 
 	u16 vpid02;
 	u16 last_vpid;
@@ -2899,8 +2899,7 @@ static void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
 		vmx->msr_host_kernel_gs_base = read_msr(MSR_KERNEL_GS_BASE);
 	}
 
-	if (is_long_mode(&vmx->vcpu))
-		wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
+	wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
 #else
 	savesegment(fs, fs_sel);
 	savesegment(gs, gs_sel);
@@ -2951,8 +2950,7 @@ static void vmx_prepare_switch_to_host(struct vcpu_vmx *vmx)
 	vmx->loaded_cpu_state = NULL;
 
 #ifdef CONFIG_X86_64
-	if (is_long_mode(&vmx->vcpu))
-		rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
+	rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
 #endif
 	if (host_state->ldt_sel || (host_state->gs_sel & 7)) {
 		kvm_load_ldt(host_state->ldt_sel);
@@ -2980,24 +2978,19 @@ static void vmx_prepare_switch_to_host(struct vcpu_vmx *vmx)
 #ifdef CONFIG_X86_64
 static u64 vmx_read_guest_kernel_gs_base(struct vcpu_vmx *vmx)
 {
-	if (is_long_mode(&vmx->vcpu)) {
-		preempt_disable();
-		if (vmx->loaded_cpu_state)
-			rdmsrl(MSR_KERNEL_GS_BASE,
-			       vmx->msr_guest_kernel_gs_base);
-		preempt_enable();
-	}
+	preempt_disable();
+	if (vmx->loaded_cpu_state)
+		rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
+	preempt_enable();
 	return vmx->msr_guest_kernel_gs_base;
 }
 
 static void vmx_write_guest_kernel_gs_base(struct vcpu_vmx *vmx, u64 data)
 {
-	if (is_long_mode(&vmx->vcpu)) {
-		preempt_disable();
-		if (vmx->loaded_cpu_state)
-			wrmsrl(MSR_KERNEL_GS_BASE, data);
-		preempt_enable();
-	}
+	preempt_disable();
+	if (vmx->loaded_cpu_state)
+		wrmsrl(MSR_KERNEL_GS_BASE, data);
+	preempt_enable();
 	vmx->msr_guest_kernel_gs_base = data;
 }
 #endif
@@ -3533,9 +3526,6 @@ static void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, bool apicv)
 		VM_EXIT_LOAD_IA32_EFER | VM_EXIT_SAVE_IA32_EFER |
 		VM_EXIT_SAVE_VMX_PREEMPTION_TIMER | VM_EXIT_ACK_INTR_ON_EXIT;
 
-	if (kvm_mpx_supported())
-		msrs->exit_ctls_high |= VM_EXIT_CLEAR_BNDCFGS;
-
 	/* We support free control of debug control saving. */
 	msrs->exit_ctls_low &= ~VM_EXIT_SAVE_DEBUG_CONTROLS;
 
@@ -3552,8 +3542,6 @@ static void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, bool apicv)
 		VM_ENTRY_LOAD_IA32_PAT;
 	msrs->entry_ctls_high |=
 		(VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR | VM_ENTRY_LOAD_IA32_EFER);
-	if (kvm_mpx_supported())
-		msrs->entry_ctls_high |= VM_ENTRY_LOAD_BNDCFGS;
 
 	/* We support free control of debug control loading. */
 	msrs->entry_ctls_low &= ~VM_ENTRY_LOAD_DEBUG_CONTROLS;
@@ -3601,12 +3589,12 @@ static void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, bool apicv)
 		msrs->secondary_ctls_high);
 	msrs->secondary_ctls_low = 0;
 	msrs->secondary_ctls_high &=
-		SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
 		SECONDARY_EXEC_DESC |
 		SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
 		SECONDARY_EXEC_APIC_REGISTER_VIRT |
 		SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
 		SECONDARY_EXEC_WBINVD_EXITING;
+
 	/*
 	 * We can emulate "VMCS shadowing," even if the hardware
 	 * doesn't support it.
@@ -3663,6 +3651,10 @@ static void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, bool apicv)
 		msrs->secondary_ctls_high |=
 			SECONDARY_EXEC_UNRESTRICTED_GUEST;
 
+	if (flexpriority_enabled)
+		msrs->secondary_ctls_high |=
+			SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
+
 	/* miscellaneous data */
 	rdmsr(MSR_IA32_VMX_MISC,
 		msrs->misc_low,
@@ -5073,19 +5065,6 @@ static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
 	if (!msr)
 		return;
 
-	/*
-	 * MSR_KERNEL_GS_BASE is not intercepted when the guest is in
-	 * 64-bit mode as a 64-bit kernel may frequently access the
-	 * MSR.  This means we need to manually save/restore the MSR
-	 * when switching between guest and host state, but only if
-	 * the guest is in 64-bit mode.  Sync our cached value if the
-	 * guest is transitioning to 32-bit mode and the CPU contains
-	 * guest state, i.e. the cache is stale.
-	 */
-#ifdef CONFIG_X86_64
-	if (!(efer & EFER_LMA))
-		(void)vmx_read_guest_kernel_gs_base(vmx);
-#endif
 	vcpu->arch.efer = efer;
 	if (efer & EFER_LMA) {
 		vm_entry_controls_setbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE);
@@ -6078,9 +6057,6 @@ static u8 vmx_msr_bitmap_mode(struct kvm_vcpu *vcpu)
 			mode |= MSR_BITMAP_MODE_X2APIC_APICV;
 	}
 
-	if (is_long_mode(vcpu))
-		mode |= MSR_BITMAP_MODE_LM;
-
 	return mode;
 }
 
@@ -6121,9 +6097,6 @@ static void vmx_update_msr_bitmap(struct kvm_vcpu *vcpu)
 	if (!changed)
 		return;
 
-	vmx_set_intercept_for_msr(msr_bitmap, MSR_KERNEL_GS_BASE, MSR_TYPE_RW,
-				  !(mode & MSR_BITMAP_MODE_LM));
-
 	if (changed & (MSR_BITMAP_MODE_X2APIC | MSR_BITMAP_MODE_X2APIC_APICV))
 		vmx_update_msr_bitmap_x2apic(msr_bitmap, mode);
 
@@ -6189,6 +6162,11 @@ static void vmx_complete_nested_posted_interrupt(struct kvm_vcpu *vcpu)
 	nested_mark_vmcs12_pages_dirty(vcpu);
 }
 
+static u8 vmx_get_rvi(void)
+{
+	return vmcs_read16(GUEST_INTR_STATUS) & 0xff;
+}
+
 static bool vmx_guest_apic_has_interrupt(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -6201,7 +6179,7 @@ static bool vmx_guest_apic_has_interrupt(struct kvm_vcpu *vcpu)
 		WARN_ON_ONCE(!vmx->nested.virtual_apic_page))
 		return false;
 
-	rvi = vmcs_read16(GUEST_INTR_STATUS) & 0xff;
+	rvi = vmx_get_rvi();
 
 	vapic_page = kmap(vmx->nested.virtual_apic_page);
 	vppr = *((u32 *)(vapic_page + APIC_PROCPRI));
@@ -10245,15 +10223,16 @@ static void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu)
 	if (!lapic_in_kernel(vcpu))
 		return;
 
+	if (!flexpriority_enabled &&
+	    !cpu_has_vmx_virtualize_x2apic_mode())
+		return;
+
 	/* Postpone execution until vmcs01 is the current VMCS. */
 	if (is_guest_mode(vcpu)) {
 		to_vmx(vcpu)->nested.change_vmcs01_virtual_apic_mode = true;
 		return;
 	}
 
-	if (!cpu_need_tpr_shadow(vcpu))
-		return;
-
 	sec_exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
 	sec_exec_control &= ~(SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
 			      SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE);
@@ -10375,6 +10354,14 @@ static int vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu)
 	return max_irr;
 }
 
+static u8 vmx_has_apicv_interrupt(struct kvm_vcpu *vcpu)
+{
+	u8 rvi = vmx_get_rvi();
+	u8 vppr = kvm_lapic_get_reg(vcpu->arch.apic, APIC_PROCPRI);
+
+	return ((rvi & 0xf0) > (vppr & 0xf0));
+}
+
 static void vmx_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
 {
 	if (!kvm_vcpu_apicv_active(vcpu))
@@ -11264,6 +11251,23 @@ static void nested_vmx_cr_fixed1_bits_update(struct kvm_vcpu *vcpu)
 #undef cr4_fixed1_update
 }
 
+static void nested_vmx_entry_exit_ctls_update(struct kvm_vcpu *vcpu)
+{
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+	if (kvm_mpx_supported()) {
+		bool mpx_enabled = guest_cpuid_has(vcpu, X86_FEATURE_MPX);
+
+		if (mpx_enabled) {
+			vmx->nested.msrs.entry_ctls_high |= VM_ENTRY_LOAD_BNDCFGS;
+			vmx->nested.msrs.exit_ctls_high |= VM_EXIT_CLEAR_BNDCFGS;
+		} else {
+			vmx->nested.msrs.entry_ctls_high &= ~VM_ENTRY_LOAD_BNDCFGS;
+			vmx->nested.msrs.exit_ctls_high &= ~VM_EXIT_CLEAR_BNDCFGS;
+		}
+	}
+}
+
 static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -11280,8 +11284,10 @@ static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
 		to_vmx(vcpu)->msr_ia32_feature_control_valid_bits &=
 			~FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;
 
-	if (nested_vmx_allowed(vcpu))
+	if (nested_vmx_allowed(vcpu)) {
 		nested_vmx_cr_fixed1_bits_update(vcpu);
+		nested_vmx_entry_exit_ctls_update(vcpu);
+	}
 }
 
 static void vmx_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry)
@@ -12049,8 +12055,13 @@ static void prepare_vmcs02_full(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
 
 	set_cr4_guest_host_mask(vmx);
 
-	if (vmx_mpx_supported())
-		vmcs_write64(GUEST_BNDCFGS, vmcs12->guest_bndcfgs);
+	if (kvm_mpx_supported()) {
+		if (vmx->nested.nested_run_pending &&
+			(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS))
+			vmcs_write64(GUEST_BNDCFGS, vmcs12->guest_bndcfgs);
+		else
+			vmcs_write64(GUEST_BNDCFGS, vmx->nested.vmcs01_guest_bndcfgs);
+	}
 
 	if (enable_vpid) {
 		if (nested_cpu_has_vpid(vmcs12) && vmx->nested.vpid02)
@@ -12595,15 +12606,21 @@ static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu, u32 *exit_qual)
 	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
 	bool from_vmentry = !!exit_qual;
 	u32 dummy_exit_qual;
-	u32 vmcs01_cpu_exec_ctrl;
+	bool evaluate_pending_interrupts;
 	int r = 0;
 
-	vmcs01_cpu_exec_ctrl = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
+	evaluate_pending_interrupts = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL) &
+		(CPU_BASED_VIRTUAL_INTR_PENDING | CPU_BASED_VIRTUAL_NMI_PENDING);
+	if (likely(!evaluate_pending_interrupts) && kvm_vcpu_apicv_active(vcpu))
+		evaluate_pending_interrupts |= vmx_has_apicv_interrupt(vcpu);
 
 	enter_guest_mode(vcpu);
 
 	if (!(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS))
 		vmx->nested.vmcs01_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL);
+	if (kvm_mpx_supported() &&
+		!(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS))
+		vmx->nested.vmcs01_guest_bndcfgs = vmcs_read64(GUEST_BNDCFGS);
 
 	vmx_switch_vmcs(vcpu, &vmx->nested.vmcs02);
 	vmx_segment_cache_clear(vmx);
@@ -12643,16 +12660,14 @@ static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu, u32 *exit_qual)
 	 * to L1 or delivered directly to L2 (e.g. In case L1 don't
 	 * intercept EXTERNAL_INTERRUPT).
 	 *
-	 * Usually this would be handled by L0 requesting a
-	 * IRQ/NMI window by setting VMCS accordingly. However,
-	 * this setting was done on VMCS01 and now VMCS02 is active
-	 * instead. Thus, we force L0 to perform pending event
-	 * evaluation by requesting a KVM_REQ_EVENT.
-	 */
-	if (vmcs01_cpu_exec_ctrl &
-		(CPU_BASED_VIRTUAL_INTR_PENDING | CPU_BASED_VIRTUAL_NMI_PENDING)) {
+	 * Usually this would be handled by the processor noticing an
+	 * IRQ/NMI window request, or checking RVI during evaluation of
+	 * pending virtual interrupts.  However, this setting was done
+	 * on VMCS01 and now VMCS02 is active instead. Thus, we force L0
+	 * to perform pending event evaluation by requesting a KVM_REQ_EVENT.
+	 */
+	if (unlikely(evaluate_pending_interrupts))
 		kvm_make_request(KVM_REQ_EVENT, vcpu);
-	}
 
 	/*
 	 * Note no nested_vmx_succeed or nested_vmx_fail here. At this point

+ 1 - 1
arch/x86/kvm/x86.c

@@ -4698,7 +4698,7 @@ static void kvm_init_msr_list(void)
 		 */
 		switch (msrs_to_save[i]) {
 		case MSR_IA32_BNDCFGS:
-			if (!kvm_x86_ops->mpx_supported())
+			if (!kvm_mpx_supported())
 				continue;
 			break;
 		case MSR_TSC_AUX:

+ 5 - 2
drivers/base/firmware_loader/main.c

@@ -226,8 +226,11 @@ static int alloc_lookup_fw_priv(const char *fw_name,
 	}
 
 	tmp = __allocate_fw_priv(fw_name, fwc, dbuf, size);
-	if (tmp && !(opt_flags & FW_OPT_NOCACHE))
-		list_add(&tmp->list, &fwc->head);
+	if (tmp) {
+		INIT_LIST_HEAD(&tmp->list);
+		if (!(opt_flags & FW_OPT_NOCACHE))
+			list_add(&tmp->list, &fwc->head);
+	}
 	spin_unlock(&fwc->lock);
 
 	*fw_priv = tmp;

+ 4 - 1
drivers/base/power/main.c

@@ -1713,8 +1713,10 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
 
 	dpm_wait_for_subordinate(dev, async);
 
-	if (async_error)
+	if (async_error) {
+		dev->power.direct_complete = false;
 		goto Complete;
 		goto Complete;
+	}
 
 	/*
 	 * If a device configured to wake up the system from sleep states
@@ -1726,6 +1728,7 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
 		pm_wakeup_event(dev, 0);
 
 	if (pm_wakeup_pending()) {
+		dev->power.direct_complete = false;
 		async_error = -EBUSY;
 		goto Complete;
 	}

+ 4 - 4
drivers/crypto/caam/caamalg.c

@@ -1553,8 +1553,8 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
 	edesc->src_nents = src_nents;
 	edesc->dst_nents = dst_nents;
 	edesc->sec4_sg_bytes = sec4_sg_bytes;
-	edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
-			 desc_bytes;
+	edesc->sec4_sg = (struct sec4_sg_entry *)((u8 *)edesc->hw_desc +
+						  desc_bytes);
 	edesc->iv_dir = DMA_TO_DEVICE;
 
 	/* Make sure IV is located in a DMAable area */
@@ -1757,8 +1757,8 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
 	edesc->src_nents = src_nents;
 	edesc->dst_nents = dst_nents;
 	edesc->sec4_sg_bytes = sec4_sg_bytes;
-	edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
-			 desc_bytes;
+	edesc->sec4_sg = (struct sec4_sg_entry *)((u8 *)edesc->hw_desc +
+						  desc_bytes);
 	edesc->iv_dir = DMA_FROM_DEVICE;
 
 	/* Make sure IV is located in a DMAable area */

+ 22 - 10
drivers/crypto/chelsio/chcr_algo.c

@@ -367,7 +367,8 @@ static inline void dsgl_walk_init(struct dsgl_walk *walk,
 	walk->to = (struct phys_sge_pairs *)(dsgl + 1);
 	walk->to = (struct phys_sge_pairs *)(dsgl + 1);
 }
 }
 
 
-static inline void dsgl_walk_end(struct dsgl_walk *walk, unsigned short qid)
+static inline void dsgl_walk_end(struct dsgl_walk *walk, unsigned short qid,
+				 int pci_chan_id)
 {
 {
 	struct cpl_rx_phys_dsgl *phys_cpl;
 	struct cpl_rx_phys_dsgl *phys_cpl;
 
 
@@ -385,6 +386,7 @@ static inline void dsgl_walk_end(struct dsgl_walk *walk, unsigned short qid)
 	phys_cpl->rss_hdr_int.opcode = CPL_RX_PHYS_ADDR;
 	phys_cpl->rss_hdr_int.opcode = CPL_RX_PHYS_ADDR;
 	phys_cpl->rss_hdr_int.qid = htons(qid);
 	phys_cpl->rss_hdr_int.qid = htons(qid);
 	phys_cpl->rss_hdr_int.hash_val = 0;
 	phys_cpl->rss_hdr_int.hash_val = 0;
+	phys_cpl->rss_hdr_int.channel = pci_chan_id;
 }
 }
 
 
 static inline void dsgl_walk_add_page(struct dsgl_walk *walk,
 static inline void dsgl_walk_add_page(struct dsgl_walk *walk,
@@ -718,7 +720,7 @@ static inline void create_wreq(struct chcr_context *ctx,
 		FILL_WR_RX_Q_ID(ctx->dev->rx_channel_id, qid,
 		FILL_WR_RX_Q_ID(ctx->dev->rx_channel_id, qid,
 				!!lcb, ctx->tx_qidx);
 				!!lcb, ctx->tx_qidx);
 
 
-	chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(ctx->dev->tx_channel_id,
+	chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(ctx->tx_chan_id,
 						       qid);
 						       qid);
 	chcr_req->ulptx.len = htonl((DIV_ROUND_UP(len16, 16) -
 	chcr_req->ulptx.len = htonl((DIV_ROUND_UP(len16, 16) -
 				     ((sizeof(chcr_req->wreq)) >> 4)));
 				     ((sizeof(chcr_req->wreq)) >> 4)));
@@ -1339,16 +1341,23 @@ static int chcr_device_init(struct chcr_context *ctx)
 				    adap->vres.ncrypto_fc);
 				    adap->vres.ncrypto_fc);
 		rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan;
 		rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan;
 		txq_perchan = ntxq / u_ctx->lldi.nchan;
 		txq_perchan = ntxq / u_ctx->lldi.nchan;
-		rxq_idx = ctx->dev->tx_channel_id * rxq_perchan;
-		rxq_idx += id % rxq_perchan;
-		txq_idx = ctx->dev->tx_channel_id * txq_perchan;
-		txq_idx += id % txq_perchan;
 		spin_lock(&ctx->dev->lock_chcr_dev);
 		spin_lock(&ctx->dev->lock_chcr_dev);
-		ctx->rx_qidx = rxq_idx;
-		ctx->tx_qidx = txq_idx;
+		ctx->tx_chan_id = ctx->dev->tx_channel_id;
 		ctx->dev->tx_channel_id = !ctx->dev->tx_channel_id;
 		ctx->dev->tx_channel_id = !ctx->dev->tx_channel_id;
 		ctx->dev->rx_channel_id = 0;
 		ctx->dev->rx_channel_id = 0;
 		spin_unlock(&ctx->dev->lock_chcr_dev);
 		spin_unlock(&ctx->dev->lock_chcr_dev);
+		rxq_idx = ctx->tx_chan_id * rxq_perchan;
+		rxq_idx += id % rxq_perchan;
+		txq_idx = ctx->tx_chan_id * txq_perchan;
+		txq_idx += id % txq_perchan;
+		ctx->rx_qidx = rxq_idx;
+		ctx->tx_qidx = txq_idx;
+		/* Channel Id used by SGE to forward packet to Host.
+		 * Same value should be used in cpl_fw6_pld RSS_CH field
+		 * by FW. Driver programs PCI channel ID to be used in fw
+		 * at the time of queue allocation with value "pi->tx_chan"
+		 */
+		ctx->pci_chan_id = txq_idx / txq_perchan;
 	}
 	}
 out:
 out:
 	return err;
 	return err;
@@ -2503,6 +2512,7 @@ void chcr_add_aead_dst_ent(struct aead_request *req,
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 	struct dsgl_walk dsgl_walk;
 	struct dsgl_walk dsgl_walk;
 	unsigned int authsize = crypto_aead_authsize(tfm);
 	unsigned int authsize = crypto_aead_authsize(tfm);
+	struct chcr_context *ctx = a_ctx(tfm);
 	u32 temp;
 	u32 temp;
 
 
 	dsgl_walk_init(&dsgl_walk, phys_cpl);
 	dsgl_walk_init(&dsgl_walk, phys_cpl);
@@ -2512,7 +2522,7 @@ void chcr_add_aead_dst_ent(struct aead_request *req,
 	dsgl_walk_add_page(&dsgl_walk, IV, &reqctx->iv_dma);
 	dsgl_walk_add_page(&dsgl_walk, IV, &reqctx->iv_dma);
 	temp = req->cryptlen + (reqctx->op ? -authsize : authsize);
 	temp = req->cryptlen + (reqctx->op ? -authsize : authsize);
 	dsgl_walk_add_sg(&dsgl_walk, req->dst, temp, req->assoclen);
 	dsgl_walk_add_sg(&dsgl_walk, req->dst, temp, req->assoclen);
-	dsgl_walk_end(&dsgl_walk, qid);
+	dsgl_walk_end(&dsgl_walk, qid, ctx->pci_chan_id);
 }
 }
 
 
 void chcr_add_cipher_src_ent(struct ablkcipher_request *req,
 void chcr_add_cipher_src_ent(struct ablkcipher_request *req,
@@ -2544,6 +2554,8 @@ void chcr_add_cipher_dst_ent(struct ablkcipher_request *req,
 			     unsigned short qid)
 			     unsigned short qid)
 {
 {
 	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
 	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
+	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(wrparam->req);
+	struct chcr_context *ctx = c_ctx(tfm);
 	struct dsgl_walk dsgl_walk;
 	struct dsgl_walk dsgl_walk;
 
 
 	dsgl_walk_init(&dsgl_walk, phys_cpl);
 	dsgl_walk_init(&dsgl_walk, phys_cpl);
@@ -2552,7 +2564,7 @@ void chcr_add_cipher_dst_ent(struct ablkcipher_request *req,
 	reqctx->dstsg = dsgl_walk.last_sg;
 	reqctx->dstsg = dsgl_walk.last_sg;
 	reqctx->dst_ofst = dsgl_walk.last_sg_len;
 	reqctx->dst_ofst = dsgl_walk.last_sg_len;
 
 
-	dsgl_walk_end(&dsgl_walk, qid);
+	dsgl_walk_end(&dsgl_walk, qid, ctx->pci_chan_id);
 }
 }
 
 
 void chcr_add_hash_src_ent(struct ahash_request *req,
 void chcr_add_hash_src_ent(struct ahash_request *req,

+ 2 - 0
drivers/crypto/chelsio/chcr_crypto.h

@@ -255,6 +255,8 @@ struct chcr_context {
 	struct chcr_dev *dev;
 	struct chcr_dev *dev;
 	unsigned char tx_qidx;
 	unsigned char tx_qidx;
 	unsigned char rx_qidx;
 	unsigned char rx_qidx;
+	unsigned char tx_chan_id;
+	unsigned char pci_chan_id;
 	struct __crypto_ctx crypto_ctx[0];
 	struct __crypto_ctx crypto_ctx[0];
 };
 };
 
 

+ 30 - 23
drivers/crypto/mxs-dcp.c

@@ -63,7 +63,7 @@ struct dcp {
 	struct dcp_coherent_block	*coh;
 	struct dcp_coherent_block	*coh;
 
 
 	struct completion		completion[DCP_MAX_CHANS];
 	struct completion		completion[DCP_MAX_CHANS];
-	struct mutex			mutex[DCP_MAX_CHANS];
+	spinlock_t			lock[DCP_MAX_CHANS];
 	struct task_struct		*thread[DCP_MAX_CHANS];
 	struct task_struct		*thread[DCP_MAX_CHANS];
 	struct crypto_queue		queue[DCP_MAX_CHANS];
 	struct crypto_queue		queue[DCP_MAX_CHANS];
 };
 };
@@ -349,13 +349,20 @@ static int dcp_chan_thread_aes(void *data)
 
 
 	int ret;
 	int ret;
 
 
-	do {
-		__set_current_state(TASK_INTERRUPTIBLE);
+	while (!kthread_should_stop()) {
+		set_current_state(TASK_INTERRUPTIBLE);
 
 
-		mutex_lock(&sdcp->mutex[chan]);
+		spin_lock(&sdcp->lock[chan]);
 		backlog = crypto_get_backlog(&sdcp->queue[chan]);
 		backlog = crypto_get_backlog(&sdcp->queue[chan]);
 		arq = crypto_dequeue_request(&sdcp->queue[chan]);
 		arq = crypto_dequeue_request(&sdcp->queue[chan]);
-		mutex_unlock(&sdcp->mutex[chan]);
+		spin_unlock(&sdcp->lock[chan]);
+
+		if (!backlog && !arq) {
+			schedule();
+			continue;
+		}
+
+		set_current_state(TASK_RUNNING);
 
 
 		if (backlog)
 		if (backlog)
 			backlog->complete(backlog, -EINPROGRESS);
 			backlog->complete(backlog, -EINPROGRESS);
@@ -363,11 +370,8 @@ static int dcp_chan_thread_aes(void *data)
 		if (arq) {
 		if (arq) {
 			ret = mxs_dcp_aes_block_crypt(arq);
 			ret = mxs_dcp_aes_block_crypt(arq);
 			arq->complete(arq, ret);
 			arq->complete(arq, ret);
-			continue;
 		}
 		}
-
-		schedule();
-	} while (!kthread_should_stop());
+	}
 
 
 	return 0;
 	return 0;
 }
 }
@@ -409,9 +413,9 @@ static int mxs_dcp_aes_enqueue(struct ablkcipher_request *req, int enc, int ecb)
 	rctx->ecb = ecb;
 	rctx->ecb = ecb;
 	actx->chan = DCP_CHAN_CRYPTO;
 	actx->chan = DCP_CHAN_CRYPTO;
 
 
-	mutex_lock(&sdcp->mutex[actx->chan]);
+	spin_lock(&sdcp->lock[actx->chan]);
 	ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
 	ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
-	mutex_unlock(&sdcp->mutex[actx->chan]);
+	spin_unlock(&sdcp->lock[actx->chan]);
 
 
 	wake_up_process(sdcp->thread[actx->chan]);
 	wake_up_process(sdcp->thread[actx->chan]);
 
 
@@ -640,13 +644,20 @@ static int dcp_chan_thread_sha(void *data)
 	struct ahash_request *req;
 	struct ahash_request *req;
 	int ret, fini;
 	int ret, fini;
 
 
-	do {
-		__set_current_state(TASK_INTERRUPTIBLE);
+	while (!kthread_should_stop()) {
+		set_current_state(TASK_INTERRUPTIBLE);
 
 
-		mutex_lock(&sdcp->mutex[chan]);
+		spin_lock(&sdcp->lock[chan]);
 		backlog = crypto_get_backlog(&sdcp->queue[chan]);
 		backlog = crypto_get_backlog(&sdcp->queue[chan]);
 		arq = crypto_dequeue_request(&sdcp->queue[chan]);
 		arq = crypto_dequeue_request(&sdcp->queue[chan]);
-		mutex_unlock(&sdcp->mutex[chan]);
+		spin_unlock(&sdcp->lock[chan]);
+
+		if (!backlog && !arq) {
+			schedule();
+			continue;
+		}
+
+		set_current_state(TASK_RUNNING);
 
 
 		if (backlog)
 		if (backlog)
 			backlog->complete(backlog, -EINPROGRESS);
 			backlog->complete(backlog, -EINPROGRESS);
@@ -658,12 +669,8 @@ static int dcp_chan_thread_sha(void *data)
 			ret = dcp_sha_req_to_buf(arq);
 			ret = dcp_sha_req_to_buf(arq);
 			fini = rctx->fini;
 			fini = rctx->fini;
 			arq->complete(arq, ret);
 			arq->complete(arq, ret);
-			if (!fini)
-				continue;
 		}
 		}
-
-		schedule();
-	} while (!kthread_should_stop());
+	}
 
 
 	return 0;
 	return 0;
 }
 }
@@ -721,9 +728,9 @@ static int dcp_sha_update_fx(struct ahash_request *req, int fini)
 		rctx->init = 1;
 		rctx->init = 1;
 	}
 	}
 
 
-	mutex_lock(&sdcp->mutex[actx->chan]);
+	spin_lock(&sdcp->lock[actx->chan]);
 	ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
 	ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
-	mutex_unlock(&sdcp->mutex[actx->chan]);
+	spin_unlock(&sdcp->lock[actx->chan]);
 
 
 	wake_up_process(sdcp->thread[actx->chan]);
 	wake_up_process(sdcp->thread[actx->chan]);
 	mutex_unlock(&actx->mutex);
 	mutex_unlock(&actx->mutex);
@@ -997,7 +1004,7 @@ static int mxs_dcp_probe(struct platform_device *pdev)
 	platform_set_drvdata(pdev, sdcp);
 	platform_set_drvdata(pdev, sdcp);
 
 
 	for (i = 0; i < DCP_MAX_CHANS; i++) {
 	for (i = 0; i < DCP_MAX_CHANS; i++) {
-		mutex_init(&sdcp->mutex[i]);
+		spin_lock_init(&sdcp->lock[i]);
 		init_completion(&sdcp->completion[i]);
 		init_completion(&sdcp->completion[i]);
 		crypto_init_queue(&sdcp->queue[i], 50);
 		crypto_init_queue(&sdcp->queue[i], 50);
 	}
 	}

+ 3 - 3
drivers/crypto/qat/qat_c3xxx/adf_drv.c

@@ -123,7 +123,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	struct adf_hw_device_data *hw_data;
 	struct adf_hw_device_data *hw_data;
 	char name[ADF_DEVICE_NAME_LENGTH];
 	char name[ADF_DEVICE_NAME_LENGTH];
 	unsigned int i, bar_nr;
 	unsigned int i, bar_nr;
-	int ret, bar_mask;
+	unsigned long bar_mask;
+	int ret;
 
 
 	switch (ent->device) {
 	switch (ent->device) {
 	case ADF_C3XXX_PCI_DEVICE_ID:
 	case ADF_C3XXX_PCI_DEVICE_ID:
@@ -235,8 +236,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	/* Find and map all the device's BARS */
 	/* Find and map all the device's BARS */
 	i = 0;
 	i = 0;
 	bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
 	bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
-	for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
-			 ADF_PCI_MAX_BARS * 2) {
+	for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
 		struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
 		struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
 
 
 		bar->base_addr = pci_resource_start(pdev, bar_nr);
 		bar->base_addr = pci_resource_start(pdev, bar_nr);

+ 3 - 3
drivers/crypto/qat/qat_c3xxxvf/adf_drv.c

@@ -125,7 +125,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	struct adf_hw_device_data *hw_data;
 	struct adf_hw_device_data *hw_data;
 	char name[ADF_DEVICE_NAME_LENGTH];
 	char name[ADF_DEVICE_NAME_LENGTH];
 	unsigned int i, bar_nr;
 	unsigned int i, bar_nr;
-	int ret, bar_mask;
+	unsigned long bar_mask;
+	int ret;
 
 
 	switch (ent->device) {
 	switch (ent->device) {
 	case ADF_C3XXXIOV_PCI_DEVICE_ID:
 	case ADF_C3XXXIOV_PCI_DEVICE_ID:
@@ -215,8 +216,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	/* Find and map all the device's BARS */
 	/* Find and map all the device's BARS */
 	i = 0;
 	i = 0;
 	bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
 	bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
-	for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
-			 ADF_PCI_MAX_BARS * 2) {
+	for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
 		struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
 		struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
 
 
 		bar->base_addr = pci_resource_start(pdev, bar_nr);
 		bar->base_addr = pci_resource_start(pdev, bar_nr);

+ 3 - 3
drivers/crypto/qat/qat_c62x/adf_drv.c

@@ -123,7 +123,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	struct adf_hw_device_data *hw_data;
 	struct adf_hw_device_data *hw_data;
 	char name[ADF_DEVICE_NAME_LENGTH];
 	char name[ADF_DEVICE_NAME_LENGTH];
 	unsigned int i, bar_nr;
 	unsigned int i, bar_nr;
-	int ret, bar_mask;
+	unsigned long bar_mask;
+	int ret;
 
 
 	switch (ent->device) {
 	switch (ent->device) {
 	case ADF_C62X_PCI_DEVICE_ID:
 	case ADF_C62X_PCI_DEVICE_ID:
@@ -235,8 +236,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	/* Find and map all the device's BARS */
 	/* Find and map all the device's BARS */
 	i = (hw_data->fuses & ADF_DEVICE_FUSECTL_MASK) ? 1 : 0;
 	i = (hw_data->fuses & ADF_DEVICE_FUSECTL_MASK) ? 1 : 0;
 	bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
 	bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
-	for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
-			 ADF_PCI_MAX_BARS * 2) {
+	for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
 		struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
 		struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
 
 
 		bar->base_addr = pci_resource_start(pdev, bar_nr);
 		bar->base_addr = pci_resource_start(pdev, bar_nr);

+ 3 - 3
drivers/crypto/qat/qat_c62xvf/adf_drv.c

@@ -125,7 +125,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	struct adf_hw_device_data *hw_data;
 	struct adf_hw_device_data *hw_data;
 	char name[ADF_DEVICE_NAME_LENGTH];
 	char name[ADF_DEVICE_NAME_LENGTH];
 	unsigned int i, bar_nr;
 	unsigned int i, bar_nr;
-	int ret, bar_mask;
+	unsigned long bar_mask;
+	int ret;
 
 
 	switch (ent->device) {
 	switch (ent->device) {
 	case ADF_C62XIOV_PCI_DEVICE_ID:
 	case ADF_C62XIOV_PCI_DEVICE_ID:
@@ -215,8 +216,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	/* Find and map all the device's BARS */
 	/* Find and map all the device's BARS */
 	i = 0;
 	i = 0;
 	bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
 	bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
-	for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
-			 ADF_PCI_MAX_BARS * 2) {
+	for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
 		struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
 		struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
 
 
 		bar->base_addr = pci_resource_start(pdev, bar_nr);
 		bar->base_addr = pci_resource_start(pdev, bar_nr);

+ 3 - 3
drivers/crypto/qat/qat_dh895xcc/adf_drv.c

@@ -123,7 +123,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	struct adf_hw_device_data *hw_data;
 	struct adf_hw_device_data *hw_data;
 	char name[ADF_DEVICE_NAME_LENGTH];
 	char name[ADF_DEVICE_NAME_LENGTH];
 	unsigned int i, bar_nr;
 	unsigned int i, bar_nr;
-	int ret, bar_mask;
+	unsigned long bar_mask;
+	int ret;
 
 
 	switch (ent->device) {
 	switch (ent->device) {
 	case ADF_DH895XCC_PCI_DEVICE_ID:
 	case ADF_DH895XCC_PCI_DEVICE_ID:
@@ -237,8 +238,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	/* Find and map all the device's BARS */
 	/* Find and map all the device's BARS */
 	i = 0;
 	i = 0;
 	bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
 	bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
-	for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
-			 ADF_PCI_MAX_BARS * 2) {
+	for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
 		struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
 		struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
 
 
 		bar->base_addr = pci_resource_start(pdev, bar_nr);
 		bar->base_addr = pci_resource_start(pdev, bar_nr);

+ 3 - 3
drivers/crypto/qat/qat_dh895xccvf/adf_drv.c

@@ -125,7 +125,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	struct adf_hw_device_data *hw_data;
 	struct adf_hw_device_data *hw_data;
 	char name[ADF_DEVICE_NAME_LENGTH];
 	char name[ADF_DEVICE_NAME_LENGTH];
 	unsigned int i, bar_nr;
 	unsigned int i, bar_nr;
-	int ret, bar_mask;
+	unsigned long bar_mask;
+	int ret;
 
 
 	switch (ent->device) {
 	switch (ent->device) {
 	case ADF_DH895XCCIOV_PCI_DEVICE_ID:
 	case ADF_DH895XCCIOV_PCI_DEVICE_ID:
@@ -215,8 +216,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	/* Find and map all the device's BARS */
 	/* Find and map all the device's BARS */
 	i = 0;
 	i = 0;
 	bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
 	bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
-	for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
-			 ADF_PCI_MAX_BARS * 2) {
+	for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
 		struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
 		struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
 
 
 		bar->base_addr = pci_resource_start(pdev, bar_nr);
 		bar->base_addr = pci_resource_start(pdev, bar_nr);

+ 3 - 1
drivers/fpga/dfl-fme-region.c

@@ -14,6 +14,7 @@
  */
  */
 
 
 #include <linux/module.h>
 #include <linux/module.h>
+#include <linux/fpga/fpga-mgr.h>
 #include <linux/fpga/fpga-region.h>
 #include <linux/fpga/fpga-region.h>
 
 
 #include "dfl-fme-pr.h"
 #include "dfl-fme-pr.h"
@@ -66,9 +67,10 @@ eprobe_mgr_put:
 static int fme_region_remove(struct platform_device *pdev)
 static int fme_region_remove(struct platform_device *pdev)
 {
 {
 	struct fpga_region *region = dev_get_drvdata(&pdev->dev);
 	struct fpga_region *region = dev_get_drvdata(&pdev->dev);
+	struct fpga_manager *mgr = region->mgr;
 
 
 	fpga_region_unregister(region);
 	fpga_region_unregister(region);
-	fpga_mgr_put(region->mgr);
+	fpga_mgr_put(mgr);
 
 
 	return 0;
 	return 0;
 }
 }

+ 1 - 1
drivers/fpga/fpga-bridge.c

@@ -125,7 +125,7 @@ static int fpga_bridge_dev_match(struct device *dev, const void *data)
  *
  *
  * Given a device, get an exclusive reference to a fpga bridge.
  * Given a device, get an exclusive reference to a fpga bridge.
  *
  *
- * Return: fpga manager struct or IS_ERR() condition containing error code.
+ * Return: fpga bridge struct or IS_ERR() condition containing error code.
  */
  */
 struct fpga_bridge *fpga_bridge_get(struct device *dev,
 struct fpga_bridge *fpga_bridge_get(struct device *dev,
 				    struct fpga_image_info *info)
 				    struct fpga_image_info *info)

+ 2 - 1
drivers/fpga/of-fpga-region.c

@@ -437,9 +437,10 @@ eprobe_mgr_put:
 static int of_fpga_region_remove(struct platform_device *pdev)
 static int of_fpga_region_remove(struct platform_device *pdev)
 {
 {
 	struct fpga_region *region = platform_get_drvdata(pdev);
 	struct fpga_region *region = platform_get_drvdata(pdev);
+	struct fpga_manager *mgr = region->mgr;
 
 
 	fpga_region_unregister(region);
 	fpga_region_unregister(region);
-	fpga_mgr_put(region->mgr);
+	fpga_mgr_put(mgr);
 
 
 	return 0;
 	return 0;
 }
 }

+ 1 - 1
drivers/gpio/gpiolib.c

@@ -571,7 +571,7 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip)
 		if (ret)
 		if (ret)
 			goto out_free_descs;
 			goto out_free_descs;
 		lh->descs[i] = desc;
 		lh->descs[i] = desc;
-		count = i;
+		count = i + 1;
 
 
 		if (lflags & GPIOHANDLE_REQUEST_ACTIVE_LOW)
 		if (lflags & GPIOHANDLE_REQUEST_ACTIVE_LOW)
 			set_bit(FLAG_ACTIVE_LOW, &desc->flags);
 			set_bit(FLAG_ACTIVE_LOW, &desc->flags);

+ 29 - 8
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c

@@ -358,8 +358,8 @@ static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
 					struct queue *q,
 					struct queue *q,
 					struct qcm_process_device *qpd)
 					struct qcm_process_device *qpd)
 {
 {
-	int retval;
 	struct mqd_manager *mqd_mgr;
 	struct mqd_manager *mqd_mgr;
+	int retval;
 
 
 	mqd_mgr = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE);
 	mqd_mgr = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE);
 	if (!mqd_mgr)
 	if (!mqd_mgr)
@@ -387,8 +387,12 @@ static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
 	if (!q->properties.is_active)
 	if (!q->properties.is_active)
 		return 0;
 		return 0;
 
 
-	retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe, q->queue,
-			&q->properties, q->process->mm);
+	if (WARN(q->process->mm != current->mm,
+		 "should only run in user thread"))
+		retval = -EFAULT;
+	else
+		retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe, q->queue,
+					   &q->properties, current->mm);
 	if (retval)
 	if (retval)
 		goto out_uninit_mqd;
 		goto out_uninit_mqd;
 
 
@@ -545,9 +549,15 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
 		retval = map_queues_cpsch(dqm);
 		retval = map_queues_cpsch(dqm);
 	else if (q->properties.is_active &&
 	else if (q->properties.is_active &&
 		 (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
 		 (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
-		  q->properties.type == KFD_QUEUE_TYPE_SDMA))
-		retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe, q->queue,
-				       &q->properties, q->process->mm);
+		  q->properties.type == KFD_QUEUE_TYPE_SDMA)) {
+		if (WARN(q->process->mm != current->mm,
+			 "should only run in user thread"))
+			retval = -EFAULT;
+		else
+			retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd,
+						   q->pipe, q->queue,
+						   &q->properties, current->mm);
+	}
 
 
 out_unlock:
 out_unlock:
 	dqm_unlock(dqm);
 	dqm_unlock(dqm);
@@ -653,6 +663,7 @@ out:
 static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
 static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
 					  struct qcm_process_device *qpd)
 					  struct qcm_process_device *qpd)
 {
 {
+	struct mm_struct *mm = NULL;
 	struct queue *q;
 	struct queue *q;
 	struct mqd_manager *mqd_mgr;
 	struct mqd_manager *mqd_mgr;
 	struct kfd_process_device *pdd;
 	struct kfd_process_device *pdd;
@@ -686,6 +697,15 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
 		kfd_flush_tlb(pdd);
 		kfd_flush_tlb(pdd);
 	}
 	}
 
 
+	/* Take a safe reference to the mm_struct, which may otherwise
+	 * disappear even while the kfd_process is still referenced.
+	 */
+	mm = get_task_mm(pdd->process->lead_thread);
+	if (!mm) {
+		retval = -EFAULT;
+		goto out;
+	}
+
 	/* activate all active queues on the qpd */
 	/* activate all active queues on the qpd */
 	list_for_each_entry(q, &qpd->queues_list, list) {
 	list_for_each_entry(q, &qpd->queues_list, list) {
 		if (!q->properties.is_evicted)
 		if (!q->properties.is_evicted)
@@ -700,14 +720,15 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
 		q->properties.is_evicted = false;
 		q->properties.is_evicted = false;
 		q->properties.is_active = true;
 		q->properties.is_active = true;
 		retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe,
 		retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe,
-				       q->queue, &q->properties,
-				       q->process->mm);
+				       q->queue, &q->properties, mm);
 		if (retval)
 		if (retval)
 			goto out;
 			goto out;
 		dqm->queue_count++;
 		dqm->queue_count++;
 	}
 	}
 	qpd->evicted = 0;
 	qpd->evicted = 0;
 out:
 out:
+	if (mm)
+		mmput(mm);
 	dqm_unlock(dqm);
 	dqm_unlock(dqm);
 	return retval;
 	return retval;
 }
 }

+ 8 - 2
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c

@@ -4633,12 +4633,18 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
 	}
 	}
 	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
 	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
 
 
-	/* Signal HW programming completion */
-	drm_atomic_helper_commit_hw_done(state);
 
 
 	if (wait_for_vblank)
 	if (wait_for_vblank)
 		drm_atomic_helper_wait_for_flip_done(dev, state);
 		drm_atomic_helper_wait_for_flip_done(dev, state);
 
 
+	/*
+	 * FIXME:
+	 * Delay hw_done() until flip_done() is signaled. This is to block
+	 * another commit from freeing the CRTC state while we're still
+	 * waiting on flip_done.
+	 */
+	drm_atomic_helper_commit_hw_done(state);
+
 	drm_atomic_helper_cleanup_planes(dev, state);
 	drm_atomic_helper_cleanup_planes(dev, state);
 
 
 	/* Finally, drop a runtime PM reference for each newly disabled CRTC,
 	/* Finally, drop a runtime PM reference for each newly disabled CRTC,

+ 26 - 9
drivers/gpu/drm/drm_client.c

@@ -63,20 +63,21 @@ static void drm_client_close(struct drm_client_dev *client)
 EXPORT_SYMBOL(drm_client_close);
 EXPORT_SYMBOL(drm_client_close);
 
 
 /**
 /**
- * drm_client_new - Create a DRM client
+ * drm_client_init - Initialise a DRM client
  * @dev: DRM device
  * @dev: DRM device
  * @client: DRM client
  * @client: DRM client
  * @name: Client name
  * @name: Client name
  * @funcs: DRM client functions (optional)
  * @funcs: DRM client functions (optional)
  *
  *
+ * This initialises the client and opens a &drm_file. Use drm_client_add() to complete the process.
  * The caller needs to hold a reference on @dev before calling this function.
  * The caller needs to hold a reference on @dev before calling this function.
  * The client is freed when the &drm_device is unregistered. See drm_client_release().
  * The client is freed when the &drm_device is unregistered. See drm_client_release().
  *
  *
  * Returns:
  * Returns:
  * Zero on success or negative error code on failure.
  * Zero on success or negative error code on failure.
  */
  */
-int drm_client_new(struct drm_device *dev, struct drm_client_dev *client,
-		   const char *name, const struct drm_client_funcs *funcs)
+int drm_client_init(struct drm_device *dev, struct drm_client_dev *client,
+		    const char *name, const struct drm_client_funcs *funcs)
 {
 {
 	int ret;
 	int ret;
 
 
@@ -95,10 +96,6 @@ int drm_client_new(struct drm_device *dev, struct drm_client_dev *client,
 	if (ret)
 	if (ret)
 		goto err_put_module;
 		goto err_put_module;
 
 
-	mutex_lock(&dev->clientlist_mutex);
-	list_add(&client->list, &dev->clientlist);
-	mutex_unlock(&dev->clientlist_mutex);
-
 	drm_dev_get(dev);
 	drm_dev_get(dev);
 
 
 	return 0;
 	return 0;
@@ -109,13 +106,33 @@ err_put_module:
 
 
 	return ret;
 	return ret;
 }
 }
-EXPORT_SYMBOL(drm_client_new);
+EXPORT_SYMBOL(drm_client_init);
+
+/**
+ * drm_client_add - Add client to the device list
+ * @client: DRM client
+ *
+ * Add the client to the &drm_device client list to activate its callbacks.
+ * @client must be initialized by a call to drm_client_init(). After
+ * drm_client_add() it is no longer permissible to call drm_client_release()
+ * directly (outside the unregister callback), instead cleanup will happen
+ * automatically on driver unload.
+ */
+void drm_client_add(struct drm_client_dev *client)
+{
+	struct drm_device *dev = client->dev;
+
+	mutex_lock(&dev->clientlist_mutex);
+	list_add(&client->list, &dev->clientlist);
+	mutex_unlock(&dev->clientlist_mutex);
+}
+EXPORT_SYMBOL(drm_client_add);
 
 
 /**
 /**
  * drm_client_release - Release DRM client resources
  * drm_client_release - Release DRM client resources
  * @client: DRM client
  * @client: DRM client
  *
  *
- * Releases resources by closing the &drm_file that was opened by drm_client_new().
+ * Releases resources by closing the &drm_file that was opened by drm_client_init().
  * It is called automatically if the &drm_client_funcs.unregister callback is _not_ set.
  * It is called automatically if the &drm_client_funcs.unregister callback is _not_ set.
  *
  *
  * This function should only be called from the unregister callback. An exception
  * This function should only be called from the unregister callback. An exception

+ 3 - 1
drivers/gpu/drm/drm_fb_cma_helper.c

@@ -160,7 +160,7 @@ struct drm_fbdev_cma *drm_fbdev_cma_init(struct drm_device *dev,
 
 
 	fb_helper = &fbdev_cma->fb_helper;
 	fb_helper = &fbdev_cma->fb_helper;
 
 
-	ret = drm_client_new(dev, &fb_helper->client, "fbdev", NULL);
+	ret = drm_client_init(dev, &fb_helper->client, "fbdev", NULL);
 	if (ret)
 	if (ret)
 		goto err_free;
 		goto err_free;
 
 
@@ -169,6 +169,8 @@ struct drm_fbdev_cma *drm_fbdev_cma_init(struct drm_device *dev,
 	if (ret)
 	if (ret)
 		goto err_client_put;
 		goto err_client_put;
 
 
+	drm_client_add(&fb_helper->client);
+
 	return fbdev_cma;
 	return fbdev_cma;
 
 
 err_client_put:
 err_client_put:

+ 3 - 1
drivers/gpu/drm/drm_fb_helper.c

@@ -3218,12 +3218,14 @@ int drm_fbdev_generic_setup(struct drm_device *dev, unsigned int preferred_bpp)
 	if (!fb_helper)
 	if (!fb_helper)
 		return -ENOMEM;
 		return -ENOMEM;
 
 
-	ret = drm_client_new(dev, &fb_helper->client, "fbdev", &drm_fbdev_client_funcs);
+	ret = drm_client_init(dev, &fb_helper->client, "fbdev", &drm_fbdev_client_funcs);
 	if (ret) {
 	if (ret) {
 		kfree(fb_helper);
 		kfree(fb_helper);
 		return ret;
 		return ret;
 	}
 	}
 
 
+	drm_client_add(&fb_helper->client);
+
 	fb_helper->preferred_bpp = preferred_bpp;
 	fb_helper->preferred_bpp = preferred_bpp;
 
 
 	drm_fbdev_client_hotplug(&fb_helper->client);
 	drm_fbdev_client_hotplug(&fb_helper->client);

+ 3 - 3
drivers/gpu/drm/drm_lease.c

@@ -566,14 +566,14 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev,
 	lessee_priv->is_master = 1;
 	lessee_priv->is_master = 1;
 	lessee_priv->authenticated = 1;
 	lessee_priv->authenticated = 1;
 
 
-	/* Hook up the fd */
-	fd_install(fd, lessee_file);
-
 	/* Pass fd back to userspace */
 	/* Pass fd back to userspace */
 	DRM_DEBUG_LEASE("Returning fd %d id %d\n", fd, lessee->lessee_id);
 	DRM_DEBUG_LEASE("Returning fd %d id %d\n", fd, lessee->lessee_id);
 	cl->fd = fd;
 	cl->fd = fd;
 	cl->lessee_id = lessee->lessee_id;
 	cl->lessee_id = lessee->lessee_id;
 
 
+	/* Hook up the fd */
+	fd_install(fd, lessee_file);
+
 	DRM_DEBUG_LEASE("drm_mode_create_lease_ioctl succeeded\n");
 	DRM_DEBUG_LEASE("drm_mode_create_lease_ioctl succeeded\n");
 	return 0;
 	return 0;
 
 

+ 6 - 28
drivers/gpu/drm/exynos/exynos_drm_iommu.h

@@ -55,37 +55,12 @@ static inline void __exynos_iommu_detach(struct exynos_drm_private *priv,
 static inline int __exynos_iommu_create_mapping(struct exynos_drm_private *priv,
 static inline int __exynos_iommu_create_mapping(struct exynos_drm_private *priv,
 					unsigned long start, unsigned long size)
 					unsigned long start, unsigned long size)
 {
 {
-	struct iommu_domain *domain;
-	int ret;
-
-	domain = iommu_domain_alloc(priv->dma_dev->bus);
-	if (!domain)
-		return -ENOMEM;
-
-	ret = iommu_get_dma_cookie(domain);
-	if (ret)
-		goto free_domain;
-
-	ret = iommu_dma_init_domain(domain, start, size, NULL);
-	if (ret)
-		goto put_cookie;
-
-	priv->mapping = domain;
+	priv->mapping = iommu_get_domain_for_dev(priv->dma_dev);
 	return 0;
 	return 0;
-
-put_cookie:
-	iommu_put_dma_cookie(domain);
-free_domain:
-	iommu_domain_free(domain);
-	return ret;
 }
 }
 
 
 static inline void __exynos_iommu_release_mapping(struct exynos_drm_private *priv)
 static inline void __exynos_iommu_release_mapping(struct exynos_drm_private *priv)
 {
 {
-	struct iommu_domain *domain = priv->mapping;
-
-	iommu_put_dma_cookie(domain);
-	iommu_domain_free(domain);
 	priv->mapping = NULL;
 	priv->mapping = NULL;
 }
 }
 
 
@@ -94,7 +69,9 @@ static inline int __exynos_iommu_attach(struct exynos_drm_private *priv,
 {
 {
 	struct iommu_domain *domain = priv->mapping;
 	struct iommu_domain *domain = priv->mapping;
 
 
-	return iommu_attach_device(domain, dev);
+	if (dev != priv->dma_dev)
+		return iommu_attach_device(domain, dev);
+	return 0;
 }
 }
 
 
 static inline void __exynos_iommu_detach(struct exynos_drm_private *priv,
 static inline void __exynos_iommu_detach(struct exynos_drm_private *priv,
@@ -102,7 +79,8 @@ static inline void __exynos_iommu_detach(struct exynos_drm_private *priv,
 {
 {
 	struct iommu_domain *domain = priv->mapping;
 	struct iommu_domain *domain = priv->mapping;
 
 
-	iommu_detach_device(domain, dev);
+	if (dev != priv->dma_dev)
+		iommu_detach_device(domain, dev);
 }
 }
 #else
 #else
 #error Unsupported architecture and IOMMU/DMA-mapping glue code
 #error Unsupported architecture and IOMMU/DMA-mapping glue code

+ 3 - 2
drivers/gpu/drm/i2c/tda9950.c

@@ -191,7 +191,8 @@ static irqreturn_t tda9950_irq(int irq, void *data)
 			break;
 			break;
 		}
 		}
 		/* TDA9950 executes all retries for us */
 		/* TDA9950 executes all retries for us */
-		tx_status |= CEC_TX_STATUS_MAX_RETRIES;
+		if (tx_status != CEC_TX_STATUS_OK)
+			tx_status |= CEC_TX_STATUS_MAX_RETRIES;
 		cec_transmit_done(priv->adap, tx_status, arb_lost_cnt,
 		cec_transmit_done(priv->adap, tx_status, arb_lost_cnt,
 				  nack_cnt, 0, err_cnt);
 				  nack_cnt, 0, err_cnt);
 		break;
 		break;
@@ -310,7 +311,7 @@ static void tda9950_release(struct tda9950_priv *priv)
 	/* Wait up to .5s for it to signal non-busy */
 	/* Wait up to .5s for it to signal non-busy */
 	do {
 	do {
 		csr = tda9950_read(client, REG_CSR);
 		csr = tda9950_read(client, REG_CSR);
-		if (!(csr & CSR_BUSY) || --timeout)
+		if (!(csr & CSR_BUSY) || !--timeout)
 			break;
 			break;
 		msleep(10);
 		msleep(10);
 	} while (1);
 	} while (1);

+ 63 - 25
drivers/gpu/drm/i915/i915_gpu_error.c

@@ -232,6 +232,20 @@ static bool compress_init(struct compress *c)
 	return true;
 }
 
+static void *compress_next_page(struct drm_i915_error_object *dst)
+{
+	unsigned long page;
+
+	if (dst->page_count >= dst->num_pages)
+		return ERR_PTR(-ENOSPC);
+
+	page = __get_free_page(GFP_ATOMIC | __GFP_NOWARN);
+	if (!page)
+		return ERR_PTR(-ENOMEM);
+
+	return dst->pages[dst->page_count++] = (void *)page;
+}
+
 static int compress_page(struct compress *c,
 			 void *src,
 			 struct drm_i915_error_object *dst)
@@ -245,19 +259,14 @@ static int compress_page(struct compress *c,
 
 	do {
 		if (zstream->avail_out == 0) {
-			unsigned long page;
-
-			page = __get_free_page(GFP_ATOMIC | __GFP_NOWARN);
-			if (!page)
-				return -ENOMEM;
+			zstream->next_out = compress_next_page(dst);
+			if (IS_ERR(zstream->next_out))
+				return PTR_ERR(zstream->next_out);
 
-			dst->pages[dst->page_count++] = (void *)page;
-
-			zstream->next_out = (void *)page;
 			zstream->avail_out = PAGE_SIZE;
 		}
 
-		if (zlib_deflate(zstream, Z_SYNC_FLUSH) != Z_OK)
+		if (zlib_deflate(zstream, Z_NO_FLUSH) != Z_OK)
 			return -EIO;
 	} while (zstream->avail_in);
 
@@ -268,19 +277,42 @@ static int compress_page(struct compress *c,
 	return 0;
 }
 
-static void compress_fini(struct compress *c,
+static int compress_flush(struct compress *c,
 			  struct drm_i915_error_object *dst)
 {
 	struct z_stream_s *zstream = &c->zstream;
 
-	if (dst) {
-		zlib_deflate(zstream, Z_FINISH);
-		dst->unused = zstream->avail_out;
-	}
+	do {
+		switch (zlib_deflate(zstream, Z_FINISH)) {
+		case Z_OK: /* more space requested */
+			zstream->next_out = compress_next_page(dst);
+			if (IS_ERR(zstream->next_out))
+				return PTR_ERR(zstream->next_out);
+
+			zstream->avail_out = PAGE_SIZE;
+			break;
+
+		case Z_STREAM_END:
+			goto end;
+
+		default: /* any error */
+			return -EIO;
+		}
+	} while (1);
+
+end:
+	memset(zstream->next_out, 0, zstream->avail_out);
+	dst->unused = zstream->avail_out;
+	return 0;
+}
+
+static void compress_fini(struct compress *c,
+			  struct drm_i915_error_object *dst)
+{
+	struct z_stream_s *zstream = &c->zstream;
 
 	zlib_deflateEnd(zstream);
 	kfree(zstream->workspace);
-
 	if (c->tmp)
 		free_page((unsigned long)c->tmp);
 }
@@ -319,6 +351,12 @@ static int compress_page(struct compress *c,
 	return 0;
 }
 
+static int compress_flush(struct compress *c,
+			  struct drm_i915_error_object *dst)
+{
+	return 0;
+}
+
 static void compress_fini(struct compress *c,
 			  struct drm_i915_error_object *dst)
 {
@@ -917,6 +955,7 @@ i915_error_object_create(struct drm_i915_private *i915,
 	unsigned long num_pages;
 	struct sgt_iter iter;
 	dma_addr_t dma;
+	int ret;
 
 	if (!vma)
 		return NULL;
@@ -930,6 +969,7 @@ i915_error_object_create(struct drm_i915_private *i915,
 
 	dst->gtt_offset = vma->node.start;
 	dst->gtt_size = vma->node.size;
+	dst->num_pages = num_pages;
 	dst->page_count = 0;
 	dst->unused = 0;
 
@@ -938,28 +978,26 @@ i915_error_object_create(struct drm_i915_private *i915,
 		return NULL;
 	}
 
+	ret = -EINVAL;
 	for_each_sgt_dma(dma, iter, vma->pages) {
 		void __iomem *s;
-		int ret;
 
 		ggtt->vm.insert_page(&ggtt->vm, dma, slot, I915_CACHE_NONE, 0);
 
 		s = io_mapping_map_atomic_wc(&ggtt->iomap, slot);
 		ret = compress_page(&compress, (void  __force *)s, dst);
 		io_mapping_unmap_atomic(s);
-
 		if (ret)
-			goto unwind;
+			break;
 	}
-	goto out;
 
-unwind:
-	while (dst->page_count--)
-		free_page((unsigned long)dst->pages[dst->page_count]);
-	kfree(dst);
-	dst = NULL;
+	if (ret || compress_flush(&compress, dst)) {
+		while (dst->page_count--)
+			free_page((unsigned long)dst->pages[dst->page_count]);
+		kfree(dst);
+		dst = NULL;
+	}
 
-out:
 	compress_fini(&compress, dst);
 	ggtt->vm.clear_range(&ggtt->vm, slot, PAGE_SIZE);
 	return dst;
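
Note: the new compress_flush() drains zlib with Z_FINISH — Z_OK means the current output page is full and another one is needed, Z_STREAM_END means everything has been flushed. A minimal userspace sketch of the same state machine, assuming <zlib.h> and a caller-supplied emit() callback (both illustrative, not part of the driver):

#include <zlib.h>

static int finish_deflate(z_stream *zs, unsigned char *out, size_t outsz,
			  int (*emit)(const unsigned char *buf, size_t len))
{
	for (;;) {
		int ret;

		zs->next_out = out;
		zs->avail_out = outsz;

		ret = deflate(zs, Z_FINISH);
		if (ret != Z_OK && ret != Z_STREAM_END)
			return -1;			/* hard error */

		if (emit(out, outsz - zs->avail_out))
			return -1;			/* consumer refused the data */

		if (ret == Z_STREAM_END)
			return 0;			/* fully flushed */
		/* Z_OK: more output pending, loop with a fresh buffer */
	}
}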

+ 1 - 0
drivers/gpu/drm/i915/i915_gpu_error.h

@@ -135,6 +135,7 @@ struct i915_gpu_state {
 		struct drm_i915_error_object {
 			u64 gtt_offset;
 			u64 gtt_size;
+			int num_pages;
 			int page_count;
 			int unused;
 			u32 *pages[0];

+ 12 - 21
drivers/gpu/drm/i915/i915_irq.c

@@ -3091,36 +3091,27 @@ gen11_gt_irq_handler(struct drm_i915_private * const i915,
 	spin_unlock(&i915->irq_lock);
 }
 
-static void
-gen11_gu_misc_irq_ack(struct drm_i915_private *dev_priv, const u32 master_ctl,
-		      u32 *iir)
+static u32
+gen11_gu_misc_irq_ack(struct drm_i915_private *dev_priv, const u32 master_ctl)
 {
 	void __iomem * const regs = dev_priv->regs;
+	u32 iir;
 
 	if (!(master_ctl & GEN11_GU_MISC_IRQ))
-		return;
+		return 0;
+
+	iir = raw_reg_read(regs, GEN11_GU_MISC_IIR);
+	if (likely(iir))
+		raw_reg_write(regs, GEN11_GU_MISC_IIR, iir);
 
-	*iir = raw_reg_read(regs, GEN11_GU_MISC_IIR);
-	if (likely(*iir))
-		raw_reg_write(regs, GEN11_GU_MISC_IIR, *iir);
+	return iir;
 }
 
 static void
-gen11_gu_misc_irq_handler(struct drm_i915_private *dev_priv,
-			  const u32 master_ctl, const u32 iir)
+gen11_gu_misc_irq_handler(struct drm_i915_private *dev_priv, const u32 iir)
 {
-	if (!(master_ctl & GEN11_GU_MISC_IRQ))
-		return;
-
-	if (unlikely(!iir)) {
-		DRM_ERROR("GU_MISC iir blank!\n");
-		return;
-	}
-
 	if (iir & GEN11_GU_MISC_GSE)
 		intel_opregion_asle_intr(dev_priv);
-	else
-		DRM_ERROR("Unexpected GU_MISC interrupt 0x%x\n", iir);
 }
 
 static irqreturn_t gen11_irq_handler(int irq, void *arg)
@@ -3157,12 +3148,12 @@ static irqreturn_t gen11_irq_handler(int irq, void *arg)
 		enable_rpm_wakeref_asserts(i915);
 	}
 
-	gen11_gu_misc_irq_ack(i915, master_ctl, &gu_misc_iir);
+	gu_misc_iir = gen11_gu_misc_irq_ack(i915, master_ctl);
 
 	/* Acknowledge and enable interrupts. */
 	raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ | master_ctl);
 
-	gen11_gu_misc_irq_handler(i915, master_ctl, gu_misc_iir);
+	gen11_gu_misc_irq_handler(i915, gu_misc_iir);
 
 	return IRQ_HANDLED;
 }

+ 0 - 1
drivers/gpu/drm/i915/i915_pci.c

@@ -592,7 +592,6 @@ static const struct intel_device_info intel_cannonlake_info = {
 	GEN10_FEATURES, \
 	GEN(11), \
 	.ddb_size = 2048, \
-	.has_csr = 0, \
 	.has_logical_ring_elsq = 1
 
 static const struct intel_device_info intel_icelake_11_info = {

+ 0 - 1
drivers/hid/hid-ids.h

@@ -976,7 +976,6 @@
 #define USB_DEVICE_ID_SIS817_TOUCH	0x0817
 #define USB_DEVICE_ID_SIS_TS		0x1013
 #define USB_DEVICE_ID_SIS1030_TOUCH	0x1030
-#define USB_DEVICE_ID_SIS10FB_TOUCH	0x10fb
 
 #define USB_VENDOR_ID_SKYCABLE			0x1223
 #define	USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER	0x3F07

+ 10 - 17
drivers/hid/i2c-hid/i2c-hid.c

@@ -47,7 +47,7 @@
 /* quirks to control the device */
 #define I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV	BIT(0)
 #define I2C_HID_QUIRK_NO_IRQ_AFTER_RESET	BIT(1)
-#define I2C_HID_QUIRK_RESEND_REPORT_DESCR	BIT(2)
+#define I2C_HID_QUIRK_NO_RUNTIME_PM		BIT(2)
 
 /* flags */
 #define I2C_HID_STARTED		0
@@ -169,9 +169,8 @@ static const struct i2c_hid_quirks {
 	{ USB_VENDOR_ID_WEIDA, USB_DEVICE_ID_WEIDA_8755,
 		I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV },
 	{ I2C_VENDOR_ID_HANTICK, I2C_PRODUCT_ID_HANTICK_5288,
-		I2C_HID_QUIRK_NO_IRQ_AFTER_RESET },
-	{ USB_VENDOR_ID_SIS_TOUCH, USB_DEVICE_ID_SIS10FB_TOUCH,
-		I2C_HID_QUIRK_RESEND_REPORT_DESCR },
+		I2C_HID_QUIRK_NO_IRQ_AFTER_RESET |
+		I2C_HID_QUIRK_NO_RUNTIME_PM },
 	{ 0, 0 }
 };
 
@@ -1105,7 +1104,9 @@ static int i2c_hid_probe(struct i2c_client *client,
 		goto err_mem_free;
 	}
 
-	pm_runtime_put(&client->dev);
+	if (!(ihid->quirks & I2C_HID_QUIRK_NO_RUNTIME_PM))
+		pm_runtime_put(&client->dev);
+
 	return 0;
 
 err_mem_free:
@@ -1130,7 +1131,8 @@ static int i2c_hid_remove(struct i2c_client *client)
 	struct i2c_hid *ihid = i2c_get_clientdata(client);
 	struct hid_device *hid;
 
-	pm_runtime_get_sync(&client->dev);
+	if (!(ihid->quirks & I2C_HID_QUIRK_NO_RUNTIME_PM))
+		pm_runtime_get_sync(&client->dev);
 	pm_runtime_disable(&client->dev);
 	pm_runtime_set_suspended(&client->dev);
 	pm_runtime_put_noidle(&client->dev);
@@ -1236,22 +1238,13 @@ static int i2c_hid_resume(struct device *dev)
 
 	/* Instead of resetting device, simply powers the device on. This
 	 * solves "incomplete reports" on Raydium devices 2386:3118 and
-	 * 2386:4B33
+	 * 2386:4B33 and fixes various SIS touchscreens no longer sending
+	 * data after a suspend/resume.
 	 */
 	ret = i2c_hid_set_power(client, I2C_HID_PWR_ON);
 	if (ret)
 		return ret;
 
-	/* Some devices need to re-send report descr cmd
-	 * after resume, after this it will be back normal.
-	 * otherwise it issues too many incomplete reports.
-	 */
-	if (ihid->quirks & I2C_HID_QUIRK_RESEND_REPORT_DESCR) {
-		ret = i2c_hid_command(client, &hid_report_descr_cmd, NULL, 0);
-		if (ret)
-			return ret;
-	}
-
 	if (hid->driver && hid->driver->reset_resume) {
 		ret = hid->driver->reset_resume(hid);
 		return ret;

+ 1 - 0
drivers/hid/intel-ish-hid/ipc/hw-ish.h

@@ -29,6 +29,7 @@
 #define CNL_Ax_DEVICE_ID	0x9DFC
 #define GLK_Ax_DEVICE_ID	0x31A2
 #define CNL_H_DEVICE_ID		0xA37C
+#define ICL_MOBILE_DEVICE_ID	0x34FC
 #define SPT_H_DEVICE_ID		0xA135
 
 #define	REVISION_ID_CHT_A0	0x6

+ 1 - 0
drivers/hid/intel-ish-hid/ipc/pci-ish.c

@@ -38,6 +38,7 @@ static const struct pci_device_id ish_pci_tbl[] = {
 	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, CNL_Ax_DEVICE_ID)},
 	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, GLK_Ax_DEVICE_ID)},
 	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, CNL_H_DEVICE_ID)},
+	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, ICL_MOBILE_DEVICE_ID)},
 	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, SPT_H_DEVICE_ID)},
 	{0, }
 };

+ 5 - 3
drivers/hv/connection.c

@@ -76,6 +76,7 @@ static int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo,
 					__u32 version)
 {
 	int ret = 0;
+	unsigned int cur_cpu;
 	struct vmbus_channel_initiate_contact *msg;
 	unsigned long flags;
 
@@ -118,9 +119,10 @@ static int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo,
 	 * the CPU attempting to connect may not be CPU 0.
 	 */
 	if (version >= VERSION_WIN8_1) {
-		msg->target_vcpu =
-			hv_cpu_number_to_vp_number(smp_processor_id());
-		vmbus_connection.connect_cpu = smp_processor_id();
+		cur_cpu = get_cpu();
+		msg->target_vcpu = hv_cpu_number_to_vp_number(cur_cpu);
+		vmbus_connection.connect_cpu = cur_cpu;
+		put_cpu();
 	} else {
 		msg->target_vcpu = 0;
 		vmbus_connection.connect_cpu = 0;
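
Note: the hunk above replaces two bare smp_processor_id() calls with a get_cpu()/put_cpu() pair, because this path can run with preemption enabled and the CPU number must stay stable while it is used. A hedged sketch of the pattern (current_vp_number() is only an illustrative wrapper):

static u32 current_vp_number(void)
{
	unsigned int cpu;
	u32 vp;

	cpu = get_cpu();			/* disables preemption */
	vp = hv_cpu_number_to_vp_number(cpu);	/* cannot migrate here */
	put_cpu();				/* re-enables preemption */

	return vp;
}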

+ 3 - 1
drivers/i2c/busses/i2c-designware-master.c

@@ -34,11 +34,11 @@ static void i2c_dw_configure_fifo_master(struct dw_i2c_dev *dev)
 
 
 static int i2c_dw_set_timings_master(struct dw_i2c_dev *dev)
 {
-	u32 ic_clk = i2c_dw_clk_rate(dev);
 	const char *mode_str, *fp_str = "";
 	u32 comp_param1;
 	u32 sda_falling_time, scl_falling_time;
 	struct i2c_timings *t = &dev->timings;
+	u32 ic_clk;
 	int ret;
 
 	ret = i2c_dw_acquire_lock(dev);
@@ -53,6 +53,7 @@ static int i2c_dw_set_timings_master(struct dw_i2c_dev *dev)
 
 	/* Calculate SCL timing parameters for standard mode if not set */
 	if (!dev->ss_hcnt || !dev->ss_lcnt) {
+		ic_clk = i2c_dw_clk_rate(dev);
 		dev->ss_hcnt =
 			i2c_dw_scl_hcnt(ic_clk,
 					4000,	/* tHD;STA = tHIGH = 4.0 us */
@@ -89,6 +90,7 @@ static int i2c_dw_set_timings_master(struct dw_i2c_dev *dev)
 	 * needed also in high speed mode.
 	 */
 	if (!dev->fs_hcnt || !dev->fs_lcnt) {
+		ic_clk = i2c_dw_clk_rate(dev);
 		dev->fs_hcnt =
 			i2c_dw_scl_hcnt(ic_clk,
 					600,	/* tHD;STA = tHIGH = 0.6 us */

+ 1 - 1
drivers/i2c/busses/i2c-isch.c

@@ -164,7 +164,7 @@ static s32 sch_access(struct i2c_adapter *adap, u16 addr,
 		 * run ~75 kHz instead which should do no harm.
 		 */
 		dev_notice(&sch_adapter.dev,
-			"Clock divider unitialized. Setting defaults\n");
+			"Clock divider uninitialized. Setting defaults\n");
 		outw(backbone_speed / (4 * 100), SMBHSTCLK);
 	}
 
 

+ 18 - 4
drivers/i2c/busses/i2c-qcom-geni.c

@@ -367,20 +367,26 @@ static int geni_i2c_rx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
 	dma_addr_t rx_dma;
 	enum geni_se_xfer_mode mode;
 	unsigned long time_left = XFER_TIMEOUT;
+	void *dma_buf;
 
 	gi2c->cur = msg;
-	mode = msg->len > 32 ? GENI_SE_DMA : GENI_SE_FIFO;
+	mode = GENI_SE_FIFO;
+	dma_buf = i2c_get_dma_safe_msg_buf(msg, 32);
+	if (dma_buf)
+		mode = GENI_SE_DMA;
+
 	geni_se_select_mode(&gi2c->se, mode);
 	writel_relaxed(msg->len, gi2c->se.base + SE_I2C_RX_TRANS_LEN);
 	geni_se_setup_m_cmd(&gi2c->se, I2C_READ, m_param);
 	if (mode == GENI_SE_DMA) {
 		int ret;
 
-		ret = geni_se_rx_dma_prep(&gi2c->se, msg->buf, msg->len,
+		ret = geni_se_rx_dma_prep(&gi2c->se, dma_buf, msg->len,
 								&rx_dma);
 		if (ret) {
 			mode = GENI_SE_FIFO;
 			geni_se_select_mode(&gi2c->se, mode);
+			i2c_put_dma_safe_msg_buf(dma_buf, msg, false);
 		}
 	}
 
@@ -393,6 +399,7 @@ static int geni_i2c_rx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
 		if (gi2c->err)
 			geni_i2c_rx_fsm_rst(gi2c);
 		geni_se_rx_dma_unprep(&gi2c->se, rx_dma, msg->len);
+		i2c_put_dma_safe_msg_buf(dma_buf, msg, !gi2c->err);
 	}
 	return gi2c->err;
 }
@@ -403,20 +410,26 @@ static int geni_i2c_tx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
 	dma_addr_t tx_dma;
 	enum geni_se_xfer_mode mode;
 	unsigned long time_left;
+	void *dma_buf;
 
 	gi2c->cur = msg;
-	mode = msg->len > 32 ? GENI_SE_DMA : GENI_SE_FIFO;
+	mode = GENI_SE_FIFO;
+	dma_buf = i2c_get_dma_safe_msg_buf(msg, 32);
+	if (dma_buf)
+		mode = GENI_SE_DMA;
+
 	geni_se_select_mode(&gi2c->se, mode);
 	writel_relaxed(msg->len, gi2c->se.base + SE_I2C_TX_TRANS_LEN);
 	geni_se_setup_m_cmd(&gi2c->se, I2C_WRITE, m_param);
 	if (mode == GENI_SE_DMA) {
 		int ret;
 
-		ret = geni_se_tx_dma_prep(&gi2c->se, msg->buf, msg->len,
+		ret = geni_se_tx_dma_prep(&gi2c->se, dma_buf, msg->len,
 								&tx_dma);
 		if (ret) {
 			mode = GENI_SE_FIFO;
 			geni_se_select_mode(&gi2c->se, mode);
+			i2c_put_dma_safe_msg_buf(dma_buf, msg, false);
 		}
 	}
 
@@ -432,6 +445,7 @@ static int geni_i2c_tx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
 		if (gi2c->err)
 			geni_i2c_tx_fsm_rst(gi2c);
 		geni_se_tx_dma_unprep(&gi2c->se, tx_dma, msg->len);
+		i2c_put_dma_safe_msg_buf(dma_buf, msg, !gi2c->err);
 	}
 	return gi2c->err;
 }
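
Note: both hunks above move the driver to i2c_get_dma_safe_msg_buf()/i2c_put_dma_safe_msg_buf(), which return a DMA-safe bounce buffer only for messages at or above the size threshold; a NULL return means "stay on the FIFO path". A hedged sketch of the general shape (struct example_dev and the two example_*_xfer() helpers are hypothetical placeholders):

static int xfer_one_msg(struct example_dev *edev, struct i2c_msg *msg)
{
	void *dma_buf = i2c_get_dma_safe_msg_buf(msg, 32);
	int ret;

	if (!dma_buf)
		return example_fifo_xfer(edev, msg);	/* short message: FIFO */

	ret = example_dma_xfer(edev, dma_buf, msg->len);

	/* copy back into msg->buf only if the transfer succeeded */
	i2c_put_dma_safe_msg_buf(dma_buf, msg, ret == 0);
	return ret;
}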

+ 1 - 0
drivers/i2c/busses/i2c-scmi.c

@@ -152,6 +152,7 @@ acpi_smbus_cmi_access(struct i2c_adapter *adap, u16 addr, unsigned short flags,
 			mt_params[3].type = ACPI_TYPE_INTEGER;
 			mt_params[3].integer.value = len;
 			mt_params[4].type = ACPI_TYPE_BUFFER;
+			mt_params[4].buffer.length = len;
 			mt_params[4].buffer.pointer = data->block + 1;
 		}
 		break;

+ 1 - 1
drivers/iommu/amd_iommu.c

@@ -3069,7 +3069,7 @@ static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
 		return 0;
 
 	offset_mask = pte_pgsize - 1;
-	__pte	    = *pte & PM_ADDR_MASK;
+	__pte	    = __sme_clr(*pte & PM_ADDR_MASK);
 
 	return (__pte & ~offset_mask) | (iova & offset_mask);
 }

+ 2 - 2
drivers/md/dm-cache-metadata.c

@@ -1455,8 +1455,8 @@ static int __load_mappings(struct dm_cache_metadata *cmd,
 		if (hints_valid) {
 			r = dm_array_cursor_next(&cmd->hint_cursor);
 			if (r) {
-				DMERR("dm_array_cursor_next for hint failed");
-				goto out;
+				dm_array_cursor_end(&cmd->hint_cursor);
+				hints_valid = false;
 			}
 		}
 
 

+ 7 - 2
drivers/md/dm-cache-target.c

@@ -3009,8 +3009,13 @@ static dm_cblock_t get_cache_dev_size(struct cache *cache)
 
 
 static bool can_resize(struct cache *cache, dm_cblock_t new_size)
 {
-	if (from_cblock(new_size) > from_cblock(cache->cache_size))
-		return true;
+	if (from_cblock(new_size) > from_cblock(cache->cache_size)) {
+		if (cache->sized) {
+			DMERR("%s: unable to extend cache due to missing cache table reload",
+			      cache_device_name(cache));
+			return false;
+		}
+	}
 
 	/*
 	 * We can't drop a dirty block when shrinking the cache.

+ 8 - 6
drivers/md/dm-mpath.c

@@ -806,19 +806,19 @@ static int parse_path_selector(struct dm_arg_set *as, struct priority_group *pg,
 }
 
 static int setup_scsi_dh(struct block_device *bdev, struct multipath *m,
-			 const char *attached_handler_name, char **error)
+			 const char **attached_handler_name, char **error)
 {
 	struct request_queue *q = bdev_get_queue(bdev);
 	int r;
 
 	if (test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags)) {
 retain:
-		if (attached_handler_name) {
+		if (*attached_handler_name) {
 			/*
 			 * Clear any hw_handler_params associated with a
 			 * handler that isn't already attached.
 			 */
-			if (m->hw_handler_name && strcmp(attached_handler_name, m->hw_handler_name)) {
+			if (m->hw_handler_name && strcmp(*attached_handler_name, m->hw_handler_name)) {
 				kfree(m->hw_handler_params);
 				m->hw_handler_params = NULL;
 			}
@@ -830,7 +830,8 @@ retain:
 			 * handler instead of the original table passed in.
 			 */
 			kfree(m->hw_handler_name);
-			m->hw_handler_name = attached_handler_name;
+			m->hw_handler_name = *attached_handler_name;
+			*attached_handler_name = NULL;
 		}
 	}
 
@@ -867,7 +868,7 @@ static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps
 	struct pgpath *p;
 	struct multipath *m = ti->private;
 	struct request_queue *q;
-	const char *attached_handler_name;
+	const char *attached_handler_name = NULL;
 
 	/* we need at least a path arg */
 	if (as->argc < 1) {
@@ -890,7 +891,7 @@ static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps
 	attached_handler_name = scsi_dh_attached_handler_name(q, GFP_KERNEL);
 	if (attached_handler_name || m->hw_handler_name) {
 		INIT_DELAYED_WORK(&p->activate_path, activate_path_work);
-		r = setup_scsi_dh(p->path.dev->bdev, m, attached_handler_name, &ti->error);
+		r = setup_scsi_dh(p->path.dev->bdev, m, &attached_handler_name, &ti->error);
 		if (r) {
 			dm_put_device(ti, p->path.dev);
 			goto bad;
@@ -905,6 +906,7 @@ static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps
 
 	return p;
  bad:
+	kfree(attached_handler_name);
 	free_pgpath(p);
 	return ERR_PTR(r);
 }

+ 1 - 1
drivers/md/dm-raid.c

@@ -3353,7 +3353,7 @@ static const char *sync_str(enum sync_state state)
 };
 
 /* Return enum sync_state for @mddev derived from @recovery flags */
-static const enum sync_state decipher_sync_action(struct mddev *mddev, unsigned long recovery)
+static enum sync_state decipher_sync_action(struct mddev *mddev, unsigned long recovery)
 {
 	if (test_bit(MD_RECOVERY_FROZEN, &recovery))
 		return st_frozen;

+ 2 - 4
drivers/md/dm-thin-metadata.c

@@ -832,10 +832,8 @@ static void __set_metadata_reserve(struct dm_pool_metadata *pmd)
 	if (r) {
 		DMERR("could not get size of metadata device");
 		pmd->metadata_reserve = max_blocks;
-	} else {
-		sector_div(total, 10);
-		pmd->metadata_reserve = min(max_blocks, total);
-	}
+	} else
+		pmd->metadata_reserve = min(max_blocks, div_u64(total, 10));
 }
 
 struct dm_pool_metadata *dm_pool_metadata_open(struct block_device *bdev,

+ 20 - 18
drivers/media/v4l2-core/v4l2-event.c

@@ -115,14 +115,6 @@ static void __v4l2_event_queue_fh(struct v4l2_fh *fh, const struct v4l2_event *e
 	if (sev == NULL)
 		return;
 
-	/*
-	 * If the event has been added to the fh->subscribed list, but its
-	 * add op has not completed yet elems will be 0, treat this as
-	 * not being subscribed.
-	 */
-	if (!sev->elems)
-		return;
-
 	/* Increase event sequence number on fh. */
 	fh->sequence++;
 
@@ -208,6 +200,7 @@ int v4l2_event_subscribe(struct v4l2_fh *fh,
 	struct v4l2_subscribed_event *sev, *found_ev;
 	unsigned long flags;
 	unsigned i;
+	int ret = 0;
 
 	if (sub->type == V4L2_EVENT_ALL)
 		return -EINVAL;
@@ -225,31 +218,36 @@ int v4l2_event_subscribe(struct v4l2_fh *fh,
 	sev->flags = sub->flags;
 	sev->fh = fh;
 	sev->ops = ops;
+	sev->elems = elems;
+
+	mutex_lock(&fh->subscribe_lock);
 
 	spin_lock_irqsave(&fh->vdev->fh_lock, flags);
 	found_ev = v4l2_event_subscribed(fh, sub->type, sub->id);
-	if (!found_ev)
-		list_add(&sev->list, &fh->subscribed);
 	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
 
 	if (found_ev) {
+		/* Already listening */
 		kvfree(sev);
-		return 0; /* Already listening */
+		goto out_unlock;
 	}
 
 	if (sev->ops && sev->ops->add) {
-		int ret = sev->ops->add(sev, elems);
+		ret = sev->ops->add(sev, elems);
 		if (ret) {
-			sev->ops = NULL;
-			v4l2_event_unsubscribe(fh, sub);
-			return ret;
+			kvfree(sev);
+			goto out_unlock;
 		}
 	}
 
-	/* Mark as ready for use */
-	sev->elems = elems;
+	spin_lock_irqsave(&fh->vdev->fh_lock, flags);
+	list_add(&sev->list, &fh->subscribed);
+	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
 
-	return 0;
+out_unlock:
+	mutex_unlock(&fh->subscribe_lock);
+
+	return ret;
 }
 EXPORT_SYMBOL_GPL(v4l2_event_subscribe);
 
@@ -288,6 +286,8 @@ int v4l2_event_unsubscribe(struct v4l2_fh *fh,
 		return 0;
 	}
 
+	mutex_lock(&fh->subscribe_lock);
+
 	spin_lock_irqsave(&fh->vdev->fh_lock, flags);
 
 	sev = v4l2_event_subscribed(fh, sub->type, sub->id);
@@ -305,6 +305,8 @@ int v4l2_event_unsubscribe(struct v4l2_fh *fh,
 	if (sev && sev->ops && sev->ops->del)
 		sev->ops->del(sev);
 
+	mutex_unlock(&fh->subscribe_lock);
+
 	kvfree(sev);
 
 	return 0;

+ 2 - 0
drivers/media/v4l2-core/v4l2-fh.c

@@ -45,6 +45,7 @@ void v4l2_fh_init(struct v4l2_fh *fh, struct video_device *vdev)
 	INIT_LIST_HEAD(&fh->available);
 	INIT_LIST_HEAD(&fh->subscribed);
 	fh->sequence = -1;
+	mutex_init(&fh->subscribe_lock);
 }
 EXPORT_SYMBOL_GPL(v4l2_fh_init);
 
@@ -90,6 +91,7 @@ void v4l2_fh_exit(struct v4l2_fh *fh)
 		return;
 	v4l_disable_media_source(fh->vdev);
 	v4l2_event_unsubscribe_all(fh);
+	mutex_destroy(&fh->subscribe_lock);
 	fh->vdev = NULL;
 }
 EXPORT_SYMBOL_GPL(v4l2_fh_exit);

+ 1 - 1
drivers/mmc/core/host.c

@@ -235,7 +235,7 @@ int mmc_of_parse(struct mmc_host *host)
 			host->caps |= MMC_CAP_NEEDS_POLL;
 
 		ret = mmc_gpiod_request_cd(host, "cd", 0, true,
-					   cd_debounce_delay_ms,
+					   cd_debounce_delay_ms * 1000,
 					   &cd_gpio_invert);
 		if (!ret)
 			dev_info(host->parent, "Got CD GPIO\n");

+ 1 - 1
drivers/mmc/core/slot-gpio.c

@@ -271,7 +271,7 @@ int mmc_gpiod_request_cd(struct mmc_host *host, const char *con_id,
 	if (debounce) {
 		ret = gpiod_set_debounce(desc, debounce);
 		if (ret < 0)
-			ctx->cd_debounce_delay_ms = debounce;
+			ctx->cd_debounce_delay_ms = debounce / 1000;
 	}
 
 	if (gpio_invert)
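
Note: the two one-line changes in host.c and slot-gpio.c above are a unit conversion — cd_debounce_delay_ms is kept in milliseconds while gpiod_set_debounce() takes microseconds. A minimal sketch of the relationship, assuming the same variables as the hunks:

	unsigned int debounce_us = cd_debounce_delay_ms * 1000;	/* ms -> us */

	if (gpiod_set_debounce(desc, debounce_us) < 0)
		ctx->cd_debounce_delay_ms = debounce_us / 1000;		/* us -> ms fallback */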

+ 2 - 1
drivers/mmc/host/renesas_sdhi_sys_dmac.c

@@ -498,7 +498,8 @@ static const struct soc_device_attribute gen3_soc_whitelist[] = {
 
 
 static int renesas_sdhi_sys_dmac_probe(struct platform_device *pdev)
 {
-	if (of_device_get_match_data(&pdev->dev) == &of_rcar_gen3_compatible &&
+	if ((of_device_get_match_data(&pdev->dev) == &of_rcar_gen3_compatible ||
+	    of_device_get_match_data(&pdev->dev) == &of_rcar_r8a7795_compatible) &&
 	    !soc_device_match(gen3_soc_whitelist))
 		return -ENODEV;
 
 

+ 37 - 28
drivers/net/bonding/bond_main.c

@@ -210,6 +210,7 @@ static void bond_get_stats(struct net_device *bond_dev,
 static void bond_slave_arr_handler(struct work_struct *work);
 static bool bond_time_in_interval(struct bonding *bond, unsigned long last_act,
 				  int mod);
+static void bond_netdev_notify_work(struct work_struct *work);
 
 /*---------------------------- General routines -----------------------------*/
 
@@ -1170,9 +1171,27 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
 		}
 	}
 
-	/* don't change skb->dev for link-local packets */
-	if (is_link_local_ether_addr(eth_hdr(skb)->h_dest))
+	/* Link-local multicast packets should be passed to the
+	 * stack on the link they arrive as well as pass them to the
+	 * bond-master device. These packets are mostly usable when
+	 * stack receives it with the link on which they arrive
+	 * (e.g. LLDP) they also must be available on master. Some of
+	 * the use cases include (but are not limited to): LLDP agents
+	 * that must be able to operate both on enslaved interfaces as
+	 * well as on bonds themselves; linux bridges that must be able
+	 * to process/pass BPDUs from attached bonds when any kind of
+	 * STP version is enabled on the network.
+	 */
+	if (is_link_local_ether_addr(eth_hdr(skb)->h_dest)) {
+		struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
+
+		if (nskb) {
+			nskb->dev = bond->dev;
+			nskb->queue_mapping = 0;
+			netif_rx(nskb);
+		}
 		return RX_HANDLER_PASS;
+	}
 	if (bond_should_deliver_exact_match(skb, slave, bond))
 		return RX_HANDLER_EXACT;
 
@@ -1269,6 +1288,8 @@ static struct slave *bond_alloc_slave(struct bonding *bond)
 			return NULL;
 		}
 	}
+	INIT_DELAYED_WORK(&slave->notify_work, bond_netdev_notify_work);
+
 	return slave;
 }
 
@@ -1276,6 +1297,7 @@ static void bond_free_slave(struct slave *slave)
 {
 	struct bonding *bond = bond_get_bond_by_slave(slave);
 
+	cancel_delayed_work_sync(&slave->notify_work);
 	if (BOND_MODE(bond) == BOND_MODE_8023AD)
 		kfree(SLAVE_AD_INFO(slave));
 
@@ -1297,39 +1319,26 @@ static void bond_fill_ifslave(struct slave *slave, struct ifslave *info)
 	info->link_failure_count = slave->link_failure_count;
 }
 
-static void bond_netdev_notify(struct net_device *dev,
-			       struct netdev_bonding_info *info)
-{
-	rtnl_lock();
-	netdev_bonding_info_change(dev, info);
-	rtnl_unlock();
-}
-
 static void bond_netdev_notify_work(struct work_struct *_work)
 {
-	struct netdev_notify_work *w =
-		container_of(_work, struct netdev_notify_work, work.work);
+	struct slave *slave = container_of(_work, struct slave,
+					   notify_work.work);
+
+	if (rtnl_trylock()) {
+		struct netdev_bonding_info binfo;
 
-	bond_netdev_notify(w->dev, &w->bonding_info);
-	dev_put(w->dev);
-	kfree(w);
+		bond_fill_ifslave(slave, &binfo.slave);
+		bond_fill_ifbond(slave->bond, &binfo.master);
+		netdev_bonding_info_change(slave->dev, &binfo);
+		rtnl_unlock();
+	} else {
+		queue_delayed_work(slave->bond->wq, &slave->notify_work, 1);
+	}
 }
 
 void bond_queue_slave_event(struct slave *slave)
 {
-	struct bonding *bond = slave->bond;
-	struct netdev_notify_work *nnw = kzalloc(sizeof(*nnw), GFP_ATOMIC);
-
-	if (!nnw)
-		return;
-
-	dev_hold(slave->dev);
-	nnw->dev = slave->dev;
-	bond_fill_ifslave(slave, &nnw->bonding_info.slave);
-	bond_fill_ifbond(bond, &nnw->bonding_info.master);
-	INIT_DELAYED_WORK(&nnw->work, bond_netdev_notify_work);
-
-	queue_delayed_work(slave->bond->wq, &nnw->work, 0);
+	queue_delayed_work(slave->bond->wq, &slave->notify_work, 0);
 }
 
 void bond_lower_state_changed(struct slave *slave)
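
Note: the reworked bond_netdev_notify_work() above uses the trylock-or-requeue idiom so a work item that needs the RTNL never blocks its workqueue. A hedged sketch of that idiom in isolation (example_notify_work() stands in for the real handler; the fields used are the ones this patch adds to struct slave):

static void example_notify_work(struct work_struct *_work)
{
	struct slave *slave = container_of(_work, struct slave,
					   notify_work.work);

	if (!rtnl_trylock()) {
		/* RTNL is busy: try again one tick later instead of sleeping */
		queue_delayed_work(slave->bond->wq, &slave->notify_work, 1);
		return;
	}

	/* ... RTNL-protected update goes here ... */

	rtnl_unlock();
}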

+ 2 - 2
drivers/net/dsa/b53/b53_common.c

@@ -1107,7 +1107,7 @@ void b53_vlan_add(struct dsa_switch *ds, int port,
 		b53_get_vlan_entry(dev, vid, vl);
 
 		vl->members |= BIT(port);
-		if (untagged)
+		if (untagged && !dsa_is_cpu_port(ds, port))
 			vl->untag |= BIT(port);
 		else
 			vl->untag &= ~BIT(port);
@@ -1149,7 +1149,7 @@ int b53_vlan_del(struct dsa_switch *ds, int port,
 				pvid = 0;
 		}
 
-		if (untagged)
+		if (untagged && !dsa_is_cpu_port(ds, port))
 			vl->untag &= ~(BIT(port));
 
 		b53_set_vlan_entry(dev, vid, vl);

+ 0 - 22
drivers/net/ethernet/amazon/ena/ena_netdev.c

@@ -2185,25 +2185,6 @@ error_drop_packet:
 	return NETDEV_TX_OK;
 }
 
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void ena_netpoll(struct net_device *netdev)
-{
-	struct ena_adapter *adapter = netdev_priv(netdev);
-	int i;
-
-	/* Dont schedule NAPI if the driver is in the middle of reset
-	 * or netdev is down.
-	 */
-
-	if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags) ||
-	    test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
-		return;
-
-	for (i = 0; i < adapter->num_queues; i++)
-		napi_schedule(&adapter->ena_napi[i].napi);
-}
-#endif /* CONFIG_NET_POLL_CONTROLLER */
-
 static u16 ena_select_queue(struct net_device *dev, struct sk_buff *skb,
 			    struct net_device *sb_dev,
 			    select_queue_fallback_t fallback)
@@ -2369,9 +2350,6 @@ static const struct net_device_ops ena_netdev_ops = {
 	.ndo_change_mtu		= ena_change_mtu,
 	.ndo_set_mac_address	= NULL,
 	.ndo_validate_addr	= eth_validate_addr,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-	.ndo_poll_controller	= ena_netpoll,
-#endif /* CONFIG_NET_POLL_CONTROLLER */
 };
 
 static int ena_device_validate_params(struct ena_adapter *adapter,

+ 6 - 4
drivers/net/ethernet/amd/declance.c

@@ -1031,6 +1031,7 @@ static int dec_lance_probe(struct device *bdev, const int type)
 	int i, ret;
 	unsigned long esar_base;
 	unsigned char *esar;
+	const char *desc;
 
 	if (dec_lance_debug && version_printed++ == 0)
 		printk(version);
@@ -1216,19 +1217,20 @@ static int dec_lance_probe(struct device *bdev, const int type)
 	 */
 	switch (type) {
 	case ASIC_LANCE:
-		printk("%s: IOASIC onboard LANCE", name);
+		desc = "IOASIC onboard LANCE";
 		break;
 	case PMAD_LANCE:
-		printk("%s: PMAD-AA", name);
+		desc = "PMAD-AA";
 		break;
 	case PMAX_LANCE:
-		printk("%s: PMAX onboard LANCE", name);
+		desc = "PMAX onboard LANCE";
 		break;
 	}
 	for (i = 0; i < 6; i++)
 		dev->dev_addr[i] = esar[i * 4];
 
-	printk(", addr = %pM, irq = %d\n", dev->dev_addr, dev->irq);
+	printk("%s: %s, addr = %pM, irq = %d\n",
+	       name, desc, dev->dev_addr, dev->irq);
 
 	dev->netdev_ops = &lance_netdev_ops;
 	dev->watchdog_timeo = 5*HZ;

+ 11 - 17
drivers/net/ethernet/broadcom/bcmsysport.c

@@ -1069,9 +1069,6 @@ static void bcm_sysport_resume_from_wol(struct bcm_sysport_priv *priv)
 {
 	u32 reg;
 
-	/* Stop monitoring MPD interrupt */
-	intrl2_0_mask_set(priv, INTRL2_0_MPD | INTRL2_0_BRCM_MATCH_TAG);
-
 	/* Disable RXCHK, active filters and Broadcom tag matching */
 	reg = rxchk_readl(priv, RXCHK_CONTROL);
 	reg &= ~(RXCHK_BRCM_TAG_MATCH_MASK <<
@@ -1081,6 +1078,17 @@ static void bcm_sysport_resume_from_wol(struct bcm_sysport_priv *priv)
 	/* Clear the MagicPacket detection logic */
 	mpd_enable_set(priv, false);
 
+	reg = intrl2_0_readl(priv, INTRL2_CPU_STATUS);
+	if (reg & INTRL2_0_MPD)
+		netdev_info(priv->netdev, "Wake-on-LAN (MPD) interrupt!\n");
+
+	if (reg & INTRL2_0_BRCM_MATCH_TAG) {
+		reg = rxchk_readl(priv, RXCHK_BRCM_TAG_MATCH_STATUS) &
+				  RXCHK_BRCM_TAG_MATCH_MASK;
+		netdev_info(priv->netdev,
+			    "Wake-on-LAN (filters 0x%02x) interrupt!\n", reg);
+	}
+
 	netif_dbg(priv, wol, priv->netdev, "resumed from WOL\n");
 }
 
@@ -1105,7 +1113,6 @@ static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id)
 	struct bcm_sysport_priv *priv = netdev_priv(dev);
 	struct bcm_sysport_tx_ring *txr;
 	unsigned int ring, ring_bit;
-	u32 reg;
 
 	priv->irq0_stat = intrl2_0_readl(priv, INTRL2_CPU_STATUS) &
 			  ~intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
@@ -1131,16 +1138,6 @@ static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id)
 	if (priv->irq0_stat & INTRL2_0_TX_RING_FULL)
 		bcm_sysport_tx_reclaim_all(priv);
 
-	if (priv->irq0_stat & INTRL2_0_MPD)
-		netdev_info(priv->netdev, "Wake-on-LAN (MPD) interrupt!\n");
-
-	if (priv->irq0_stat & INTRL2_0_BRCM_MATCH_TAG) {
-		reg = rxchk_readl(priv, RXCHK_BRCM_TAG_MATCH_STATUS) &
-				  RXCHK_BRCM_TAG_MATCH_MASK;
-		netdev_info(priv->netdev,
-			    "Wake-on-LAN (filters 0x%02x) interrupt!\n", reg);
-	}
-
 	if (!priv->is_lite)
 		goto out;
 
@@ -2641,9 +2638,6 @@ static int bcm_sysport_suspend_to_wol(struct bcm_sysport_priv *priv)
 	/* UniMAC receive needs to be turned on */
 	umac_enable_set(priv, CMD_RX_EN, 1);
 
-	/* Enable the interrupt wake-up source */
-	intrl2_0_mask_clear(priv, INTRL2_0_MPD | INTRL2_0_BRCM_MATCH_TAG);
-
 	netif_dbg(priv, wol, ndev, "entered WOL mode\n");
 
 	return 0;

+ 18 - 9
drivers/net/ethernet/broadcom/bnxt/bnxt.c

@@ -1884,8 +1884,11 @@ static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
 		if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) {
 			tx_pkts++;
 			/* return full budget so NAPI will complete. */
-			if (unlikely(tx_pkts > bp->tx_wake_thresh))
+			if (unlikely(tx_pkts > bp->tx_wake_thresh)) {
 				rx_pkts = budget;
+				raw_cons = NEXT_RAW_CMP(raw_cons);
+				break;
+			}
 		} else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
 			if (likely(budget))
 				rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &event);
@@ -1913,7 +1916,7 @@ static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
 		}
 		raw_cons = NEXT_RAW_CMP(raw_cons);
 
-		if (rx_pkts == budget)
+		if (rx_pkts && rx_pkts == budget)
 			break;
 	}
 
@@ -2027,8 +2030,12 @@ static int bnxt_poll(struct napi_struct *napi, int budget)
 	while (1) {
 		work_done += bnxt_poll_work(bp, bnapi, budget - work_done);
 
-		if (work_done >= budget)
+		if (work_done >= budget) {
+			if (!budget)
+				BNXT_CP_DB_REARM(cpr->cp_doorbell,
+						 cpr->cp_raw_cons);
 			break;
+		}
 
 		if (!bnxt_has_work(bp, cpr)) {
 			if (napi_complete_done(napi, work_done))
@@ -3010,10 +3017,11 @@ static void bnxt_free_hwrm_resources(struct bnxt *bp)
 {
 	struct pci_dev *pdev = bp->pdev;
 
-	dma_free_coherent(&pdev->dev, PAGE_SIZE, bp->hwrm_cmd_resp_addr,
-			  bp->hwrm_cmd_resp_dma_addr);
-
-	bp->hwrm_cmd_resp_addr = NULL;
+	if (bp->hwrm_cmd_resp_addr) {
+		dma_free_coherent(&pdev->dev, PAGE_SIZE, bp->hwrm_cmd_resp_addr,
+				  bp->hwrm_cmd_resp_dma_addr);
+		bp->hwrm_cmd_resp_addr = NULL;
+	}
 }
 
 static int bnxt_alloc_hwrm_resources(struct bnxt *bp)
@@ -4643,7 +4651,7 @@ __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct hwrm_func_cfg_input *req,
 				      FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
 		enables |= ring_grps ?
 			   FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
-		enables |= vnics ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0;
+		enables |= vnics ? FUNC_CFG_REQ_ENABLES_NUM_VNICS : 0;
 
 		req->num_rx_rings = cpu_to_le16(rx_rings);
 		req->num_hw_ring_grps = cpu_to_le16(ring_grps);
@@ -8614,7 +8622,7 @@ static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
 	*max_tx = hw_resc->max_tx_rings;
 	*max_rx = hw_resc->max_rx_rings;
 	*max_cp = min_t(int, bnxt_get_max_func_cp_rings_for_en(bp),
-			hw_resc->max_irqs);
+			hw_resc->max_irqs - bnxt_get_ulp_msix_num(bp));
 	*max_cp = min_t(int, *max_cp, hw_resc->max_stat_ctxs);
 	max_ring_grps = hw_resc->max_hw_ring_grps;
 	if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) {
@@ -9050,6 +9058,7 @@ init_err_cleanup_tc:
 	bnxt_clear_int_mode(bp);
 
 init_err_pci_clean:
+	bnxt_free_hwrm_resources(bp);
 	bnxt_cleanup_pci(bp);
 
 init_err_free:

+ 3 - 3
drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c

@@ -98,13 +98,13 @@ static int bnxt_hwrm_queue_cos2bw_cfg(struct bnxt *bp, struct ieee_ets *ets,
 
 
 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_COS2BW_CFG, -1, -1);
 	for (i = 0; i < max_tc; i++) {
-		u8 qidx;
+		u8 qidx = bp->tc_to_qidx[i];
 
 		req.enables |= cpu_to_le32(
-			QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID0_VALID << i);
+			QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID0_VALID <<
+			qidx);
 
 		memset(&cos2bw, 0, sizeof(cos2bw));
-		qidx = bp->tc_to_qidx[i];
 		cos2bw.queue_id = bp->q_info[qidx].queue_id;
 		if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_STRICT) {
 			cos2bw.tsa =

+ 1 - 0
drivers/net/ethernet/cadence/macb_main.c

@@ -2160,6 +2160,7 @@ static void macb_configure_dma(struct macb *bp)
 		else
 			dmacfg &= ~GEM_BIT(TXCOEN);
 
+		dmacfg &= ~GEM_BIT(ADDR64);
 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 		if (bp->hw_dma_cap & HW_DMA_CAP_64B)
 			dmacfg |= GEM_BIT(ADDR64);

+ 17 - 0
drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c

@@ -2159,6 +2159,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
 			return -EPERM;
 		if (copy_from_user(&t, useraddr, sizeof(t)))
 			return -EFAULT;
+		if (t.cmd != CHELSIO_SET_QSET_PARAMS)
+			return -EINVAL;
 		if (t.qset_idx >= SGE_QSETS)
 			return -EINVAL;
 		if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
@@ -2258,6 +2260,9 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
 		if (copy_from_user(&t, useraddr, sizeof(t)))
 			return -EFAULT;
 
+		if (t.cmd != CHELSIO_GET_QSET_PARAMS)
+			return -EINVAL;
+
 		/* Display qsets for all ports when offload enabled */
 		if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
 			q1 = 0;
@@ -2303,6 +2308,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
 			return -EBUSY;
 		if (copy_from_user(&edata, useraddr, sizeof(edata)))
 			return -EFAULT;
+		if (edata.cmd != CHELSIO_SET_QSET_NUM)
+			return -EINVAL;
 		if (edata.val < 1 ||
 			(edata.val > 1 && !(adapter->flags & USING_MSIX)))
 			return -EINVAL;
@@ -2343,6 +2350,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
 			return -EPERM;
 		if (copy_from_user(&t, useraddr, sizeof(t)))
 			return -EFAULT;
+		if (t.cmd != CHELSIO_LOAD_FW)
+			return -EINVAL;
 		/* Check t.len sanity ? */
 		fw_data = memdup_user(useraddr + sizeof(t), t.len);
 		if (IS_ERR(fw_data))
@@ -2366,6 +2375,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
 			return -EBUSY;
 		if (copy_from_user(&m, useraddr, sizeof(m)))
 			return -EFAULT;
+		if (m.cmd != CHELSIO_SETMTUTAB)
+			return -EINVAL;
 		if (m.nmtus != NMTUS)
 			return -EINVAL;
 		if (m.mtus[0] < 81)	/* accommodate SACK */
@@ -2407,6 +2418,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
 			return -EBUSY;
 		if (copy_from_user(&m, useraddr, sizeof(m)))
 			return -EFAULT;
+		if (m.cmd != CHELSIO_SET_PM)
+			return -EINVAL;
 		if (!is_power_of_2(m.rx_pg_sz) ||
 			!is_power_of_2(m.tx_pg_sz))
 			return -EINVAL;	/* not power of 2 */
@@ -2440,6 +2453,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
 			return -EIO;	/* need the memory controllers */
 		if (copy_from_user(&t, useraddr, sizeof(t)))
 			return -EFAULT;
+		if (t.cmd != CHELSIO_GET_MEM)
+			return -EINVAL;
 		if ((t.addr & 7) || (t.len & 7))
 			return -EINVAL;
 		if (t.mem_id == MEM_CM)
@@ -2492,6 +2507,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
 			return -EAGAIN;
 		if (copy_from_user(&t, useraddr, sizeof(t)))
 			return -EFAULT;
+		if (t.cmd != CHELSIO_SET_TRACE_FILTER)
+			return -EINVAL;
 
 		tp = (const struct trace_params *)&t.sip;
 		if (t.config_tx)

+ 1 - 4
drivers/net/ethernet/emulex/benet/be_main.c

@@ -4002,8 +4002,6 @@ static int be_enable_vxlan_offloads(struct be_adapter *adapter)
 	netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
 				   NETIF_F_TSO | NETIF_F_TSO6 |
 				   NETIF_F_GSO_UDP_TUNNEL;
-	netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
-	netdev->features |= NETIF_F_GSO_UDP_TUNNEL;
 
 	dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
 		 be16_to_cpu(port));
@@ -4025,8 +4023,6 @@ static void be_disable_vxlan_offloads(struct be_adapter *adapter)
 	adapter->vxlan_port = 0;
 
 	netdev->hw_enc_features = 0;
-	netdev->hw_features &= ~(NETIF_F_GSO_UDP_TUNNEL);
-	netdev->features &= ~(NETIF_F_GSO_UDP_TUNNEL);
 }
 
 static void be_calculate_vf_res(struct be_adapter *adapter, u16 num_vfs,
@@ -5320,6 +5316,7 @@ static void be_netdev_init(struct net_device *netdev)
 	struct be_adapter *adapter = netdev_priv(netdev);
 
 	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
+		NETIF_F_GSO_UDP_TUNNEL |
 		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
 		NETIF_F_HW_VLAN_CTAG_TX;
 	if ((be_if_cap_flags(adapter) & BE_IF_FLAGS_RSS))

+ 4 - 4
drivers/net/ethernet/freescale/fec_main.c

@@ -1158,7 +1158,7 @@ static void fec_enet_timeout_work(struct work_struct *work)
 		napi_disable(&fep->napi);
 		netif_tx_lock_bh(ndev);
 		fec_restart(ndev);
-		netif_wake_queue(ndev);
+		netif_tx_wake_all_queues(ndev);
 		netif_tx_unlock_bh(ndev);
 		napi_enable(&fep->napi);
 	}
@@ -1273,7 +1273,7 @@ skb_done:
 
 		/* Since we have freed up a buffer, the ring is no longer full
 		 */
-		if (netif_queue_stopped(ndev)) {
+		if (netif_tx_queue_stopped(nq)) {
 			entries_free = fec_enet_get_free_txdesc_num(txq);
 			if (entries_free >= txq->tx_wake_threshold)
 				netif_tx_wake_queue(nq);
@@ -1746,7 +1746,7 @@ static void fec_enet_adjust_link(struct net_device *ndev)
 			napi_disable(&fep->napi);
 			netif_tx_lock_bh(ndev);
 			fec_restart(ndev);
-			netif_wake_queue(ndev);
+			netif_tx_wake_all_queues(ndev);
 			netif_tx_unlock_bh(ndev);
 			napi_enable(&fep->napi);
 		}
@@ -2247,7 +2247,7 @@ static int fec_enet_set_pauseparam(struct net_device *ndev,
 		napi_disable(&fep->napi);
 		netif_tx_lock_bh(ndev);
 		fec_restart(ndev);
-		netif_wake_queue(ndev);
+		netif_tx_wake_all_queues(ndev);
 		netif_tx_unlock_bh(ndev);
 		napi_enable(&fep->napi);
 	}

+ 1 - 1
drivers/net/ethernet/hisilicon/hns/hnae.c

@@ -84,7 +84,7 @@ static void hnae_unmap_buffer(struct hnae_ring *ring, struct hnae_desc_cb *cb)
 	if (cb->type == DESC_TYPE_SKB)
 		dma_unmap_single(ring_to_dev(ring), cb->dma, cb->length,
 				 ring_to_dma_dir(ring));
-	else
+	else if (cb->length)
 		dma_unmap_page(ring_to_dev(ring), cb->dma, cb->length,
 			       ring_to_dma_dir(ring));
 }

+ 19 - 29
drivers/net/ethernet/hisilicon/hns/hns_enet.c

@@ -40,9 +40,9 @@
 #define SKB_TMP_LEN(SKB) \
 	(((SKB)->transport_header - (SKB)->mac_header) + tcp_hdrlen(SKB))
 
-static void fill_v2_desc(struct hnae_ring *ring, void *priv,
-			 int size, dma_addr_t dma, int frag_end,
-			 int buf_num, enum hns_desc_type type, int mtu)
+static void fill_v2_desc_hw(struct hnae_ring *ring, void *priv, int size,
+			    int send_sz, dma_addr_t dma, int frag_end,
+			    int buf_num, enum hns_desc_type type, int mtu)
 {
 	struct hnae_desc *desc = &ring->desc[ring->next_to_use];
 	struct hnae_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
@@ -64,7 +64,7 @@ static void fill_v2_desc(struct hnae_ring *ring, void *priv,
 	desc_cb->type = type;
 
 	desc->addr = cpu_to_le64(dma);
-	desc->tx.send_size = cpu_to_le16((u16)size);
+	desc->tx.send_size = cpu_to_le16((u16)send_sz);
 
 	/* config bd buffer end */
 	hnae_set_bit(rrcfv, HNSV2_TXD_VLD_B, 1);
@@ -133,6 +133,14 @@ static void fill_v2_desc(struct hnae_ring *ring, void *priv,
 	ring_ptr_move_fw(ring, next_to_use);
 }
 
+static void fill_v2_desc(struct hnae_ring *ring, void *priv,
+			 int size, dma_addr_t dma, int frag_end,
+			 int buf_num, enum hns_desc_type type, int mtu)
+{
+	fill_v2_desc_hw(ring, priv, size, size, dma, frag_end,
+			buf_num, type, mtu);
+}
+
 static const struct acpi_device_id hns_enet_acpi_match[] = {
 	{ "HISI00C1", 0 },
 	{ "HISI00C2", 0 },
@@ -289,15 +297,15 @@ static void fill_tso_desc(struct hnae_ring *ring, void *priv,
 
 	/* when the frag size is bigger than hardware, split this frag */
 	for (k = 0; k < frag_buf_num; k++)
-		fill_v2_desc(ring, priv,
-			     (k == frag_buf_num - 1) ?
+		fill_v2_desc_hw(ring, priv, k == 0 ? size : 0,
+				(k == frag_buf_num - 1) ?
 					sizeoflast : BD_MAX_SEND_SIZE,
-			     dma + BD_MAX_SEND_SIZE * k,
-			     frag_end && (k == frag_buf_num - 1) ? 1 : 0,
-			     buf_num,
-			     (type == DESC_TYPE_SKB && !k) ?
+				dma + BD_MAX_SEND_SIZE * k,
+				frag_end && (k == frag_buf_num - 1) ? 1 : 0,
+				buf_num,
+				(type == DESC_TYPE_SKB && !k) ?
 					DESC_TYPE_SKB : DESC_TYPE_PAGE,
-			     mtu);
+				mtu);
 }
 
 netdev_tx_t hns_nic_net_xmit_hw(struct net_device *ndev,
@@ -1495,21 +1503,6 @@ static int hns_nic_do_ioctl(struct net_device *netdev, struct ifreq *ifr,
 	return phy_mii_ioctl(phy_dev, ifr, cmd);
 }
 
-/* use only for netconsole to poll with the device without interrupt */
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void hns_nic_poll_controller(struct net_device *ndev)
-{
-	struct hns_nic_priv *priv = netdev_priv(ndev);
-	unsigned long flags;
-	int i;
-
-	local_irq_save(flags);
-	for (i = 0; i < priv->ae_handle->q_num * 2; i++)
-		napi_schedule(&priv->ring_data[i].napi);
-	local_irq_restore(flags);
-}
-#endif
-
 static netdev_tx_t hns_nic_net_xmit(struct sk_buff *skb,
 				    struct net_device *ndev)
 {
@@ -1962,9 +1955,6 @@ static const struct net_device_ops hns_nic_netdev_ops = {
 	.ndo_set_features = hns_nic_set_features,
 	.ndo_fix_features = hns_nic_fix_features,
 	.ndo_get_stats64 = hns_nic_get_stats64,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-	.ndo_poll_controller = hns_nic_poll_controller,
-#endif
 	.ndo_set_rx_mode = hns_nic_set_rx_mode,
 	.ndo_select_queue = hns_nic_select_queue,
 };

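The refactor splits descriptor filling into fill_v2_desc_hw(), which takes the DMA bookkeeping length (size) and the per-BD hardware send size (send_sz) separately, while fill_v2_desc() stays as a thin wrapper that passes the same value for both. In the TSO path a fragment larger than BD_MAX_SEND_SIZE is mapped once but spread over several BDs; after this change only the first BD records the full mapped length (k == 0 ? size : 0) and every BD still gets its real send size, so the guarded unmap in hnae.c above matches the single mapping. A small, self-contained sketch of that split arithmetic (illustrative only; the constant value is assumed, not the driver's define):

	/* Illustrative, standalone sketch of how one mapped fragment is spread
	 * over several BDs: the hardware send size is per-BD, but the DMA length
	 * used later for unmapping is recorded only on the first BD.
	 */
	#include <stdio.h>

	#define BD_MAX_SEND_SIZE 8192	/* assumed value, for illustration only */

	int main(void)
	{
		int size = 20000;	/* length of one DMA-mapped TSO fragment */
		int frag_buf_num = (size + BD_MAX_SEND_SIZE - 1) / BD_MAX_SEND_SIZE;
		int sizeoflast = size % BD_MAX_SEND_SIZE;
		int k;

		if (sizeoflast == 0)
			sizeoflast = BD_MAX_SEND_SIZE;

		for (k = 0; k < frag_buf_num; k++) {
			int send_sz = (k == frag_buf_num - 1) ? sizeoflast
							      : BD_MAX_SEND_SIZE;
			int unmap_len = (k == 0) ? size : 0;	/* only BD 0 keeps the length */

			printf("BD %d: hw send_size=%d, recorded unmap length=%d\n",
			       k, send_sz, unmap_len);
		}
		return 0;
	}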
+ 0 - 20
drivers/net/ethernet/huawei/hinic/hinic_main.c

@@ -789,23 +789,6 @@ static void hinic_get_stats64(struct net_device *netdev,
 	stats->tx_errors  = nic_tx_stats->tx_dropped;
 }
 
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void hinic_netpoll(struct net_device *netdev)
-{
-	struct hinic_dev *nic_dev = netdev_priv(netdev);
-	int i, num_qps;
-
-	num_qps = hinic_hwdev_num_qps(nic_dev->hwdev);
-	for (i = 0; i < num_qps; i++) {
-		struct hinic_txq *txq = &nic_dev->txqs[i];
-		struct hinic_rxq *rxq = &nic_dev->rxqs[i];
-
-		napi_schedule(&txq->napi);
-		napi_schedule(&rxq->napi);
-	}
-}
-#endif
-
 static const struct net_device_ops hinic_netdev_ops = {
 	.ndo_open = hinic_open,
 	.ndo_stop = hinic_close,
@@ -818,9 +801,6 @@ static const struct net_device_ops hinic_netdev_ops = {
 	.ndo_start_xmit = hinic_xmit_frame,
 	.ndo_tx_timeout = hinic_tx_timeout,
 	.ndo_get_stats64 = hinic_get_stats64,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-	.ndo_poll_controller = hinic_netpoll,
-#endif
 };
 
 static void netdev_features_init(struct net_device *netdev)

+ 0 - 14
drivers/net/ethernet/ibm/ehea/ehea_main.c

@@ -921,17 +921,6 @@ static int ehea_poll(struct napi_struct *napi, int budget)
 	return rx;
 }
 
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void ehea_netpoll(struct net_device *dev)
-{
-	struct ehea_port *port = netdev_priv(dev);
-	int i;
-
-	for (i = 0; i < port->num_def_qps; i++)
-		napi_schedule(&port->port_res[i].napi);
-}
-#endif
-
 static irqreturn_t ehea_recv_irq_handler(int irq, void *param)
 {
 	struct ehea_port_res *pr = param;
@@ -2953,9 +2942,6 @@ static const struct net_device_ops ehea_netdev_ops = {
 	.ndo_open		= ehea_open,
 	.ndo_stop		= ehea_stop,
 	.ndo_start_xmit		= ehea_start_xmit,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-	.ndo_poll_controller	= ehea_netpoll,
-#endif
 	.ndo_get_stats64	= ehea_get_stats64,
 	.ndo_set_mac_address	= ehea_set_mac_addr,
 	.ndo_validate_addr	= eth_validate_addr,

+ 0 - 16
drivers/net/ethernet/ibm/ibmvnic.c

@@ -2207,19 +2207,6 @@ restart_poll:
 	return frames_processed;
 }
 
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void ibmvnic_netpoll_controller(struct net_device *dev)
-{
-	struct ibmvnic_adapter *adapter = netdev_priv(dev);
-	int i;
-
-	replenish_pools(netdev_priv(dev));
-	for (i = 0; i < adapter->req_rx_queues; i++)
-		ibmvnic_interrupt_rx(adapter->rx_scrq[i]->irq,
-				     adapter->rx_scrq[i]);
-}
-#endif
-
 static int wait_for_reset(struct ibmvnic_adapter *adapter)
 {
 	int rc, ret;
@@ -2292,9 +2279,6 @@ static const struct net_device_ops ibmvnic_netdev_ops = {
 	.ndo_set_mac_address	= ibmvnic_set_mac,
 	.ndo_validate_addr	= eth_validate_addr,
 	.ndo_tx_timeout		= ibmvnic_tx_timeout,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-	.ndo_poll_controller	= ibmvnic_netpoll_controller,
-#endif
 	.ndo_change_mtu		= ibmvnic_change_mtu,
 	.ndo_features_check     = ibmvnic_features_check,
 };

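The hns_enet, hinic, ehea and ibmvnic hunks above all drop their .ndo_poll_controller implementations rather than fix them. These callbacks did little more than schedule the driver's existing NAPI contexts (ibmvnic's also replenished pools and poked its RX interrupt handler directly); around this time the netpoll core was reworked so that ndo_poll_controller is optional and netpoll drives a device's NAPI instances itself, so drivers that keep interrupts enabled can simply delete the hook. That rationale comes from the surrounding kernel work on netpoll, not from this diff alone. The removed pattern looked roughly like this (illustrative, hypothetical driver types and names):

	/* Illustrative shape of the removed callbacks (hypothetical names): each
	 * one just scheduled the driver's NAPI contexts, which the netpoll core
	 * can now do on its own when ndo_poll_controller is absent.
	 */
	struct example_queue {
		struct napi_struct napi;
	};

	struct example_priv {
		struct example_queue *queue;
		int num_queues;
	};

	#ifdef CONFIG_NET_POLL_CONTROLLER
	static void example_netpoll(struct net_device *ndev)
	{
		struct example_priv *priv = netdev_priv(ndev);
		int i;

		for (i = 0; i < priv->num_queues; i++)
			napi_schedule(&priv->queue[i].napi);
	}
	#endif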
+ 7 - 5
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c

@@ -3196,11 +3196,13 @@ int ixgbe_poll(struct napi_struct *napi, int budget)
 		return budget;
 
 	/* all work done, exit the polling mode */
-	napi_complete_done(napi, work_done);
-	if (adapter->rx_itr_setting & 1)
-		ixgbe_set_itr(q_vector);
-	if (!test_bit(__IXGBE_DOWN, &adapter->state))
-		ixgbe_irq_enable_queues(adapter, BIT_ULL(q_vector->v_idx));
+	if (likely(napi_complete_done(napi, work_done))) {
+		if (adapter->rx_itr_setting & 1)
+			ixgbe_set_itr(q_vector);
+		if (!test_bit(__IXGBE_DOWN, &adapter->state))
+			ixgbe_irq_enable_queues(adapter,
+						BIT_ULL(q_vector->v_idx));
+	}
 
 	return min(work_done, budget - 1);
 }

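ixgbe_poll() now only re-arms its queue interrupt when napi_complete_done() returns true. That helper returns false when the NAPI context was not actually completed, for instance when busy polling still owns it, and re-enabling the interrupt in that case lets hardware interrupts race with the busy-poll loop. A generic poll skeleton showing the idiom (hypothetical helper names, not ixgbe's code):

	/* Generic NAPI poll skeleton using the idiom adopted above; the helpers
	 * example_clean_rings() and example_enable_irqs() are hypothetical.
	 * Interrupts are re-armed only if napi_complete_done() reports that
	 * polling really ended.
	 */
	static int example_poll(struct napi_struct *napi, int budget)
	{
		int work_done = example_clean_rings(napi, budget);

		if (work_done >= budget)
			return budget;			/* stay in polling mode */

		if (likely(napi_complete_done(napi, work_done)))
			example_enable_irqs(napi);	/* safe to take interrupts again */

		return min(work_done, budget - 1);
	}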
+ 5 - 4
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c

@@ -1725,7 +1725,7 @@ static void mvpp2_txq_desc_put(struct mvpp2_tx_queue *txq)
 }
 
 /* Set Tx descriptors fields relevant for CSUM calculation */
-static u32 mvpp2_txq_desc_csum(int l3_offs, int l3_proto,
+static u32 mvpp2_txq_desc_csum(int l3_offs, __be16 l3_proto,
 			       int ip_hdr_len, int l4_proto)
 {
 	u32 command;
@@ -2600,14 +2600,15 @@ static u32 mvpp2_skb_tx_csum(struct mvpp2_port *port, struct sk_buff *skb)
 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
 		int ip_hdr_len = 0;
 		u8 l4_proto;
+		__be16 l3_proto = vlan_get_protocol(skb);
 
-		if (skb->protocol == htons(ETH_P_IP)) {
+		if (l3_proto == htons(ETH_P_IP)) {
 			struct iphdr *ip4h = ip_hdr(skb);
 
 			/* Calculate IPv4 checksum and L4 checksum */
 			ip_hdr_len = ip4h->ihl;
 			l4_proto = ip4h->protocol;
-		} else if (skb->protocol == htons(ETH_P_IPV6)) {
+		} else if (l3_proto == htons(ETH_P_IPV6)) {
 			struct ipv6hdr *ip6h = ipv6_hdr(skb);
 
 			/* Read l4_protocol from one of IPv6 extra headers */
@@ -2619,7 +2620,7 @@ static u32 mvpp2_skb_tx_csum(struct mvpp2_port *port, struct sk_buff *skb)
 		}
 
 		return mvpp2_txq_desc_csum(skb_network_offset(skb),
-				skb->protocol, ip_hdr_len, l4_proto);
+					   l3_proto, ip_hdr_len, l4_proto);
 	}
 
 	return MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE;

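Two related fixes here: mvpp2_txq_desc_csum() now takes the L3 ethertype as __be16, matching the htons() constants it is compared against, and the TX checksum path derives that ethertype with vlan_get_protocol() instead of reading skb->protocol directly, since on a VLAN-tagged frame skb->protocol may still be the 802.1Q ethertype rather than the encapsulated IPv4/IPv6 one. A simplified sketch of the dispatch (hypothetical helper name; only the vlan_get_protocol() usage is taken from the diff):

	/* Simplified sketch, not mvpp2's exact code: vlan_get_protocol() returns
	 * the encapsulated ethertype even when skb->protocol is still the VLAN
	 * tag protocol, and keeping it __be16 matches the htons() comparisons.
	 * example_build_csum_cmd() is a hypothetical helper.
	 */
	static u32 example_skb_tx_csum(struct sk_buff *skb)
	{
		__be16 l3_proto = vlan_get_protocol(skb);

		if (l3_proto == htons(ETH_P_IP))
			return example_build_csum_cmd(skb, l3_proto,
						      ip_hdr(skb)->protocol);
		if (l3_proto == htons(ETH_P_IPV6))
			return example_build_csum_cmd(skb, l3_proto,
						      ipv6_hdr(skb)->nexthdr);

		return MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE;
	}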
+ 1 - 0
drivers/net/ethernet/mellanox/mlx5/core/en.h

@@ -54,6 +54,7 @@
 #include "en_stats.h"
 #include "en/fs.h"
 
+extern const struct net_device_ops mlx5e_netdev_ops;
 struct page_pool;
 
 #define MLX5E_METADATA_ETHER_TYPE (0x8CE4)

+ 2 - 0
drivers/net/ethernet/mellanox/mlx5/core/en/fs.h

@@ -16,6 +16,8 @@ struct mlx5e_tc_table {
 
 	DECLARE_HASHTABLE(mod_hdr_tbl, 8);
 	DECLARE_HASHTABLE(hairpin_tbl, 8);
+
+	struct notifier_block     netdevice_nb;
 };
 
 struct mlx5e_flow_table {

+ 1 - 1
drivers/net/ethernet/mellanox/mlx5/core/en_main.c

@@ -4315,7 +4315,7 @@ static int mlx5e_xdp(struct net_device *dev, struct netdev_bpf *xdp)
 	}
 }
 
-static const struct net_device_ops mlx5e_netdev_ops = {
+const struct net_device_ops mlx5e_netdev_ops = {
 	.ndo_open                = mlx5e_open,
 	.ndo_stop                = mlx5e_close,
 	.ndo_start_xmit          = mlx5e_xmit,

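The last three mlx5 hunks belong together: mlx5e_netdev_ops loses its static qualifier so other mlx5e compilation units can reference it, and the TC table gains a struct notifier_block. A plausible but hedged reading (this diff does not show the consumer) is that the TC code registers a netdevice notifier and recognizes mlx5e netdevs by comparing their ops pointer, for example to clean up hairpin state when a peer netdev goes away. An illustrative notifier using that identity check (hypothetical handler name and event handling; only the ops comparison is suggested by the diff):

	/* Illustrative netdevice notifier: exporting mlx5e_netdev_ops lets other
	 * mlx5e code tell "our" netdevs apart by their ops pointer.  Handler name
	 * and the UNREGISTER handling are hypothetical.
	 */
	static int example_netdevice_event(struct notifier_block *nb,
					   unsigned long event, void *ptr)
	{
		struct net_device *ndev = netdev_notifier_info_to_dev(ptr);

		if (ndev->netdev_ops != &mlx5e_netdev_ops)
			return NOTIFY_DONE;

		if (event == NETDEV_UNREGISTER) {
			/* e.g. drop TC state that still references this netdev */
		}

		return NOTIFY_DONE;
	}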
Some files were not shown because too many files changed in this diff