
Merge remote-tracking branch 'tip/perf/urgent' into perf/core

To pick up fixes.

Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Arnaldo Carvalho de Melo · 7 years ago
parent commit 7869e58894
100 changed files with 735 additions and 433 deletions
  1. 1 0
      Documentation/devicetree/bindings/net/dsa/b53.txt
  2. 1 1
      Documentation/i2c/busses/i2c-ocores
  3. 8 0
      MAINTAINERS
  4. 4 0
      arch/mips/kernel/process.c
  5. 1 1
      arch/mips/kernel/ptrace.c
  6. 1 1
      arch/mips/kernel/ptrace32.c
  7. 1 1
      arch/s390/purgatory/Makefile
  8. 2 2
      drivers/atm/zatm.c
  9. 2 2
      drivers/crypto/inside-secure/safexcel.c
  10. 29 15
      drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
  11. 4 11
      drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
  12. 1 0
      drivers/gpu/drm/drm_dp_helper.c
  13. 11 4
      drivers/gpu/drm/i915/i915_query.c
  14. 40 11
      drivers/gpu/drm/i915/intel_lvds.c
  15. 1 1
      drivers/gpu/drm/meson/meson_dw_hdmi.c
  16. 4 1
      drivers/gpu/drm/omapdrm/dss/sdi.c
  17. 3 3
      drivers/hwtracing/intel_th/msu.c
  18. 4 3
      drivers/hwtracing/stm/core.c
  19. 1 1
      drivers/i2c/busses/i2c-ocores.c
  20. 1 0
      drivers/iio/adc/Kconfig
  21. 24 51
      drivers/iio/adc/ad7793.c
  22. 37 4
      drivers/iio/adc/at91-sama5d2_adc.c
  23. 14 3
      drivers/iio/adc/stm32-dfsdm-adc.c
  24. 1 1
      drivers/iio/buffer/industrialio-buffer-dma.c
  25. 9 2
      drivers/iio/buffer/kfifo_buf.c
  26. 4 4
      drivers/iio/common/hid-sensors/hid-sensor-trigger.c
  27. 1 1
      drivers/infiniband/core/cache.c
  28. 54 1
      drivers/infiniband/hw/bnxt_re/main.c
  29. 60 34
      drivers/infiniband/hw/bnxt_re/qplib_fp.c
  30. 3 0
      drivers/infiniband/hw/bnxt_re/qplib_fp.h
  31. 43 18
      drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
  32. 3 0
      drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
  33. 1 1
      drivers/infiniband/ulp/srpt/Kconfig
  34. 11 11
      drivers/input/mouse/elan_i2c_smbus.c
  35. 6 0
      drivers/input/mouse/synaptics.c
  36. 13 0
      drivers/net/dsa/b53/b53_common.c
  37. 4 1
      drivers/net/dsa/b53/b53_mdio.c
  38. 1 0
      drivers/net/dsa/b53/b53_priv.h
  39. 3 1
      drivers/net/ethernet/emulex/benet/be_main.c
  40. 4 5
      drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
  41. 5 0
      drivers/net/ethernet/mellanox/mlxsw/spectrum.c
  42. 1 1
      drivers/net/ethernet/natsemi/sonic.c
  43. 2 2
      drivers/net/ethernet/socionext/netsec.c
  44. 12 10
      drivers/net/ethernet/ti/davinci_emac.c
  45. 9 6
      drivers/net/tun.c
  46. 1 1
      drivers/net/usb/cdc_mbim.c
  47. 1 0
      drivers/net/usb/qmi_wwan.c
  48. 5 5
      drivers/net/wireless/intel/iwlwifi/pcie/trans.c
  49. 3 4
      drivers/net/wireless/ralink/rt2x00/rt2x00queue.c
  50. 1 1
      drivers/nvme/host/Kconfig
  51. 1 1
      drivers/nvme/host/core.c
  52. 1 1
      drivers/nvme/target/Kconfig
  53. 13 10
      drivers/platform/x86/asus-wmi.c
  54. 5 2
      drivers/s390/block/dasd.c
  55. 20 2
      drivers/scsi/scsi_transport_srp.c
  56. 0 36
      drivers/soc/lantiq/gphy.c
  57. 1 1
      drivers/staging/lustre/lnet/Kconfig
  58. 1 1
      drivers/thunderbolt/icm.c
  59. 10 15
      drivers/vfio/vfio_iommu_type1.c
  60. 24 13
      drivers/vhost/net.c
  61. 3 7
      fs/afs/security.c
  62. 10 9
      fs/afs/vlclient.c
  63. 1 1
      fs/cifs/Kconfig
  64. 1 0
      fs/inode.c
  65. 1 1
      include/drm/bridge/dw_hdmi.h
  66. 3 3
      include/linux/iio/buffer_impl.h
  67. 2 0
      include/uapi/linux/bpf.h
  68. 31 14
      kernel/sched/core.c
  69. 3 3
      kernel/sched/deadline.c
  70. 1 1
      kernel/sched/sched.h
  71. 6 6
      kernel/trace/trace.c
  72. 11 0
      kernel/trace/trace.h
  73. 11 4
      kernel/trace/trace_events_trigger.c
  74. 1 1
      mm/huge_memory.c
  75. 1 1
      mm/vmscan.c
  76. 1 1
      net/9p/Kconfig
  77. 2 1
      net/bridge/netfilter/ebtables.c
  78. 3 3
      net/core/net-sysfs.c
  79. 4 4
      net/ipv4/ip_tunnel.c
  80. 8 3
      net/ipv6/ip6_tunnel.c
  81. 2 2
      net/ipv6/seg6_iptunnel.c
  82. 3 2
      net/ipv6/sit.c
  83. 1 1
      net/ipv6/xfrm6_policy.c
  84. 1 1
      net/kcm/kcmsock.c
  85. 1 1
      net/ncsi/ncsi-netlink.c
  86. 15 6
      net/netfilter/ipvs/ip_vs_ctl.c
  87. 5 3
      net/netfilter/nf_tables_api.c
  88. 2 2
      net/netfilter/nf_tables_core.c
  89. 1 1
      net/netfilter/nfnetlink_acct.c
  90. 2 2
      net/netfilter/nfnetlink_cthelper.c
  91. 12 8
      net/netfilter/nft_ct.c
  92. 24 14
      net/netfilter/nft_limit.c
  93. 8 6
      net/netfilter/nft_meta.c
  94. 1 1
      net/rds/Kconfig
  95. 1 1
      net/sched/cls_flower.c
  96. 1 1
      net/sunrpc/Kconfig
  97. 2 3
      net/xfrm/xfrm_policy.c
  98. 1 1
      security/selinux/ss/services.c
  99. 14 6
      tools/arch/x86/include/asm/cpufeatures.h
  100. 2 0
      tools/include/uapi/linux/bpf.h

+ 1 - 0
Documentation/devicetree/bindings/net/dsa/b53.txt

@@ -10,6 +10,7 @@ Required properties:
       "brcm,bcm53128"
       "brcm,bcm53128"
       "brcm,bcm5365"
       "brcm,bcm5365"
       "brcm,bcm5395"
       "brcm,bcm5395"
+      "brcm,bcm5389"
       "brcm,bcm5397"
       "brcm,bcm5397"
       "brcm,bcm5398"
       "brcm,bcm5398"
 
 

+ 1 - 1
Documentation/i2c/busses/i2c-ocores

@@ -2,7 +2,7 @@ Kernel driver i2c-ocores
 
 Supported adapters:
   * OpenCores.org I2C controller by Richard Herveille (see datasheet link)
-    Datasheet: http://www.opencores.org/projects.cgi/web/i2c/overview
+    https://opencores.org/project/i2c/overview
 
 Author: Peter Korsgaard <jacmet@sunsite.dk>
 

+ 8 - 0
MAINTAINERS

@@ -15513,6 +15513,14 @@ L:	linux-kernel@vger.kernel.org
 S:	Supported
 F:	drivers/char/xillybus/
 
+XLP9XX I2C DRIVER
+M:	George Cherian <george.cherian@cavium.com>
+M:	Jan Glauber <jglauber@cavium.com>
+L:	linux-i2c@vger.kernel.org
+W:	http://www.cavium.com
+S:	Supported
+F:	drivers/i2c/busses/i2c-xlp9xx.c
+
 XRA1403 GPIO EXPANDER
 M:	Nandor Han <nandor.han@ge.com>
 M:	Semi Malinen <semi.malinen@ge.com>

+ 4 - 0
arch/mips/kernel/process.c

@@ -721,6 +721,10 @@ int mips_set_process_fp_mode(struct task_struct *task, unsigned int value)
 	if (value & ~known_bits)
 		return -EOPNOTSUPP;
 
+	/* Setting FRE without FR is not supported.  */
+	if ((value & (PR_FP_MODE_FR | PR_FP_MODE_FRE)) == PR_FP_MODE_FRE)
+		return -EOPNOTSUPP;
+
 	/* Avoid inadvertently triggering emulation */
 	if ((value & PR_FP_MODE_FR) && raw_cpu_has_fpu &&
 	    !(raw_current_cpu_data.fpu_id & MIPS_FPIR_F64))
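
The added check rejects FRE without FR with a single mask-and-compare: select both mode bits, then test whether FRE is the only one set. A small standalone C sketch of the same test (the bit values below are illustrative, not the MIPS uapi ones):

#include <stdio.h>

#define MODE_FR  (1u << 0)	/* placeholder bit values */
#define MODE_FRE (1u << 1)

static int validate_fp_mode(unsigned int value)
{
	/* FRE only makes sense on top of FR: masking both bits and
	 * comparing against FRE alone catches exactly that combination */
	if ((value & (MODE_FR | MODE_FRE)) == MODE_FRE)
		return -1;	/* the kernel returns -EOPNOTSUPP here */
	return 0;
}

int main(void)
{
	printf("FR=%d FRE=%d FR|FRE=%d\n", validate_fp_mode(MODE_FR),
	       validate_fp_mode(MODE_FRE),
	       validate_fp_mode(MODE_FR | MODE_FRE));
	return 0;
}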

+ 1 - 1
arch/mips/kernel/ptrace.c

@@ -818,7 +818,7 @@ long arch_ptrace(struct task_struct *child, long request,
 				break;
 			}
 #endif
-			tmp = get_fpr32(&fregs[addr - FPR_BASE], 0);
+			tmp = get_fpr64(&fregs[addr - FPR_BASE], 0);
 			break;
 		case PC:
 			tmp = regs->cp0_epc;

+ 1 - 1
arch/mips/kernel/ptrace32.c

@@ -109,7 +109,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
 						addr & 1);
 				break;
 			}
-			tmp = get_fpr32(&fregs[addr - FPR_BASE], 0);
+			tmp = get_fpr64(&fregs[addr - FPR_BASE], 0);
 			break;
 		case PC:
 			tmp = regs->cp0_epc;

+ 1 - 1
arch/s390/purgatory/Makefile

@@ -21,7 +21,7 @@ LDFLAGS_purgatory.ro += -z nodefaultlib
 KBUILD_CFLAGS := -fno-strict-aliasing -Wall -Wstrict-prototypes
 KBUILD_CFLAGS += -Wno-pointer-sign -Wno-sign-compare
 KBUILD_CFLAGS += -fno-zero-initialized-in-bss -fno-builtin -ffreestanding
-KBUILD_CFLAGS += -c -MD -Os -m64
+KBUILD_CFLAGS += -c -MD -Os -m64 -msoft-float
 KBUILD_CFLAGS += $(call cc-option,-fno-PIE)
 
 $(obj)/purgatory.ro: $(PURGATORY_OBJS) FORCE

+ 2 - 2
drivers/atm/zatm.c

@@ -1151,8 +1151,8 @@ static void eprom_get_byte(struct zatm_dev *zatm_dev, unsigned char *byte,
 }
 
 
-static unsigned char eprom_try_esi(struct atm_dev *dev, unsigned short cmd,
-				   int offset, int swap)
+static int eprom_try_esi(struct atm_dev *dev, unsigned short cmd, int offset,
+			 int swap)
 {
 	unsigned char buf[ZEPROM_SIZE];
 	struct zatm_dev *zatm_dev;

+ 2 - 2
drivers/crypto/inside-secure/safexcel.c

@@ -152,8 +152,8 @@ static int eip197_load_firmwares(struct safexcel_crypto_priv *priv)
 	       EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_ACCESS;
 	writel(val, EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_CTRL);
 
-	memset(EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_RAM, 0,
-	       EIP197_NUM_OF_SCRATCH_BLOCKS * sizeof(u32));
+	memset_io(EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_RAM, 0,
+		  EIP197_NUM_OF_SCRATCH_BLOCKS * sizeof(u32));
 
 	eip197_write_firmware(priv, fw[FW_IFPP], EIP197_PE_ICE_FPP_CTRL,
 			      EIP197_PE_ICE_RAM_CTRL_FPP_PROG_EN);

+ 29 - 15
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c

@@ -4555,8 +4555,8 @@ static int dm_update_crtcs_state(struct dc *dc,
 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
 		struct amdgpu_crtc *acrtc = NULL;
 		struct amdgpu_dm_connector *aconnector = NULL;
-		struct drm_connector_state *new_con_state = NULL;
-		struct dm_connector_state *dm_conn_state = NULL;
+		struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
+		struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
 		struct drm_plane_state *new_plane_state = NULL;
 
 		new_stream = NULL;
@@ -4577,19 +4577,23 @@ static int dm_update_crtcs_state(struct dc *dc,
 		/* TODO This hack should go away */
 		if (aconnector && enable) {
 			// Make sure fake sink is created in plug-in scenario
-			new_con_state = drm_atomic_get_connector_state(state,
+			drm_new_conn_state = drm_atomic_get_new_connector_state(state,
 								    &aconnector->base);
+			drm_old_conn_state = drm_atomic_get_old_connector_state(state,
+								    &aconnector->base);
 
-			if (IS_ERR(new_con_state)) {
-				ret = PTR_ERR_OR_ZERO(new_con_state);
+
+			if (IS_ERR(drm_new_conn_state)) {
+				ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
 				break;
 			}
 
-			dm_conn_state = to_dm_connector_state(new_con_state);
+			dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
+			dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
 
 			new_stream = create_stream_for_sink(aconnector,
 							     &new_crtc_state->mode,
-							    dm_conn_state);
+							    dm_new_conn_state);
 
 			/*
 			 * we can have no stream on ACTION_SET if a display
@@ -4695,20 +4699,30 @@ next_crtc:
 		 * We want to do dc stream updates that do not require a
 		 * full modeset below.
 		 */
-		if (!enable || !aconnector || modereset_required(new_crtc_state))
+		if (!(enable && aconnector && new_crtc_state->enable &&
+		      new_crtc_state->active))
 			continue;
 		/*
 		 * Given above conditions, the dc state cannot be NULL because:
-		 * 1. We're attempting to enable a CRTC. Which has a...
-		 * 2. Valid connector attached, and
-		 * 3. User does not want to reset it (disable or mark inactive,
-		 *    which can happen on a CRTC that's already disabled).
-		 * => It currently exists.
+		 * 1. We're in the process of enabling CRTCs (just been added
+		 *    to the dc context, or already is on the context)
+		 * 2. Has a valid connector attached, and
+		 * 3. Is currently active and enabled.
+		 * => The dc stream state currently exists.
 		 */
 		BUG_ON(dm_new_crtc_state->stream == NULL);
 
-		/* Color managment settings */
-		if (dm_new_crtc_state->base.color_mgmt_changed) {
+		/* Scaling or underscan settings */
+		if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
+			update_stream_scaling_settings(
+				&new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
+
+		/*
+		 * Color management settings. We also update color properties
+		 * when a modeset is needed, to ensure it gets reprogrammed.
+		 */
+		if (dm_new_crtc_state->base.color_mgmt_changed ||
+		    drm_atomic_crtc_needs_modeset(new_crtc_state)) {
 			ret = amdgpu_dm_set_regamma_lut(dm_new_crtc_state);
 			if (ret)
 				goto fail;

+ 4 - 11
drivers/gpu/drm/bridge/synopsys/dw-hdmi.c

@@ -2077,7 +2077,7 @@ static irqreturn_t dw_hdmi_hardirq(int irq, void *dev_id)
 	return ret;
 }
 
-void __dw_hdmi_setup_rx_sense(struct dw_hdmi *hdmi, bool hpd, bool rx_sense)
+void dw_hdmi_setup_rx_sense(struct dw_hdmi *hdmi, bool hpd, bool rx_sense)
 {
 	mutex_lock(&hdmi->mutex);
 
@@ -2103,13 +2103,6 @@ void __dw_hdmi_setup_rx_sense(struct dw_hdmi *hdmi, bool hpd, bool rx_sense)
 	}
 	mutex_unlock(&hdmi->mutex);
 }
-
-void dw_hdmi_setup_rx_sense(struct device *dev, bool hpd, bool rx_sense)
-{
-	struct dw_hdmi *hdmi = dev_get_drvdata(dev);
-
-	__dw_hdmi_setup_rx_sense(hdmi, hpd, rx_sense);
-}
 EXPORT_SYMBOL_GPL(dw_hdmi_setup_rx_sense);
 
 static irqreturn_t dw_hdmi_irq(int irq, void *dev_id)
@@ -2145,9 +2138,9 @@ static irqreturn_t dw_hdmi_irq(int irq, void *dev_id)
 	 */
 	if (intr_stat &
 	    (HDMI_IH_PHY_STAT0_RX_SENSE | HDMI_IH_PHY_STAT0_HPD)) {
-		__dw_hdmi_setup_rx_sense(hdmi,
-					 phy_stat & HDMI_PHY_HPD,
-					 phy_stat & HDMI_PHY_RX_SENSE);
+		dw_hdmi_setup_rx_sense(hdmi,
+				       phy_stat & HDMI_PHY_HPD,
+				       phy_stat & HDMI_PHY_RX_SENSE);
 
 		if ((phy_stat & (HDMI_PHY_RX_SENSE | HDMI_PHY_HPD)) == 0)
 			cec_notifier_set_phys_addr(hdmi->cec_notifier,

+ 1 - 0
drivers/gpu/drm/drm_dp_helper.c

@@ -1145,6 +1145,7 @@ int drm_dp_psr_setup_time(const u8 psr_cap[EDP_PSR_RECEIVER_CAP_SIZE])
 	static const u16 psr_setup_time_us[] = {
 		PSR_SETUP_TIME(330),
 		PSR_SETUP_TIME(275),
+		PSR_SETUP_TIME(220),
 		PSR_SETUP_TIME(165),
 		PSR_SETUP_TIME(110),
 		PSR_SETUP_TIME(55),

+ 11 - 4
drivers/gpu/drm/i915/i915_query.c

@@ -4,6 +4,8 @@
  * Copyright © 2018 Intel Corporation
  */
 
+#include <linux/nospec.h>
+
 #include "i915_drv.h"
 #include "i915_query.h"
 #include <uapi/drm/i915_drm.h>
@@ -100,7 +102,7 @@ int i915_query_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 
 	for (i = 0; i < args->num_items; i++, user_item_ptr++) {
 		struct drm_i915_query_item item;
-		u64 func_idx;
+		unsigned long func_idx;
 		int ret;
 
 		if (copy_from_user(&item, user_item_ptr, sizeof(item)))
@@ -109,12 +111,17 @@ int i915_query_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 		if (item.query_id == 0)
 			return -EINVAL;
 
+		if (overflows_type(item.query_id - 1, unsigned long))
+			return -EINVAL;
+
 		func_idx = item.query_id - 1;
 
-		if (func_idx < ARRAY_SIZE(i915_query_funcs))
+		ret = -EINVAL;
+		if (func_idx < ARRAY_SIZE(i915_query_funcs)) {
+			func_idx = array_index_nospec(func_idx,
+						      ARRAY_SIZE(i915_query_funcs));
 			ret = i915_query_funcs[func_idx](dev_priv, &item);
-		else
-			ret = -EINVAL;
+		}
 
 		/* Only write the length back to userspace if they differ. */
 		if (ret != item.length && put_user(ret, &user_item_ptr->length))
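
The query fix clamps func_idx with array_index_nospec() so a mispredicted bounds check cannot speculatively index past i915_query_funcs (a Spectre-v1 gadget). A standalone sketch of the mask construction the kernel's generic helper uses (userspace illustration only; the driver simply calls array_index_nospec()):

#include <stdio.h>

#define BITS_PER_LONG (8 * (int)sizeof(long))

/* All-ones when index < size, all-zeroes otherwise, computed without a
 * branch; relies on arithmetic right shift of a negative long, as the
 * kernel's generic array_index_mask_nospec() does. */
static unsigned long index_mask_nospec(unsigned long index, unsigned long size)
{
	return ~(long)(index | (size - 1UL - index)) >> (BITS_PER_LONG - 1);
}

int main(void)
{
	int funcs[3] = { 10, 20, 30 };
	unsigned long idx = 2;	/* imagine this value came from userspace */

	if (idx < 3) {
		idx &= index_mask_nospec(idx, 3);	/* stays in range even under misprediction */
		printf("%d\n", funcs[idx]);
	}
	return 0;
}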

+ 40 - 11
drivers/gpu/drm/i915/intel_lvds.c

@@ -574,6 +574,36 @@ exit:
 	return NOTIFY_OK;
 }
 
+static int
+intel_lvds_connector_register(struct drm_connector *connector)
+{
+	struct intel_lvds_connector *lvds = to_lvds_connector(connector);
+	int ret;
+
+	ret = intel_connector_register(connector);
+	if (ret)
+		return ret;
+
+	lvds->lid_notifier.notifier_call = intel_lid_notify;
+	if (acpi_lid_notifier_register(&lvds->lid_notifier)) {
+		DRM_DEBUG_KMS("lid notifier registration failed\n");
+		lvds->lid_notifier.notifier_call = NULL;
+	}
+
+	return 0;
+}
+
+static void
+intel_lvds_connector_unregister(struct drm_connector *connector)
+{
+	struct intel_lvds_connector *lvds = to_lvds_connector(connector);
+
+	if (lvds->lid_notifier.notifier_call)
+		acpi_lid_notifier_unregister(&lvds->lid_notifier);
+
+	intel_connector_unregister(connector);
+}
+
 /**
  * intel_lvds_destroy - unregister and free LVDS structures
  * @connector: connector to free
@@ -586,9 +616,6 @@ static void intel_lvds_destroy(struct drm_connector *connector)
 	struct intel_lvds_connector *lvds_connector =
 		to_lvds_connector(connector);
 
-	if (lvds_connector->lid_notifier.notifier_call)
-		acpi_lid_notifier_unregister(&lvds_connector->lid_notifier);
-
 	if (!IS_ERR_OR_NULL(lvds_connector->base.edid))
 		kfree(lvds_connector->base.edid);
 
@@ -609,8 +636,8 @@ static const struct drm_connector_funcs intel_lvds_connector_funcs = {
 	.fill_modes = drm_helper_probe_single_connector_modes,
 	.atomic_get_property = intel_digital_connector_atomic_get_property,
 	.atomic_set_property = intel_digital_connector_atomic_set_property,
-	.late_register = intel_connector_register,
-	.early_unregister = intel_connector_unregister,
+	.late_register = intel_lvds_connector_register,
+	.early_unregister = intel_lvds_connector_unregister,
 	.destroy = intel_lvds_destroy,
 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
 	.atomic_duplicate_state = intel_digital_connector_duplicate_state,
@@ -827,6 +854,14 @@ static const struct dmi_system_id intel_no_lvds[] = {
 			DMI_EXACT_MATCH(DMI_BOARD_NAME, "D525MW"),
 		},
 	},
+	{
+		.callback = intel_no_lvds_dmi_callback,
+		.ident = "Radiant P845",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Radiant Systems Inc"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "P845"),
+		},
+	},
 
 	{ }	/* terminating entry */
 };
@@ -1150,12 +1185,6 @@ out:
 
 	lvds_encoder->a3_power = lvds & LVDS_A3_POWER_MASK;
 
-	lvds_connector->lid_notifier.notifier_call = intel_lid_notify;
-	if (acpi_lid_notifier_register(&lvds_connector->lid_notifier)) {
-		DRM_DEBUG_KMS("lid notifier registration failed\n");
-		lvds_connector->lid_notifier.notifier_call = NULL;
-	}
-
 	return;
 
 failed:

+ 1 - 1
drivers/gpu/drm/meson/meson_dw_hdmi.c

@@ -529,7 +529,7 @@ static irqreturn_t dw_hdmi_top_thread_irq(int irq, void *dev_id)
 		if (stat & HDMITX_TOP_INTR_HPD_RISE)
 			hpd_connected = true;
 
-		dw_hdmi_setup_rx_sense(dw_hdmi->dev, hpd_connected,
+		dw_hdmi_setup_rx_sense(dw_hdmi->hdmi, hpd_connected,
 				       hpd_connected);
 
 		drm_helper_hpd_irq_event(dw_hdmi->encoder.dev);

+ 4 - 1
drivers/gpu/drm/omapdrm/dss/sdi.c

@@ -82,7 +82,7 @@ static int sdi_calc_clock_div(struct sdi_device *sdi, unsigned long pclk,
 			      struct dispc_clock_info *dispc_cinfo)
 {
 	int i;
-	struct sdi_clk_calc_ctx ctx = { .sdi = sdi };
+	struct sdi_clk_calc_ctx ctx;
 
 	/*
 	 * DSS fclk gives us very few possibilities, so finding a good pixel
@@ -95,6 +95,9 @@ static int sdi_calc_clock_div(struct sdi_device *sdi, unsigned long pclk,
 		bool ok;
 
 		memset(&ctx, 0, sizeof(ctx));
+
+		ctx.sdi = sdi;
+
 		if (pclk > 1000 * i * i * i)
 			ctx.pck_min = max(pclk - 1000 * i * i * i, 0lu);
 		else

+ 3 - 3
drivers/hwtracing/intel_th/msu.c

@@ -733,8 +733,8 @@ err_nomem:
 		/* Reset the page to write-back before releasing */
 		set_memory_wb((unsigned long)win->block[i].bdesc, 1);
 #endif
-		dma_free_coherent(msc_dev(msc), size, win->block[i].bdesc,
-				  win->block[i].addr);
+		dma_free_coherent(msc_dev(msc)->parent->parent, size,
+				  win->block[i].bdesc, win->block[i].addr);
 	}
 	kfree(win);
 
@@ -769,7 +769,7 @@ static void msc_buffer_win_free(struct msc *msc, struct msc_window *win)
 		/* Reset the page to write-back before releasing */
 		set_memory_wb((unsigned long)win->block[i].bdesc, 1);
 #endif
-		dma_free_coherent(msc_dev(win->msc), PAGE_SIZE,
+		dma_free_coherent(msc_dev(win->msc)->parent->parent, PAGE_SIZE,
 				  win->block[i].bdesc, win->block[i].addr);
 	}
 

+ 4 - 3
drivers/hwtracing/stm/core.c

@@ -19,6 +19,7 @@
 #include <linux/stm.h>
 #include <linux/fs.h>
 #include <linux/mm.h>
+#include <linux/vmalloc.h>
 #include "stm.h"
 
 #include <uapi/linux/stm.h>
@@ -674,7 +675,7 @@ static void stm_device_release(struct device *dev)
 {
 	struct stm_device *stm = to_stm_device(dev);
 
-	kfree(stm);
+	vfree(stm);
 }
 
 int stm_register_device(struct device *parent, struct stm_data *stm_data,
@@ -691,7 +692,7 @@ int stm_register_device(struct device *parent, struct stm_data *stm_data,
 		return -EINVAL;
 
 	nmasters = stm_data->sw_end - stm_data->sw_start + 1;
-	stm = kzalloc(sizeof(*stm) + nmasters * sizeof(void *), GFP_KERNEL);
+	stm = vzalloc(sizeof(*stm) + nmasters * sizeof(void *));
 	if (!stm)
 		return -ENOMEM;
 
@@ -744,7 +745,7 @@ err_device:
 	/* matches device_initialize() above */
 	put_device(&stm->dev);
 err_free:
-	kfree(stm);
+	vfree(stm);
 
 	return err;
 }

+ 1 - 1
drivers/i2c/busses/i2c-ocores.c

@@ -1,6 +1,6 @@
 /*
  * i2c-ocores.c: I2C bus driver for OpenCores I2C controller
- * (http://www.opencores.org/projects.cgi/web/i2c/overview).
+ * (https://opencores.org/project/i2c/overview)
  *
  * Peter Korsgaard <jacmet@sunsite.dk>
  *

+ 1 - 0
drivers/iio/adc/Kconfig

@@ -158,6 +158,7 @@ config AT91_SAMA5D2_ADC
 	depends on ARCH_AT91 || COMPILE_TEST
 	depends on HAS_IOMEM
 	depends on HAS_DMA
+	select IIO_BUFFER
 	select IIO_TRIGGERED_BUFFER
 	help
 	  Say yes here to build support for Atmel SAMA5D2 ADC which is

+ 24 - 51
drivers/iio/adc/ad7793.c

@@ -348,55 +348,6 @@ static const u16 ad7793_sample_freq_avail[16] = {0, 470, 242, 123, 62, 50, 39,
 static const u16 ad7797_sample_freq_avail[16] = {0, 0, 0, 123, 62, 50, 0,
 					33, 0, 17, 16, 12, 10, 8, 6, 4};
 
-static ssize_t ad7793_read_frequency(struct device *dev,
-		struct device_attribute *attr,
-		char *buf)
-{
-	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
-	struct ad7793_state *st = iio_priv(indio_dev);
-
-	return sprintf(buf, "%d\n",
-	       st->chip_info->sample_freq_avail[AD7793_MODE_RATE(st->mode)]);
-}
-
-static ssize_t ad7793_write_frequency(struct device *dev,
-		struct device_attribute *attr,
-		const char *buf,
-		size_t len)
-{
-	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
-	struct ad7793_state *st = iio_priv(indio_dev);
-	long lval;
-	int i, ret;
-
-	ret = kstrtol(buf, 10, &lval);
-	if (ret)
-		return ret;
-
-	if (lval == 0)
-		return -EINVAL;
-
-	for (i = 0; i < 16; i++)
-		if (lval == st->chip_info->sample_freq_avail[i])
-			break;
-	if (i == 16)
-		return -EINVAL;
-
-	ret = iio_device_claim_direct_mode(indio_dev);
-	if (ret)
-		return ret;
-	st->mode &= ~AD7793_MODE_RATE(-1);
-	st->mode |= AD7793_MODE_RATE(i);
-	ad_sd_write_reg(&st->sd, AD7793_REG_MODE, sizeof(st->mode), st->mode);
-	iio_device_release_direct_mode(indio_dev);
-
-	return len;
-}
-
-static IIO_DEV_ATTR_SAMP_FREQ(S_IWUSR | S_IRUGO,
-		ad7793_read_frequency,
-		ad7793_write_frequency);
-
 static IIO_CONST_ATTR_SAMP_FREQ_AVAIL(
 	"470 242 123 62 50 39 33 19 17 16 12 10 8 6 4");
 
@@ -424,7 +375,6 @@ static IIO_DEVICE_ATTR_NAMED(in_m_in_scale_available,
 		ad7793_show_scale_available, NULL, 0);
 
 static struct attribute *ad7793_attributes[] = {
-	&iio_dev_attr_sampling_frequency.dev_attr.attr,
 	&iio_const_attr_sampling_frequency_available.dev_attr.attr,
 	&iio_dev_attr_in_m_in_scale_available.dev_attr.attr,
 	NULL
@@ -435,7 +385,6 @@ static const struct attribute_group ad7793_attribute_group = {
 };
 
 static struct attribute *ad7797_attributes[] = {
-	&iio_dev_attr_sampling_frequency.dev_attr.attr,
 	&iio_const_attr_sampling_frequency_available_ad7797.dev_attr.attr,
 	NULL
 };
@@ -505,6 +454,10 @@ static int ad7793_read_raw(struct iio_dev *indio_dev,
 			*val -= offset;
 		}
 		return IIO_VAL_INT;
+	case IIO_CHAN_INFO_SAMP_FREQ:
+		*val = st->chip_info
+			       ->sample_freq_avail[AD7793_MODE_RATE(st->mode)];
+		return IIO_VAL_INT;
 	}
 	return -EINVAL;
 }
@@ -542,6 +495,26 @@ static int ad7793_write_raw(struct iio_dev *indio_dev,
 				break;
 			}
 		break;
+	case IIO_CHAN_INFO_SAMP_FREQ:
+		if (!val) {
+			ret = -EINVAL;
+			break;
+		}
+
+		for (i = 0; i < 16; i++)
+			if (val == st->chip_info->sample_freq_avail[i])
+				break;
+
+		if (i == 16) {
+			ret = -EINVAL;
+			break;
+		}
+
+		st->mode &= ~AD7793_MODE_RATE(-1);
+		st->mode |= AD7793_MODE_RATE(i);
+		ad_sd_write_reg(&st->sd, AD7793_REG_MODE, sizeof(st->mode),
+				st->mode);
+		break;
 	default:
 		ret = -EINVAL;
 	}

+ 37 - 4
drivers/iio/adc/at91-sama5d2_adc.c

@@ -333,6 +333,27 @@ static const struct iio_chan_spec at91_adc_channels[] = {
 				+ AT91_SAMA5D2_DIFF_CHAN_CNT + 1),
 };
 
+static int at91_adc_chan_xlate(struct iio_dev *indio_dev, int chan)
+{
+	int i;
+
+	for (i = 0; i < indio_dev->num_channels; i++) {
+		if (indio_dev->channels[i].scan_index == chan)
+			return i;
+	}
+	return -EINVAL;
+}
+
+static inline struct iio_chan_spec const *
+at91_adc_chan_get(struct iio_dev *indio_dev, int chan)
+{
+	int index = at91_adc_chan_xlate(indio_dev, chan);
+
+	if (index < 0)
+		return NULL;
+	return indio_dev->channels + index;
+}
+
 static int at91_adc_configure_trigger(struct iio_trigger *trig, bool state)
 {
 	struct iio_dev *indio = iio_trigger_get_drvdata(trig);
@@ -350,8 +371,10 @@ static int at91_adc_configure_trigger(struct iio_trigger *trig, bool state)
 	at91_adc_writel(st, AT91_SAMA5D2_TRGR, status);
 
 	for_each_set_bit(bit, indio->active_scan_mask, indio->num_channels) {
-		struct iio_chan_spec const *chan = indio->channels + bit;
+		struct iio_chan_spec const *chan = at91_adc_chan_get(indio, bit);
 
+		if (!chan)
+			continue;
 		if (state) {
 			at91_adc_writel(st, AT91_SAMA5D2_CHER,
 					BIT(chan->channel));
@@ -448,7 +471,11 @@ static int at91_adc_dma_start(struct iio_dev *indio_dev)
 
 	for_each_set_bit(bit, indio_dev->active_scan_mask,
 			 indio_dev->num_channels) {
-		struct iio_chan_spec const *chan = indio_dev->channels + bit;
+		struct iio_chan_spec const *chan =
+					 at91_adc_chan_get(indio_dev, bit);
+
+		if (!chan)
+			continue;
 
 		st->dma_st.rx_buf_sz += chan->scan_type.storagebits / 8;
 	}
@@ -526,8 +553,11 @@ static int at91_adc_buffer_predisable(struct iio_dev *indio_dev)
 	 */
 	for_each_set_bit(bit, indio_dev->active_scan_mask,
 			 indio_dev->num_channels) {
-		struct iio_chan_spec const *chan = indio_dev->channels + bit;
+		struct iio_chan_spec const *chan =
+					at91_adc_chan_get(indio_dev, bit);
 
+		if (!chan)
+			continue;
 		if (st->dma_st.dma_chan)
 			at91_adc_readl(st, chan->address);
 	}
@@ -587,8 +617,11 @@ static void at91_adc_trigger_handler_nodma(struct iio_dev *indio_dev,
 
 	for_each_set_bit(bit, indio_dev->active_scan_mask,
 			 indio_dev->num_channels) {
-		struct iio_chan_spec const *chan = indio_dev->channels + bit;
+		struct iio_chan_spec const *chan =
+					at91_adc_chan_get(indio_dev, bit);
 
+		if (!chan)
+			continue;
 		st->buffer[i] = at91_adc_readl(st, chan->address);
 		i++;
 	}
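
The recurring at91_adc_chan_get() change fixes an assumption that bit N of active_scan_mask is element N of the channels array; channels are now looked up by their scan_index. A standalone sketch of that translation (simplified types, not the driver's structs):

#include <stdio.h>

/* Simplified stand-in for struct iio_chan_spec: scan_index is the bit a
 * channel occupies in active_scan_mask, which need not match the
 * channel's position in the channels[] array. */
struct chan {
	int scan_index;
	const char *name;
};

static int chan_xlate(const struct chan *chans, int nchans, int bit)
{
	int i;

	for (i = 0; i < nchans; i++)
		if (chans[i].scan_index == bit)
			return i;
	return -1;	/* the driver returns -EINVAL / a NULL channel */
}

int main(void)
{
	/* array order deliberately differs from scan_index order */
	struct chan chans[] = { { 2, "ch2" }, { 0, "ch0" }, { 1, "ch1" } };
	int bit = 1;
	int idx = chan_xlate(chans, 3, bit);

	if (idx >= 0)
		printf("scan bit %d -> channels[%d] (%s)\n",
		       bit, idx, chans[idx].name);
	return 0;
}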

+ 14 - 3
drivers/iio/adc/stm32-dfsdm-adc.c

@@ -144,6 +144,7 @@ static int stm32_dfsdm_set_osrs(struct stm32_dfsdm_filter *fl,
 	 * Leave as soon as if exact resolution if reached.
 	 * Otherwise the higher resolution below 32 bits is kept.
 	 */
+	fl->res = 0;
 	for (fosr = 1; fosr <= DFSDM_MAX_FL_OVERSAMPLING; fosr++) {
 		for (iosr = 1; iosr <= DFSDM_MAX_INT_OVERSAMPLING; iosr++) {
 			if (fast)
@@ -193,7 +194,7 @@ static int stm32_dfsdm_set_osrs(struct stm32_dfsdm_filter *fl,
 		}
 	}
 
-	if (!fl->fosr)
+	if (!fl->res)
 		return -EINVAL;
 
 	return 0;
@@ -770,7 +771,7 @@ static int stm32_dfsdm_write_raw(struct iio_dev *indio_dev,
 	struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
 	struct stm32_dfsdm_filter *fl = &adc->dfsdm->fl_list[adc->fl_id];
 	struct stm32_dfsdm_channel *ch = &adc->dfsdm->ch_list[chan->channel];
-	unsigned int spi_freq = adc->spi_freq;
+	unsigned int spi_freq;
 	int ret = -EINVAL;
 
 	switch (mask) {
@@ -784,8 +785,18 @@ static int stm32_dfsdm_write_raw(struct iio_dev *indio_dev,
 	case IIO_CHAN_INFO_SAMP_FREQ:
 		if (!val)
 			return -EINVAL;
-		if (ch->src != DFSDM_CHANNEL_SPI_CLOCK_EXTERNAL)
+
+		switch (ch->src) {
+		case DFSDM_CHANNEL_SPI_CLOCK_INTERNAL:
 			spi_freq = adc->dfsdm->spi_master_freq;
+			break;
+		case DFSDM_CHANNEL_SPI_CLOCK_INTERNAL_DIV2_FALLING:
+		case DFSDM_CHANNEL_SPI_CLOCK_INTERNAL_DIV2_RISING:
+			spi_freq = adc->dfsdm->spi_master_freq / 2;
+			break;
+		default:
+			spi_freq = adc->spi_freq;
+		}
 
 		if (spi_freq % val)
 			dev_warn(&indio_dev->dev,
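
The reworked IIO_CHAN_INFO_SAMP_FREQ case derives the reference clock from the channel's clock source instead of assuming an externally clocked channel: internal sources use the SPI master frequency, the DIV2 variants run at half of it, and everything else keeps the externally supplied rate. Illustrative standalone sketch (enum and names are placeholders, not the driver's):

#include <stdio.h>

enum clk_src {	/* placeholder names */
	SRC_EXTERNAL,
	SRC_INTERNAL,
	SRC_INTERNAL_DIV2_FALLING,
	SRC_INTERNAL_DIV2_RISING,
};

static unsigned int pick_spi_freq(enum clk_src src,
				  unsigned int master_freq,
				  unsigned int ext_freq)
{
	switch (src) {
	case SRC_INTERNAL:
		return master_freq;
	case SRC_INTERNAL_DIV2_FALLING:
	case SRC_INTERNAL_DIV2_RISING:
		return master_freq / 2;	/* DIV2 sources halve the clock */
	default:
		return ext_freq;	/* externally clocked channel */
	}
}

int main(void)
{
	printf("%u\n", pick_spi_freq(SRC_INTERNAL_DIV2_RISING, 20000000, 8000000));
	return 0;
}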

+ 1 - 1
drivers/iio/buffer/industrialio-buffer-dma.c

@@ -587,7 +587,7 @@ EXPORT_SYMBOL_GPL(iio_dma_buffer_set_bytes_per_datum);
  * Should be used as the set_length callback for iio_buffer_access_ops
  * struct for DMA buffers.
  */
-int iio_dma_buffer_set_length(struct iio_buffer *buffer, int length)
+int iio_dma_buffer_set_length(struct iio_buffer *buffer, unsigned int length)
 {
 	/* Avoid an invalid state */
 	if (length < 2)

+ 9 - 2
drivers/iio/buffer/kfifo_buf.c

@@ -22,11 +22,18 @@ struct iio_kfifo {
 #define iio_to_kfifo(r) container_of(r, struct iio_kfifo, buffer)
 
 static inline int __iio_allocate_kfifo(struct iio_kfifo *buf,
-				int bytes_per_datum, int length)
+			size_t bytes_per_datum, unsigned int length)
 {
 	if ((length == 0) || (bytes_per_datum == 0))
 		return -EINVAL;
 
+	/*
+	 * Make sure we don't overflow an unsigned int after kfifo rounds up to
+	 * the next power of 2.
+	 */
+	if (roundup_pow_of_two(length) > UINT_MAX / bytes_per_datum)
+		return -EINVAL;
+
 	return __kfifo_alloc((struct __kfifo *)&buf->kf, length,
 			     bytes_per_datum, GFP_KERNEL);
 }
@@ -67,7 +74,7 @@ static int iio_set_bytes_per_datum_kfifo(struct iio_buffer *r, size_t bpd)
 	return 0;
 }
 
-static int iio_set_length_kfifo(struct iio_buffer *r, int length)
+static int iio_set_length_kfifo(struct iio_buffer *r, unsigned int length)
 {
 	/* Avoid an invalid state */
 	if (length < 2)
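
__kfifo_alloc() rounds the requested element count up to a power of two, so the overflow check has to be applied to the rounded value, not the raw length. A standalone sketch of the guard (roundup_pow2() stands in for the kernel's roundup_pow_of_two()):

#include <stdio.h>
#include <limits.h>
#include <stddef.h>

/* Smallest power of two >= n, for n >= 1. */
static unsigned long long roundup_pow2(unsigned long long n)
{
	unsigned long long p = 1;

	while (p < n)
		p <<= 1;
	return p;
}

static int check_alloc(unsigned int length, size_t bytes_per_datum)
{
	if (length == 0 || bytes_per_datum == 0)
		return -1;
	/* validate the rounded-up length: rounded * bytes_per_datum must
	 * still fit in an unsigned int */
	if (roundup_pow2(length) > UINT_MAX / bytes_per_datum)
		return -1;
	return 0;
}

int main(void)
{
	printf("%d\n", check_alloc(0x40000001u, 4));	/* rejected: overflows after rounding */
	printf("%d\n", check_alloc(1024, 4));		/* accepted */
	return 0;
}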

+ 4 - 4
drivers/iio/common/hid-sensors/hid-sensor-trigger.c

@@ -178,14 +178,14 @@ int hid_sensor_power_state(struct hid_sensor_common *st, bool state)
 #ifdef CONFIG_PM
 	int ret;
 
-	atomic_set(&st->user_requested_state, state);
-
 	if (atomic_add_unless(&st->runtime_pm_enable, 1, 1))
 		pm_runtime_enable(&st->pdev->dev);
 
-	if (state)
+	if (state) {
+		atomic_inc(&st->user_requested_state);
 		ret = pm_runtime_get_sync(&st->pdev->dev);
-	else {
+	} else {
+		atomic_dec(&st->user_requested_state);
 		pm_runtime_mark_last_busy(&st->pdev->dev);
 		pm_runtime_use_autosuspend(&st->pdev->dev);
 		ret = pm_runtime_put_autosuspend(&st->pdev->dev);
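
The hid-sensor fix turns user_requested_state from a last-writer-wins flag (atomic_set) into a counter, so concurrent enable and disable requests from different users balance out instead of overwriting each other. A standalone C11 sketch of the counting semantics:

#include <stdio.h>
#include <stdatomic.h>

static atomic_int user_requested_state;

/* Counter semantics: every enable increments, every disable decrements,
 * so the resume path can ask "does anyone still want this powered?".
 * A plain set-to-latest-request flag loses earlier users' votes. */
static void power_state(int state)
{
	if (state)
		atomic_fetch_add(&user_requested_state, 1);
	else
		atomic_fetch_sub(&user_requested_state, 1);
}

int main(void)
{
	power_state(1);		/* user A enables */
	power_state(1);		/* user B enables */
	power_state(0);		/* user A disables */
	printf("still requested: %d\n",
	       atomic_load(&user_requested_state) > 0);
	return 0;
}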

+ 1 - 1
drivers/infiniband/core/cache.c

@@ -502,7 +502,7 @@ static int __ib_cache_gid_get(struct ib_device *ib_dev, u8 port, int index,
 		return -EINVAL;
 		return -EINVAL;
 
 	if (table->data_vec[index].props & GID_TABLE_ENTRY_INVALID)
-		return -EAGAIN;
+		return -EINVAL;
 
 	memcpy(gid, &table->data_vec[index].gid, sizeof(*gid));
 	if (attr) {
+ 54 - 1
drivers/infiniband/hw/bnxt_re/main.c

@@ -185,12 +185,65 @@ static void bnxt_re_shutdown(void *p)
 	bnxt_re_ib_unreg(rdev, false);
 }
 
+static void bnxt_re_stop_irq(void *handle)
+{
+	struct bnxt_re_dev *rdev = (struct bnxt_re_dev *)handle;
+	struct bnxt_qplib_rcfw *rcfw = &rdev->rcfw;
+	struct bnxt_qplib_nq *nq;
+	int indx;
+
+	for (indx = BNXT_RE_NQ_IDX; indx < rdev->num_msix; indx++) {
+		nq = &rdev->nq[indx - 1];
+		bnxt_qplib_nq_stop_irq(nq, false);
+	}
+
+	bnxt_qplib_rcfw_stop_irq(rcfw, false);
+}
+
+static void bnxt_re_start_irq(void *handle, struct bnxt_msix_entry *ent)
+{
+	struct bnxt_re_dev *rdev = (struct bnxt_re_dev *)handle;
+	struct bnxt_msix_entry *msix_ent = rdev->msix_entries;
+	struct bnxt_qplib_rcfw *rcfw = &rdev->rcfw;
+	struct bnxt_qplib_nq *nq;
+	int indx, rc;
+
+	if (!ent) {
+		/* Not setting the f/w timeout bit in rcfw.
+		 * During the driver unload the first command
+		 * to f/w will timeout and that will set the
+		 * timeout bit.
+		 */
+		dev_err(rdev_to_dev(rdev), "Failed to re-start IRQs\n");
+		return;
+	}
+
+	/* Vectors may change after restart, so update with new vectors
+	 * in device sctructure.
+	 */
+	for (indx = 0; indx < rdev->num_msix; indx++)
+		rdev->msix_entries[indx].vector = ent[indx].vector;
+
+	bnxt_qplib_rcfw_start_irq(rcfw, msix_ent[BNXT_RE_AEQ_IDX].vector,
+				  false);
+	for (indx = BNXT_RE_NQ_IDX ; indx < rdev->num_msix; indx++) {
+		nq = &rdev->nq[indx - 1];
+		rc = bnxt_qplib_nq_start_irq(nq, indx - 1,
+					     msix_ent[indx].vector, false);
+		if (rc)
+			dev_warn(rdev_to_dev(rdev),
+				 "Failed to reinit NQ index %d\n", indx - 1);
+	}
+}
+
 static struct bnxt_ulp_ops bnxt_re_ulp_ops = {
 	.ulp_async_notifier = NULL,
 	.ulp_stop = bnxt_re_stop,
 	.ulp_start = bnxt_re_start,
 	.ulp_sriov_config = bnxt_re_sriov_config,
-	.ulp_shutdown = bnxt_re_shutdown
+	.ulp_shutdown = bnxt_re_shutdown,
+	.ulp_irq_stop = bnxt_re_stop_irq,
+	.ulp_irq_restart = bnxt_re_start_irq
 };
 
 /* RoCE -> Net driver */

+ 60 - 34
drivers/infiniband/hw/bnxt_re/qplib_fp.c

@@ -336,22 +336,32 @@ static irqreturn_t bnxt_qplib_nq_irq(int irq, void *dev_instance)
 	return IRQ_HANDLED;
 }
 
+void bnxt_qplib_nq_stop_irq(struct bnxt_qplib_nq *nq, bool kill)
+{
+	tasklet_disable(&nq->worker);
+	/* Mask h/w interrupt */
+	NQ_DB(nq->bar_reg_iomem, nq->hwq.cons, nq->hwq.max_elements);
+	/* Sync with last running IRQ handler */
+	synchronize_irq(nq->vector);
+	if (kill)
+		tasklet_kill(&nq->worker);
+	if (nq->requested) {
+		irq_set_affinity_hint(nq->vector, NULL);
+		free_irq(nq->vector, nq);
+		nq->requested = false;
+	}
+}
+
 void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq)
 {
 	if (nq->cqn_wq) {
 		destroy_workqueue(nq->cqn_wq);
 		nq->cqn_wq = NULL;
 	}
+
 	/* Make sure the HW is stopped! */
-	synchronize_irq(nq->vector);
-	tasklet_disable(&nq->worker);
-	tasklet_kill(&nq->worker);
+	bnxt_qplib_nq_stop_irq(nq, true);
 
-	if (nq->requested) {
-		irq_set_affinity_hint(nq->vector, NULL);
-		free_irq(nq->vector, nq);
-		nq->requested = false;
-	}
 	if (nq->bar_reg_iomem)
 		iounmap(nq->bar_reg_iomem);
 	nq->bar_reg_iomem = NULL;
@@ -361,6 +371,40 @@ void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq)
 	nq->vector = 0;
 }
 
+int bnxt_qplib_nq_start_irq(struct bnxt_qplib_nq *nq, int nq_indx,
+			    int msix_vector, bool need_init)
+{
+	int rc;
+
+	if (nq->requested)
+		return -EFAULT;
+
+	nq->vector = msix_vector;
+	if (need_init)
+		tasklet_init(&nq->worker, bnxt_qplib_service_nq,
+			     (unsigned long)nq);
+	else
+		tasklet_enable(&nq->worker);
+
+	snprintf(nq->name, sizeof(nq->name), "bnxt_qplib_nq-%d", nq_indx);
+	rc = request_irq(nq->vector, bnxt_qplib_nq_irq, 0, nq->name, nq);
+	if (rc)
+		return rc;
+
+	cpumask_clear(&nq->mask);
+	cpumask_set_cpu(nq_indx, &nq->mask);
+	rc = irq_set_affinity_hint(nq->vector, &nq->mask);
+	if (rc) {
+		dev_warn(&nq->pdev->dev,
+			 "QPLIB: set affinity failed; vector: %d nq_idx: %d\n",
+			 nq->vector, nq_indx);
+	}
+	nq->requested = true;
+	NQ_DB_REARM(nq->bar_reg_iomem, nq->hwq.cons, nq->hwq.max_elements);
+
+	return rc;
+}
+
 int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
 			 int nq_idx, int msix_vector, int bar_reg_offset,
 			 int (*cqn_handler)(struct bnxt_qplib_nq *nq,
@@ -372,41 +416,17 @@ int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
 	resource_size_t nq_base;
 	int rc = -1;
 
-	nq->pdev = pdev;
-	nq->vector = msix_vector;
 	if (cqn_handler)
 		nq->cqn_handler = cqn_handler;
 
 	if (srqn_handler)
 		nq->srqn_handler = srqn_handler;
 
-	tasklet_init(&nq->worker, bnxt_qplib_service_nq, (unsigned long)nq);
-
 	/* Have a task to schedule CQ notifiers in post send case */
 	nq->cqn_wq  = create_singlethread_workqueue("bnxt_qplib_nq");
 	if (!nq->cqn_wq)
-		goto fail;
-
-	nq->requested = false;
-	memset(nq->name, 0, 32);
-	sprintf(nq->name, "bnxt_qplib_nq-%d", nq_idx);
-	rc = request_irq(nq->vector, bnxt_qplib_nq_irq, 0, nq->name, nq);
-	if (rc) {
-		dev_err(&nq->pdev->dev,
-			"Failed to request IRQ for NQ: %#x", rc);
-		goto fail;
-	}
-
-	cpumask_clear(&nq->mask);
-	cpumask_set_cpu(nq_idx, &nq->mask);
-	rc = irq_set_affinity_hint(nq->vector, &nq->mask);
-	if (rc) {
-		dev_warn(&nq->pdev->dev,
-			 "QPLIB: set affinity failed; vector: %d nq_idx: %d\n",
-			 nq->vector, nq_idx);
-	}
+		return -ENOMEM;
 
-	nq->requested = true;
 	nq->bar_reg = NQ_CONS_PCI_BAR_REGION;
 	nq->bar_reg_off = bar_reg_offset;
 	nq_base = pci_resource_start(pdev, nq->bar_reg);
@@ -419,7 +439,13 @@ int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
 		rc = -ENOMEM;
 		goto fail;
 	}
-	NQ_DB_REARM(nq->bar_reg_iomem, nq->hwq.cons, nq->hwq.max_elements);
+
+	rc = bnxt_qplib_nq_start_irq(nq, nq_idx, msix_vector, true);
+	if (rc) {
+		dev_err(&nq->pdev->dev,
+			"QPLIB: Failed to request irq for nq-idx %d", nq_idx);
+		goto fail;
+	}
 
 	return 0;
 fail:

+ 3 - 0
drivers/infiniband/hw/bnxt_re/qplib_fp.h

@@ -467,7 +467,10 @@ struct bnxt_qplib_nq_work {
 	struct bnxt_qplib_cq    *cq;
 };
 
+void bnxt_qplib_nq_stop_irq(struct bnxt_qplib_nq *nq, bool kill);
 void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq);
+int bnxt_qplib_nq_start_irq(struct bnxt_qplib_nq *nq, int nq_indx,
+			    int msix_vector, bool need_init);
 int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
 			 int nq_idx, int msix_vector, int bar_reg_offset,
 			 int (*cqn_handler)(struct bnxt_qplib_nq *nq,

+ 43 - 18
drivers/infiniband/hw/bnxt_re/qplib_rcfw.c

@@ -582,19 +582,29 @@ fail:
 	return -ENOMEM;
 }
 
-void bnxt_qplib_disable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw)
+void bnxt_qplib_rcfw_stop_irq(struct bnxt_qplib_rcfw *rcfw, bool kill)
 {
-	unsigned long indx;
-
-	/* Make sure the HW channel is stopped! */
-	synchronize_irq(rcfw->vector);
 	tasklet_disable(&rcfw->worker);
-	tasklet_kill(&rcfw->worker);
+	/* Mask h/w interrupts */
+	CREQ_DB(rcfw->creq_bar_reg_iomem, rcfw->creq.cons,
+		rcfw->creq.max_elements);
+	/* Sync with last running IRQ-handler */
+	synchronize_irq(rcfw->vector);
+	if (kill)
+		tasklet_kill(&rcfw->worker);
 
 	if (rcfw->requested) {
 		free_irq(rcfw->vector, rcfw);
 		rcfw->requested = false;
 	}
+}
+
+void bnxt_qplib_disable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw)
+{
+	unsigned long indx;
+
+	bnxt_qplib_rcfw_stop_irq(rcfw, true);
+
 	if (rcfw->cmdq_bar_reg_iomem)
 		iounmap(rcfw->cmdq_bar_reg_iomem);
 	rcfw->cmdq_bar_reg_iomem = NULL;
@@ -614,6 +624,31 @@ void bnxt_qplib_disable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw)
 	rcfw->vector = 0;
 }
 
+int bnxt_qplib_rcfw_start_irq(struct bnxt_qplib_rcfw *rcfw, int msix_vector,
+			      bool need_init)
+{
+	int rc;
+
+	if (rcfw->requested)
+		return -EFAULT;
+
+	rcfw->vector = msix_vector;
+	if (need_init)
+		tasklet_init(&rcfw->worker,
+			     bnxt_qplib_service_creq, (unsigned long)rcfw);
+	else
+		tasklet_enable(&rcfw->worker);
+	rc = request_irq(rcfw->vector, bnxt_qplib_creq_irq, 0,
+			 "bnxt_qplib_creq", rcfw);
+	if (rc)
+		return rc;
+	rcfw->requested = true;
+	CREQ_DB_REARM(rcfw->creq_bar_reg_iomem, rcfw->creq.cons,
+		      rcfw->creq.max_elements);
+
+	return 0;
+}
+
 int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev,
 				   struct bnxt_qplib_rcfw *rcfw,
 				   int msix_vector,
@@ -675,27 +710,17 @@ int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev,
 	rcfw->creq_qp_event_processed = 0;
 	rcfw->creq_func_event_processed = 0;
 
-	rcfw->vector = msix_vector;
 	if (aeq_handler)
 		rcfw->aeq_handler = aeq_handler;
+	init_waitqueue_head(&rcfw->waitq);
 
-	tasklet_init(&rcfw->worker, bnxt_qplib_service_creq,
-		     (unsigned long)rcfw);
-
-	rcfw->requested = false;
-	rc = request_irq(rcfw->vector, bnxt_qplib_creq_irq, 0,
-			 "bnxt_qplib_creq", rcfw);
+	rc = bnxt_qplib_rcfw_start_irq(rcfw, msix_vector, true);
 	if (rc) {
 		dev_err(&rcfw->pdev->dev,
 			"QPLIB: Failed to request IRQ for CREQ rc = 0x%x", rc);
 		bnxt_qplib_disable_rcfw_channel(rcfw);
 		return rc;
 	}
-	rcfw->requested = true;
-
-	init_waitqueue_head(&rcfw->waitq);
-
-	CREQ_DB_REARM(rcfw->creq_bar_reg_iomem, 0, rcfw->creq.max_elements);
 
 	init.cmdq_pbl = cpu_to_le64(rcfw->cmdq.pbl[PBL_LVL_0].pg_map_arr[0]);
 	init.cmdq_size_cmdq_lvl = cpu_to_le16(

+ 3 - 0
drivers/infiniband/hw/bnxt_re/qplib_rcfw.h

@@ -195,7 +195,10 @@ struct bnxt_qplib_rcfw {
 void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw *rcfw);
 int bnxt_qplib_alloc_rcfw_channel(struct pci_dev *pdev,
 				  struct bnxt_qplib_rcfw *rcfw, int qp_tbl_sz);
+void bnxt_qplib_rcfw_stop_irq(struct bnxt_qplib_rcfw *rcfw, bool kill);
 void bnxt_qplib_disable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw);
+int bnxt_qplib_rcfw_start_irq(struct bnxt_qplib_rcfw *rcfw, int msix_vector,
+			      bool need_init);
 int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev,
 				   struct bnxt_qplib_rcfw *rcfw,
 				   int msix_vector,

+ 1 - 1
drivers/infiniband/ulp/srpt/Kconfig

@@ -1,6 +1,6 @@
 config INFINIBAND_SRPT
 	tristate "InfiniBand SCSI RDMA Protocol target support"
-	depends on INFINIBAND_ADDR_TRANS && TARGET_CORE
+	depends on INFINIBAND && INFINIBAND_ADDR_TRANS && TARGET_CORE
 	---help---
 
 	  Support for the SCSI RDMA Protocol (SRP) Target driver. The

+ 11 - 11
drivers/input/mouse/elan_i2c_smbus.c

@@ -130,7 +130,7 @@ static int elan_smbus_get_baseline_data(struct i2c_client *client,
 					bool max_baseline, u8 *value)
 {
 	int error;
-	u8 val[3];
+	u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
 
 	error = i2c_smbus_read_block_data(client,
 					  max_baseline ?
@@ -149,7 +149,7 @@ static int elan_smbus_get_version(struct i2c_client *client,
 				  bool iap, u8 *version)
 {
 	int error;
-	u8 val[3];
+	u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
 
 	error = i2c_smbus_read_block_data(client,
 					  iap ? ETP_SMBUS_IAP_VERSION_CMD :
@@ -170,7 +170,7 @@ static int elan_smbus_get_sm_version(struct i2c_client *client,
 				     u8 *clickpad)
 {
 	int error;
-	u8 val[3];
+	u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
 
 	error = i2c_smbus_read_block_data(client,
 					  ETP_SMBUS_SM_VERSION_CMD, val);
@@ -188,7 +188,7 @@ static int elan_smbus_get_sm_version(struct i2c_client *client,
 static int elan_smbus_get_product_id(struct i2c_client *client, u16 *id)
 {
 	int error;
-	u8 val[3];
+	u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
 
 	error = i2c_smbus_read_block_data(client,
 					  ETP_SMBUS_UNIQUEID_CMD, val);
@@ -205,7 +205,7 @@ static int elan_smbus_get_checksum(struct i2c_client *client,
 				   bool iap, u16 *csum)
 {
 	int error;
-	u8 val[3];
+	u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
 
 	error = i2c_smbus_read_block_data(client,
 					  iap ? ETP_SMBUS_FW_CHECKSUM_CMD :
@@ -226,7 +226,7 @@ static int elan_smbus_get_max(struct i2c_client *client,
 {
 	int ret;
 	int error;
-	u8 val[3];
+	u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
 
 	ret = i2c_smbus_read_block_data(client, ETP_SMBUS_RANGE_CMD, val);
 	if (ret != 3) {
@@ -246,7 +246,7 @@ static int elan_smbus_get_resolution(struct i2c_client *client,
 {
 	int ret;
 	int error;
-	u8 val[3];
+	u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
 
 	ret = i2c_smbus_read_block_data(client, ETP_SMBUS_RESOLUTION_CMD, val);
 	if (ret != 3) {
@@ -267,7 +267,7 @@ static int elan_smbus_get_num_traces(struct i2c_client *client,
 {
 	int ret;
 	int error;
-	u8 val[3];
+	u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
 
 	ret = i2c_smbus_read_block_data(client, ETP_SMBUS_XY_TRACENUM_CMD, val);
 	if (ret != 3) {
@@ -294,7 +294,7 @@ static int elan_smbus_iap_get_mode(struct i2c_client *client,
 {
 	int error;
 	u16 constant;
-	u8 val[3];
+	u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
 
 	error = i2c_smbus_read_block_data(client, ETP_SMBUS_IAP_CTRL_CMD, val);
 	if (error < 0) {
@@ -345,7 +345,7 @@ static int elan_smbus_prepare_fw_update(struct i2c_client *client)
 	int len;
 	int error;
 	enum tp_mode mode;
-	u8 val[3];
+	u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
 	u8 cmd[4] = {0x0F, 0x78, 0x00, 0x06};
 	u16 password;
 
@@ -419,7 +419,7 @@ static int elan_smbus_write_fw_block(struct i2c_client *client,
 	struct device *dev = &client->dev;
 	int error;
 	u16 result;
-	u8 val[3];
+	u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
 
 	/*
 	 * Due to the limitation of smbus protocol limiting
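
Every hunk above makes the same fix: i2c_smbus_read_block_data() may store up to I2C_SMBUS_BLOCK_MAX (32) bytes into the caller's buffer, since the device chooses the returned length, so a 3-byte stack array can be overrun. A standalone sketch of the failure mode and the sized-and-zeroed replacement (fake_block_read() is a hypothetical stand-in for the SMBus call):

#include <stdio.h>
#include <string.h>

#define I2C_SMBUS_BLOCK_MAX 32	/* SMBus block reads carry at most 32 bytes */

/* Hypothetical stand-in for i2c_smbus_read_block_data(): the device
 * picks the length, so the callee may legally write up to
 * I2C_SMBUS_BLOCK_MAX bytes into buf. */
static int fake_block_read(unsigned char *buf)
{
	int len = I2C_SMBUS_BLOCK_MAX;	/* device chose the maximum */

	memset(buf, 0xab, len);
	return len;
}

int main(void)
{
	/* "unsigned char val[3]" here would be a stack overrun; size for
	 * the worst case and zero-fill so short reads stay deterministic */
	unsigned char val[I2C_SMBUS_BLOCK_MAX] = {0};
	int n = fake_block_read(val);

	printf("read %d bytes, val[0]=0x%02x\n", n, val[0]);
	return 0;
}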

+ 6 - 0
drivers/input/mouse/synaptics.c

@@ -172,6 +172,12 @@ static const char * const smbus_pnp_ids[] = {
 	"LEN0048", /* X1 Carbon 3 */
 	"LEN0048", /* X1 Carbon 3 */
 	"LEN0046", /* X250 */
 	"LEN0046", /* X250 */
 	"LEN004a", /* W541 */
 	"LEN004a", /* W541 */
+	"LEN0071", /* T480 */
+	"LEN0072", /* X1 Carbon Gen 5 (2017) - Elan/ALPS trackpoint */
+	"LEN0073", /* X1 Carbon G5 (Elantech) */
+	"LEN0092", /* X1 Carbon 6 */
+	"LEN0096", /* X280 */
+	"LEN0097", /* X280 -> ALPS trackpoint */
 	"LEN200f", /* T450s */
 	"LEN200f", /* T450s */
 	NULL
 	NULL
 };
 };

+ 13 - 0
drivers/net/dsa/b53/b53_common.c

@@ -1711,6 +1711,18 @@ static const struct b53_chip_data b53_switch_chips[] = {
 		.cpu_port = B53_CPU_PORT_25,
 		.duplex_reg = B53_DUPLEX_STAT_FE,
 	},
+	{
+		.chip_id = BCM5389_DEVICE_ID,
+		.dev_name = "BCM5389",
+		.vlans = 4096,
+		.enabled_ports = 0x1f,
+		.arl_entries = 4,
+		.cpu_port = B53_CPU_PORT,
+		.vta_regs = B53_VTA_REGS,
+		.duplex_reg = B53_DUPLEX_STAT_GE,
+		.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
+		.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
+	},
 	{
 		.chip_id = BCM5395_DEVICE_ID,
 		.dev_name = "BCM5395",
@@ -2034,6 +2046,7 @@ int b53_switch_detect(struct b53_device *dev)
 		else
 			dev->chip_id = BCM5365_DEVICE_ID;
 		break;
+	case BCM5389_DEVICE_ID:
 	case BCM5395_DEVICE_ID:
 	case BCM5397_DEVICE_ID:
 	case BCM5398_DEVICE_ID:

+ 4 - 1
drivers/net/dsa/b53/b53_mdio.c

@@ -285,6 +285,7 @@ static const struct b53_io_ops b53_mdio_ops = {
 #define B53_BRCM_OUI_1	0x0143bc00
 #define B53_BRCM_OUI_2	0x03625c00
 #define B53_BRCM_OUI_3	0x00406000
+#define B53_BRCM_OUI_4	0x01410c00
 
 static int b53_mdio_probe(struct mdio_device *mdiodev)
 {
@@ -311,7 +312,8 @@ static int b53_mdio_probe(struct mdio_device *mdiodev)
 	 */
 	if ((phy_id & 0xfffffc00) != B53_BRCM_OUI_1 &&
 	    (phy_id & 0xfffffc00) != B53_BRCM_OUI_2 &&
-	    (phy_id & 0xfffffc00) != B53_BRCM_OUI_3) {
+	    (phy_id & 0xfffffc00) != B53_BRCM_OUI_3 &&
+	    (phy_id & 0xfffffc00) != B53_BRCM_OUI_4) {
 		dev_err(&mdiodev->dev, "Unsupported device: 0x%08x\n", phy_id);
 		return -ENODEV;
 	}
@@ -360,6 +362,7 @@ static const struct of_device_id b53_of_match[] = {
 	{ .compatible = "brcm,bcm53125" },
 	{ .compatible = "brcm,bcm53128" },
 	{ .compatible = "brcm,bcm5365" },
+	{ .compatible = "brcm,bcm5389" },
 	{ .compatible = "brcm,bcm5395" },
 	{ .compatible = "brcm,bcm5397" },
 	{ .compatible = "brcm,bcm5398" },

+ 1 - 0
drivers/net/dsa/b53/b53_priv.h

@@ -48,6 +48,7 @@ struct b53_io_ops {
 enum {
 	BCM5325_DEVICE_ID = 0x25,
 	BCM5365_DEVICE_ID = 0x65,
+	BCM5389_DEVICE_ID = 0x89,
 	BCM5395_DEVICE_ID = 0x95,
 	BCM5397_DEVICE_ID = 0x97,
 	BCM5398_DEVICE_ID = 0x98,

+ 3 - 1
drivers/net/ethernet/emulex/benet/be_main.c

@@ -3309,7 +3309,9 @@ void be_detect_error(struct be_adapter *adapter)
 				if ((val & POST_STAGE_FAT_LOG_START)
 				     != POST_STAGE_FAT_LOG_START &&
 				    (val & POST_STAGE_ARMFW_UE)
-				     != POST_STAGE_ARMFW_UE)
+				     != POST_STAGE_ARMFW_UE &&
+				    (val & POST_STAGE_RECOVERABLE_ERR)
+				     != POST_STAGE_RECOVERABLE_ERR)
 					return;
 			}
 

+ 4 - 5
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c

@@ -9054,7 +9054,6 @@ static int parse_tc_actions(struct ixgbe_adapter *adapter,
 {
 	const struct tc_action *a;
 	LIST_HEAD(actions);
-	int err;
 
 	if (!tcf_exts_has_actions(exts))
 		return -EINVAL;
@@ -9075,11 +9074,11 @@ static int parse_tc_actions(struct ixgbe_adapter *adapter,
 
 			if (!dev)
 				return -EINVAL;
-			err = handle_redirect_action(adapter, dev->ifindex, queue,
-						     action);
-			if (err == 0)
-				return err;
+			return handle_redirect_action(adapter, dev->ifindex,
+						      queue, action);
 		}
+
+		return -EINVAL;
 	}
 
 	return -EINVAL;

+ 5 - 0
drivers/net/ethernet/mellanox/mlxsw/spectrum.c

@@ -4433,6 +4433,11 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
 			NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on an OVS port");
 			return -EINVAL;
 		}
+		if (is_vlan_dev(upper_dev) &&
+		    vlan_dev_vlan_id(upper_dev) == 1) {
+			NL_SET_ERR_MSG_MOD(extack, "Creating a VLAN device with VID 1 is unsupported: VLAN 1 carries untagged traffic");
+			return -EINVAL;
+		}
 		break;
 	case NETDEV_CHANGEUPPER:
 		upper_dev = info->upper_dev;

+ 1 - 1
drivers/net/ethernet/natsemi/sonic.c

@@ -84,7 +84,7 @@ static int sonic_open(struct net_device *dev)
 	for (i = 0; i < SONIC_NUM_RRS; i++) {
 		dma_addr_t laddr = dma_map_single(lp->device, skb_put(lp->rx_skb[i], SONIC_RBSIZE),
 		                                  SONIC_RBSIZE, DMA_FROM_DEVICE);
-		if (!laddr) {
+		if (dma_mapping_error(lp->device, laddr)) {
 			while(i > 0) { /* free any that were mapped successfully */
 				i--;
 				dma_unmap_single(lp->device, lp->rx_laddr[i], SONIC_RBSIZE, DMA_FROM_DEVICE);
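
The sonic fix above generalizes: a DMA address of 0 can be a perfectly valid mapping, and failed mappings are not guaranteed to be NULL, so dma_mapping_error() is the only portable failure check. A hedged sketch of the pattern (the surrounding driver state is illustrative):

    #include <linux/dma-mapping.h>

    /* Illustrative helper: dev/buf/len stand in for driver specifics. */
    static int example_map_rx(struct device *dev, void *buf, size_t len,
    			      dma_addr_t *out)
    {
    	dma_addr_t addr = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);

    	/* Never test "!addr": 0 may be valid and errors non-zero. */
    	if (dma_mapping_error(dev, addr))
    		return -ENOMEM;

    	*out = addr;
    	return 0;
    }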

+ 2 - 2
drivers/net/ethernet/socionext/netsec.c

@@ -1674,8 +1674,8 @@ static int netsec_probe(struct platform_device *pdev)
 	if (ret)
 		goto unreg_napi;
 
-	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)))
-		dev_warn(&pdev->dev, "Failed to enable 64-bit DMA\n");
+	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40)))
+		dev_warn(&pdev->dev, "Failed to set DMA mask\n");
 
 	ret = register_netdev(ndev);
 	if (ret) {

+ 12 - 10
drivers/net/ethernet/ti/davinci_emac.c

@@ -1873,7 +1873,7 @@ static int davinci_emac_probe(struct platform_device *pdev)
 	if (IS_ERR(priv->txchan)) {
 		dev_err(&pdev->dev, "error initializing tx dma channel\n");
 		rc = PTR_ERR(priv->txchan);
-		goto no_cpdma_chan;
+		goto err_free_dma;
 	}
 
 	priv->rxchan = cpdma_chan_create(priv->dma, EMAC_DEF_RX_CH,
@@ -1881,14 +1881,14 @@ static int davinci_emac_probe(struct platform_device *pdev)
 	if (IS_ERR(priv->rxchan)) {
 		dev_err(&pdev->dev, "error initializing rx dma channel\n");
 		rc = PTR_ERR(priv->rxchan);
-		goto no_cpdma_chan;
+		goto err_free_txchan;
 	}
 
 	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
 	if (!res) {
 		dev_err(&pdev->dev, "error getting irq res\n");
 		rc = -ENOENT;
-		goto no_cpdma_chan;
+		goto err_free_rxchan;
 	}
 	ndev->irq = res->start;
 
@@ -1914,7 +1914,7 @@ static int davinci_emac_probe(struct platform_device *pdev)
 		pm_runtime_put_noidle(&pdev->dev);
 		dev_err(&pdev->dev, "%s: failed to get_sync(%d)\n",
 			__func__, rc);
-		goto no_cpdma_chan;
+		goto err_napi_del;
 	}
 
 	/* register the network device */
@@ -1924,7 +1924,7 @@ static int davinci_emac_probe(struct platform_device *pdev)
 		dev_err(&pdev->dev, "error in register_netdev\n");
 		rc = -ENODEV;
 		pm_runtime_put(&pdev->dev);
-		goto no_cpdma_chan;
+		goto err_napi_del;
 	}
 
 
@@ -1937,11 +1937,13 @@ static int davinci_emac_probe(struct platform_device *pdev)
 
 	return 0;
 
-no_cpdma_chan:
-	if (priv->txchan)
-		cpdma_chan_destroy(priv->txchan);
-	if (priv->rxchan)
-		cpdma_chan_destroy(priv->rxchan);
+err_napi_del:
+	netif_napi_del(&priv->napi);
+err_free_rxchan:
+	cpdma_chan_destroy(priv->rxchan);
+err_free_txchan:
+	cpdma_chan_destroy(priv->txchan);
+err_free_dma:
 	cpdma_ctlr_destroy(priv->dma);
 no_pdata:
 	if (of_phy_is_fixed_link(np))
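
The relabelled error path above follows the standard kernel unwind idiom: one label per successfully acquired resource, released in reverse order of acquisition, which removes the need for "if (x) destroy(x)" guards like the old no_cpdma_chan catch-all. A generic sketch (the alloc/free helpers are placeholders):

    /* Placeholder helpers: alloc_a/b/c and free_a/b are illustrative. */
    static int example_probe(void)
    {
    	int err;

    	err = alloc_a();
    	if (err)
    		return err;		/* nothing acquired yet */

    	err = alloc_b();
    	if (err)
    		goto err_free_a;	/* undo A only */

    	err = alloc_c();
    	if (err)
    		goto err_free_b;	/* undo B, then fall through to A */

    	return 0;

    err_free_b:
    	free_b();
    err_free_a:
    	free_a();
    	return err;
    }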

+ 9 - 6
drivers/net/tun.c

@@ -1650,7 +1650,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
 	else
 		*skb_xdp = 0;
 
-	preempt_disable();
+	local_bh_disable();
 	rcu_read_lock();
 	xdp_prog = rcu_dereference(tun->xdp_prog);
 	if (xdp_prog && !*skb_xdp) {
@@ -1675,7 +1675,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
 			if (err)
 				goto err_redirect;
 			rcu_read_unlock();
-			preempt_enable();
+			local_bh_enable();
 			return NULL;
 		case XDP_TX:
 			get_page(alloc_frag->page);
@@ -1684,7 +1684,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
 				goto err_redirect;
 			tun_xdp_flush(tun->dev);
 			rcu_read_unlock();
-			preempt_enable();
+			local_bh_enable();
 			return NULL;
 		case XDP_PASS:
 			delta = orig_data - xdp.data;
@@ -1703,7 +1703,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
 	skb = build_skb(buf, buflen);
 	if (!skb) {
 		rcu_read_unlock();
-		preempt_enable();
+		local_bh_enable();
 		return ERR_PTR(-ENOMEM);
 	}
 
@@ -1713,7 +1713,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
 	alloc_frag->offset += buflen;
 
 	rcu_read_unlock();
-	preempt_enable();
+	local_bh_enable();
 
 	return skb;
 
@@ -1721,7 +1721,7 @@ err_redirect:
 	put_page(alloc_frag->page);
 err_xdp:
 	rcu_read_unlock();
-	preempt_enable();
+	local_bh_enable();
 	this_cpu_inc(tun->pcpu_stats->rx_dropped);
 	return NULL;
 }
@@ -1917,16 +1917,19 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
 		struct bpf_prog *xdp_prog;
 		int ret;
 
+		local_bh_disable();
 		rcu_read_lock();
 		xdp_prog = rcu_dereference(tun->xdp_prog);
 		if (xdp_prog) {
 			ret = do_xdp_generic(xdp_prog, skb);
 			if (ret != XDP_PASS) {
 				rcu_read_unlock();
+				local_bh_enable();
 				return total_len;
 			}
 		}
 		rcu_read_unlock();
+		local_bh_enable();
 	}
 
 	rcu_read_lock();
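
On the tun.c conversion above: the XDP redirect path keeps per-CPU state that is otherwise only touched from NAPI softirq context. preempt_disable() pins the task to a CPU but still allows a softirq to run in the middle and clobber that state; local_bh_disable() excludes softirqs too, which is what this process-context path needs. A schematic of the resulting critical section, with example_dev and run_xdp() as hypothetical stand-ins, not tun.c API:

    #include <linux/bottom_half.h>
    #include <linux/rcupdate.h>

    static void example_run_xdp(struct example_dev *dev)
    {
    	struct bpf_prog *prog;

    	local_bh_disable();		/* keep softirq XDP users off the
    					 * per-CPU redirect state */
    	rcu_read_lock();

    	prog = rcu_dereference(dev->xdp_prog);
    	if (prog)
    		run_xdp(prog);		/* hypothetical: may touch per-CPU state */

    	rcu_read_unlock();
    	local_bh_enable();
    }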

+ 1 - 1
drivers/net/usb/cdc_mbim.c

@@ -609,7 +609,7 @@ static const struct driver_info cdc_mbim_info_ndp_to_end = {
 */
 static const struct driver_info cdc_mbim_info_avoid_altsetting_toggle = {
 	.description = "CDC MBIM",
-	.flags = FLAG_NO_SETINT | FLAG_MULTI_PACKET | FLAG_WWAN,
+	.flags = FLAG_NO_SETINT | FLAG_MULTI_PACKET | FLAG_WWAN | FLAG_SEND_ZLP,
 	.bind = cdc_mbim_bind,
 	.unbind = cdc_mbim_unbind,
 	.manage_power = cdc_mbim_manage_power,

+ 1 - 0
drivers/net/usb/qmi_wwan.c

@@ -1103,6 +1103,7 @@ static const struct usb_device_id products[] = {
 	{QMI_FIXED_INTF(0x05c6, 0x920d, 5)},
 	{QMI_QUIRK_SET_DTR(0x05c6, 0x9625, 4)},	/* YUGA CLM920-NC5 */
 	{QMI_FIXED_INTF(0x0846, 0x68a2, 8)},
+	{QMI_FIXED_INTF(0x0846, 0x68d3, 8)},	/* Netgear Aircard 779S */
 	{QMI_FIXED_INTF(0x12d1, 0x140c, 1)},	/* Huawei E173 */
 	{QMI_FIXED_INTF(0x12d1, 0x14ac, 1)},	/* Huawei E1820 */
 	{QMI_FIXED_INTF(0x1435, 0xd181, 3)},	/* Wistron NeWeb D18Q1 */

+ 5 - 5
drivers/net/wireless/intel/iwlwifi/pcie/trans.c

@@ -1590,14 +1590,13 @@ static void iwl_pcie_set_interrupt_capa(struct pci_dev *pdev,
 					struct iwl_trans *trans)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	int max_irqs, num_irqs, i, ret, nr_online_cpus;
+	int max_irqs, num_irqs, i, ret;
 	u16 pci_cmd;
 
 	if (!trans->cfg->mq_rx_supported)
 		goto enable_msi;
 
-	nr_online_cpus = num_online_cpus();
-	max_irqs = min_t(u32, nr_online_cpus + 2, IWL_MAX_RX_HW_QUEUES);
+	max_irqs = min_t(u32, num_online_cpus() + 2, IWL_MAX_RX_HW_QUEUES);
 	for (i = 0; i < max_irqs; i++)
 		trans_pcie->msix_entries[i].entry = i;
 
@@ -1623,16 +1622,17 @@ static void iwl_pcie_set_interrupt_capa(struct pci_dev *pdev,
 	 * Two interrupts less: non rx causes shared with FBQ and RSS.
 	 * More than two interrupts: we will use fewer RSS queues.
 	 */
-	if (num_irqs <= nr_online_cpus) {
+	if (num_irqs <= max_irqs - 2) {
 		trans_pcie->trans->num_rx_queues = num_irqs + 1;
 		trans_pcie->shared_vec_mask = IWL_SHARED_IRQ_NON_RX |
 			IWL_SHARED_IRQ_FIRST_RSS;
-	} else if (num_irqs == nr_online_cpus + 1) {
+	} else if (num_irqs == max_irqs - 1) {
 		trans_pcie->trans->num_rx_queues = num_irqs;
 		trans_pcie->shared_vec_mask = IWL_SHARED_IRQ_NON_RX;
 	} else {
 		trans_pcie->trans->num_rx_queues = num_irqs - 1;
 	}
+	WARN_ON(trans_pcie->trans->num_rx_queues > IWL_MAX_RX_HW_QUEUES);
 
 	trans_pcie->alloc_vecs = num_irqs;
 	trans_pcie->msix_enabled = true;
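
The iwlwifi change above matters because the MSI-X allocation may grant fewer vectors than requested and num_online_cpus() can change between the request and the check; comparing against the max_irqs actually requested keeps the bookkeeping consistent. A toy recomputation of the sharing decision, under the assumption max_irqs = num_cpus + 2 and no clamping by IWL_MAX_RX_HW_QUEUES (plain user-space C, names illustrative):

    #include <stdio.h>

    int main(void)
    {
    	int num_cpus = 4;
    	int max_irqs = num_cpus + 2;	/* what the driver asked for */

    	for (int num_irqs = 2; num_irqs <= max_irqs; num_irqs++) {
    		int rx_queues;

    		if (num_irqs <= max_irqs - 2)		/* short >= 2 vectors */
    			rx_queues = num_irqs + 1;	/* non-RX + RSS share one */
    		else if (num_irqs == max_irqs - 1)	/* short 1 vector */
    			rx_queues = num_irqs;		/* non-RX shares */
    		else					/* got everything */
    			rx_queues = num_irqs - 1;

    		printf("irqs=%d -> rx_queues=%d\n", num_irqs, rx_queues);
    	}
    	return 0;
    }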

+ 3 - 4
drivers/net/wireless/ralink/rt2x00/rt2x00queue.c

@@ -372,16 +372,15 @@ static void rt2x00queue_create_tx_descriptor_ht(struct rt2x00_dev *rt2x00dev,
 
 	/*
 	 * Determine IFS values
-	 * - Use TXOP_BACKOFF for probe and management frames except beacons
+	 * - Use TXOP_BACKOFF for management frames except beacons
 	 * - Use TXOP_SIFS for fragment bursts
 	 * - Use TXOP_HTTXOP for everything else
 	 *
 	 * Note: rt2800 devices won't use CTS protection (if used)
 	 * for frames not transmitted with TXOP_HTTXOP
 	 */
-	if ((ieee80211_is_mgmt(hdr->frame_control) &&
-	     !ieee80211_is_beacon(hdr->frame_control)) ||
-	    (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE))
+	if (ieee80211_is_mgmt(hdr->frame_control) &&
+	    !ieee80211_is_beacon(hdr->frame_control))
 		txdesc->u.ht.txop = TXOP_BACKOFF;
 	else if (!(tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT))
 		txdesc->u.ht.txop = TXOP_SIFS;

+ 1 - 1
drivers/nvme/host/Kconfig

@@ -27,7 +27,7 @@ config NVME_FABRICS
 
 config NVME_RDMA
 	tristate "NVM Express over Fabrics RDMA host driver"
-	depends on INFINIBAND_ADDR_TRANS && BLOCK
+	depends on INFINIBAND && INFINIBAND_ADDR_TRANS && BLOCK
 	select NVME_CORE
 	select NVME_FABRICS
 	select SG_POOL

+ 1 - 1
drivers/nvme/host/core.c

@@ -1447,8 +1447,8 @@ static void __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id)
 	if (ns->lba_shift == 0)
 		ns->lba_shift = 9;
 	ns->noiob = le16_to_cpu(id->noiob);
-	ns->ext = ns->ms && (id->flbas & NVME_NS_FLBAS_META_EXT);
 	ns->ms = le16_to_cpu(id->lbaf[id->flbas & NVME_NS_FLBAS_LBA_MASK].ms);
+	ns->ext = ns->ms && (id->flbas & NVME_NS_FLBAS_META_EXT);
 	/* the PI implementation requires metadata equal t10 pi tuple size */
 	if (ns->ms == sizeof(struct t10_pi_tuple))
 		ns->pi_type = id->dps & NVME_NS_DPS_PI_MASK;

+ 1 - 1
drivers/nvme/target/Kconfig

@@ -27,7 +27,7 @@ config NVME_TARGET_LOOP
 
 config NVME_TARGET_RDMA
 	tristate "NVMe over Fabrics RDMA target support"
-	depends on INFINIBAND_ADDR_TRANS
+	depends on INFINIBAND && INFINIBAND_ADDR_TRANS
 	depends on NVME_TARGET
 	select SGL_ALLOC
 	help

+ 13 - 10
drivers/platform/x86/asus-wmi.c

@@ -163,6 +163,16 @@ MODULE_LICENSE("GPL");
 
 static const char * const ashs_ids[] = { "ATK4001", "ATK4002", NULL };
 
+static bool ashs_present(void)
+{
+	int i = 0;
+	while (ashs_ids[i]) {
+		if (acpi_dev_found(ashs_ids[i++]))
+			return true;
+	}
+	return false;
+}
+
 struct bios_args {
 	u32 arg0;
 	u32 arg1;
@@ -1025,6 +1035,9 @@ static int asus_new_rfkill(struct asus_wmi *asus,
 
 static void asus_wmi_rfkill_exit(struct asus_wmi *asus)
 {
+	if (asus->driver->wlan_ctrl_by_user && ashs_present())
+		return;
+
 	asus_unregister_rfkill_notifier(asus, "\\_SB.PCI0.P0P5");
 	asus_unregister_rfkill_notifier(asus, "\\_SB.PCI0.P0P6");
 	asus_unregister_rfkill_notifier(asus, "\\_SB.PCI0.P0P7");
@@ -2121,16 +2134,6 @@ static int asus_wmi_fan_init(struct asus_wmi *asus)
 	return 0;
 }
 
-static bool ashs_present(void)
-{
-	int i = 0;
-	while (ashs_ids[i]) {
-		if (acpi_dev_found(ashs_ids[i++]))
-			return true;
-	}
-	return false;
-}
-
 /*
  * WMI Driver
  */

+ 5 - 2
drivers/s390/block/dasd.c

@@ -3034,7 +3034,8 @@ static blk_status_t do_dasd_request(struct blk_mq_hw_ctx *hctx,
 	cqr->callback_data = req;
 	cqr->status = DASD_CQR_FILLED;
 	cqr->dq = dq;
-	req->completion_data = cqr;
+	*((struct dasd_ccw_req **) blk_mq_rq_to_pdu(req)) = cqr;
+
 	blk_mq_start_request(req);
 	spin_lock(&block->queue_lock);
 	list_add_tail(&cqr->blocklist, &block->ccw_queue);
@@ -3058,12 +3059,13 @@ out:
 */
 enum blk_eh_timer_return dasd_times_out(struct request *req, bool reserved)
 {
-	struct dasd_ccw_req *cqr = req->completion_data;
 	struct dasd_block *block = req->q->queuedata;
 	struct dasd_device *device;
+	struct dasd_ccw_req *cqr;
 	unsigned long flags;
 	int rc = 0;
 
+	cqr = *((struct dasd_ccw_req **) blk_mq_rq_to_pdu(req));
 	if (!cqr)
 		return BLK_EH_NOT_HANDLED;
 
@@ -3169,6 +3171,7 @@ static int dasd_alloc_queue(struct dasd_block *block)
 	int rc;
 
 	block->tag_set.ops = &dasd_mq_ops;
+	block->tag_set.cmd_size = sizeof(struct dasd_ccw_req *);
 	block->tag_set.nr_hw_queues = DASD_NR_HW_QUEUES;
 	block->tag_set.queue_depth = DASD_MAX_LCU_DEV * DASD_REQ_PER_DEV;
 	block->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;

+ 20 - 2
drivers/scsi/scsi_transport_srp.c

@@ -51,6 +51,8 @@ struct srp_internal {
 	struct transport_container rport_attr_cont;
 };
 
+static int scsi_is_srp_rport(const struct device *dev);
+
 #define to_srp_internal(tmpl) container_of(tmpl, struct srp_internal, t)
 
 #define	dev_to_rport(d)	container_of(d, struct srp_rport, dev)
@@ -60,9 +62,24 @@ static inline struct Scsi_Host *rport_to_shost(struct srp_rport *r)
 	return dev_to_shost(r->dev.parent);
 }
 
+static int find_child_rport(struct device *dev, void *data)
+{
+	struct device **child = data;
+
+	if (scsi_is_srp_rport(dev)) {
+		WARN_ON_ONCE(*child);
+		*child = dev;
+	}
+	return 0;
+}
+
 static inline struct srp_rport *shost_to_rport(struct Scsi_Host *shost)
 {
-	return transport_class_to_srp_rport(&shost->shost_gendev);
+	struct device *child = NULL;
+
+	WARN_ON_ONCE(device_for_each_child(&shost->shost_gendev, &child,
+					   find_child_rport) < 0);
+	return child ? dev_to_rport(child) : NULL;
 }
 
 /**
@@ -600,7 +617,8 @@ enum blk_eh_timer_return srp_timed_out(struct scsi_cmnd *scmd)
 	struct srp_rport *rport = shost_to_rport(shost);
 
 	pr_debug("timeout for sdev %s\n", dev_name(&sdev->sdev_gendev));
-	return rport->fast_io_fail_tmo < 0 && rport->dev_loss_tmo < 0 &&
+	return rport && rport->fast_io_fail_tmo < 0 &&
+		rport->dev_loss_tmo < 0 &&
 		i->f->reset_timer_if_blocked && scsi_device_blocked(sdev) ?
 		BLK_EH_RESET_TIMER : BLK_EH_NOT_HANDLED;
 }
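
The shost_to_rport() rework above trades the transport-class back-pointer for a child-device walk, and srp_timed_out() now tolerates a host with no rport yet (the new "rport &&" guard). The underlying device_for_each_child() lookup pattern, with is_example() as a placeholder predicate rather than a real kernel helper:

    #include <linux/device.h>

    static int match_child(struct device *dev, void *data)
    {
    	struct device **child = data;

    	if (is_example(dev))	/* placeholder for e.g. scsi_is_srp_rport() */
    		*child = dev;	/* remember the match */
    	return 0;		/* non-zero would stop the walk early */
    }

    static struct device *find_example_child(struct device *parent)
    {
    	struct device *child = NULL;

    	device_for_each_child(parent, &child, match_child);
    	return child;		/* may be NULL: callers must check */
    }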

+ 0 - 36
drivers/soc/lantiq/gphy.c

@@ -30,7 +30,6 @@ struct xway_gphy_priv {
 	struct clk *gphy_clk_gate;
 	struct reset_control *gphy_reset;
 	struct reset_control *gphy_reset2;
-	struct notifier_block gphy_reboot_nb;
 	void __iomem *membase;
 	char *fw_name;
 };
@@ -64,24 +63,6 @@ static const struct of_device_id xway_gphy_match[] = {
 };
 MODULE_DEVICE_TABLE(of, xway_gphy_match);
 
-static struct xway_gphy_priv *to_xway_gphy_priv(struct notifier_block *nb)
-{
-	return container_of(nb, struct xway_gphy_priv, gphy_reboot_nb);
-}
-
-static int xway_gphy_reboot_notify(struct notifier_block *reboot_nb,
-				   unsigned long code, void *unused)
-{
-	struct xway_gphy_priv *priv = to_xway_gphy_priv(reboot_nb);
-
-	if (priv) {
-		reset_control_assert(priv->gphy_reset);
-		reset_control_assert(priv->gphy_reset2);
-	}
-
-	return NOTIFY_DONE;
-}
-
 static int xway_gphy_load(struct device *dev, struct xway_gphy_priv *priv,
 			  dma_addr_t *dev_addr)
 {
@@ -205,14 +186,6 @@ static int xway_gphy_probe(struct platform_device *pdev)
 	reset_control_deassert(priv->gphy_reset);
 	reset_control_deassert(priv->gphy_reset2);
 
-	/* assert the gphy reset because it can hang after a reboot: */
-	priv->gphy_reboot_nb.notifier_call = xway_gphy_reboot_notify;
-	priv->gphy_reboot_nb.priority = -1;
-
-	ret = register_reboot_notifier(&priv->gphy_reboot_nb);
-	if (ret)
-		dev_warn(dev, "Failed to register reboot notifier\n");
-
 	platform_set_drvdata(pdev, priv);
 
 	return ret;
@@ -220,21 +193,12 @@ static int xway_gphy_probe(struct platform_device *pdev)
 
 static int xway_gphy_remove(struct platform_device *pdev)
 {
-	struct device *dev = &pdev->dev;
 	struct xway_gphy_priv *priv = platform_get_drvdata(pdev);
-	int ret;
-
-	reset_control_assert(priv->gphy_reset);
-	reset_control_assert(priv->gphy_reset2);
 
 	iowrite32be(0, priv->membase);
 
 	clk_disable_unprepare(priv->gphy_clk_gate);
 
-	ret = unregister_reboot_notifier(&priv->gphy_reboot_nb);
-	if (ret)
-		dev_warn(dev, "Failed to unregister reboot notifier\n");
-
 	return 0;
 }
 
+ 1 - 1
drivers/staging/lustre/lnet/Kconfig

@@ -34,7 +34,7 @@ config LNET_SELFTEST
 
 config LNET_XPRT_IB
 	tristate "LNET infiniband support"
-	depends on LNET && PCI && INFINIBAND_ADDR_TRANS
+	depends on LNET && PCI && INFINIBAND && INFINIBAND_ADDR_TRANS
 	default LNET && INFINIBAND
 	help
 	  This option allows the LNET users to use infiniband as an

+ 1 - 1
drivers/thunderbolt/icm.c

@@ -1255,7 +1255,7 @@ static int icm_ar_get_boot_acl(struct tb *tb, uuid_t *uuids, size_t nuuids)
 			/* Map empty entries to null UUID */
 			uuid[0] = 0;
 			uuid[1] = 0;
-		} else {
+		} else if (uuid[0] != 0 || uuid[1] != 0) {
 			/* Upper two DWs are always one's */
 			uuid[2] = 0xffffffff;
 			uuid[3] = 0xffffffff;

+ 10 - 15
drivers/vfio/vfio_iommu_type1.c

@@ -404,6 +404,7 @@ static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr,
 {
 	unsigned long pfn = 0;
 	long ret, pinned = 0, lock_acct = 0;
+	bool rsvd;
 	dma_addr_t iova = vaddr - dma->vaddr + dma->iova;
 
 	/* This code path is only user initiated */
@@ -414,23 +415,14 @@ static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr,
 	if (ret)
 		return ret;
 
-	if (is_invalid_reserved_pfn(*pfn_base)) {
-		struct vm_area_struct *vma;
-
-		down_read(&current->mm->mmap_sem);
-		vma = find_vma_intersection(current->mm, vaddr, vaddr + 1);
-		pinned = min_t(long, npage, vma_pages(vma));
-		up_read(&current->mm->mmap_sem);
-		return pinned;
-	}
-
 	pinned++;
+	rsvd = is_invalid_reserved_pfn(*pfn_base);
 
 	/*
 	 * Reserved pages aren't counted against the user, externally pinned
 	 * pages are already counted against the user.
 	 */
-	if (!vfio_find_vpfn(dma, iova)) {
+	if (!rsvd && !vfio_find_vpfn(dma, iova)) {
 		if (!lock_cap && current->mm->locked_vm + 1 > limit) {
 			put_pfn(*pfn_base, dma->prot);
 			pr_warn("%s: RLIMIT_MEMLOCK (%ld) exceeded\n", __func__,
@@ -450,12 +442,13 @@ static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr,
 		if (ret)
 			break;
 
-		if (pfn != *pfn_base + pinned) {
+		if (pfn != *pfn_base + pinned ||
+		    rsvd != is_invalid_reserved_pfn(pfn)) {
 			put_pfn(pfn, dma->prot);
 			break;
 		}
 
-		if (!vfio_find_vpfn(dma, iova)) {
+		if (!rsvd && !vfio_find_vpfn(dma, iova)) {
 			if (!lock_cap &&
 			    current->mm->locked_vm + lock_acct + 1 > limit) {
 				put_pfn(pfn, dma->prot);
@@ -473,8 +466,10 @@ out:
 
 unpin_out:
 	if (ret) {
-		for (pfn = *pfn_base ; pinned ; pfn++, pinned--)
-			put_pfn(pfn, dma->prot);
+		if (!rsvd) {
+			for (pfn = *pfn_base ; pinned ; pfn++, pinned--)
+				put_pfn(pfn, dma->prot);
+		}
 
 		return ret;
 	}

+ 24 - 13
drivers/vhost/net.c

@@ -105,7 +105,9 @@ struct vhost_net_virtqueue {
 	/* vhost zerocopy support fields below: */
 	/* last used idx for outstanding DMA zerocopy buffers */
 	int upend_idx;
-	/* first used idx for DMA done zerocopy buffers */
+	/* For TX, first used idx for DMA done zerocopy buffers
+	 * For RX, number of batched heads
+	 */
 	int done_idx;
 	/* an array of userspace buffers info */
 	struct ubuf_info *ubuf_info;
@@ -626,6 +628,18 @@ static int sk_has_rx_data(struct sock *sk)
 	return skb_queue_empty(&sk->sk_receive_queue);
 }
 
+static void vhost_rx_signal_used(struct vhost_net_virtqueue *nvq)
+{
+	struct vhost_virtqueue *vq = &nvq->vq;
+	struct vhost_dev *dev = vq->dev;
+
+	if (!nvq->done_idx)
+		return;
+
+	vhost_add_used_and_signal_n(dev, vq, vq->heads, nvq->done_idx);
+	nvq->done_idx = 0;
+}
+
 static int vhost_net_rx_peek_head_len(struct vhost_net *net, struct sock *sk)
 {
 	struct vhost_net_virtqueue *rvq = &net->vqs[VHOST_NET_VQ_RX];
@@ -635,6 +649,8 @@ static int vhost_net_rx_peek_head_len(struct vhost_net *net, struct sock *sk)
 	int len = peek_head_len(rvq, sk);
 
 	if (!len && vq->busyloop_timeout) {
+		/* Flush batched heads first */
+		vhost_rx_signal_used(rvq);
 		/* Both tx vq and rx socket were polled here */
 		mutex_lock_nested(&vq->mutex, 1);
 		vhost_disable_notify(&net->dev, vq);
@@ -762,7 +778,7 @@ static void handle_rx(struct vhost_net *net)
 	};
 	size_t total_len = 0;
 	int err, mergeable;
-	s16 headcount, nheads = 0;
+	s16 headcount;
 	size_t vhost_hlen, sock_hlen;
 	size_t vhost_len, sock_len;
 	struct socket *sock;
@@ -790,8 +806,8 @@ static void handle_rx(struct vhost_net *net)
 	while ((sock_len = vhost_net_rx_peek_head_len(net, sock->sk))) {
 		sock_len += sock_hlen;
 		vhost_len = sock_len + vhost_hlen;
-		headcount = get_rx_bufs(vq, vq->heads + nheads, vhost_len,
-					&in, vq_log, &log,
+		headcount = get_rx_bufs(vq, vq->heads + nvq->done_idx,
+					vhost_len, &in, vq_log, &log,
 					likely(mergeable) ? UIO_MAXIOV : 1);
 		/* On error, stop handling until the next kick. */
 		if (unlikely(headcount < 0))
@@ -862,12 +878,9 @@ static void handle_rx(struct vhost_net *net)
 			vhost_discard_vq_desc(vq, headcount);
 			goto out;
 		}
-		nheads += headcount;
-		if (nheads > VHOST_RX_BATCH) {
-			vhost_add_used_and_signal_n(&net->dev, vq, vq->heads,
-						    nheads);
-			nheads = 0;
-		}
+		nvq->done_idx += headcount;
+		if (nvq->done_idx > VHOST_RX_BATCH)
+			vhost_rx_signal_used(nvq);
 		if (unlikely(vq_log))
 			vhost_log_write(vq, vq_log, log, vhost_len);
 		total_len += vhost_len;
@@ -878,9 +891,7 @@ static void handle_rx(struct vhost_net *net)
 	}
 	vhost_net_enable_vq(net, vq);
 out:
-	if (nheads)
-		vhost_add_used_and_signal_n(&net->dev, vq, vq->heads,
-					    nheads);
+	vhost_rx_signal_used(nvq);
 	mutex_unlock(&vq->mutex);
 }
 
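
The vhost-net rework above reuses done_idx as an RX batch counter so used-ring updates and guest notifications are amortized over up to VHOST_RX_BATCH buffers rather than signalled per packet, and the batch is flushed before the busy-poll path peeks at the queue. A reduced sketch of the flush-on-threshold idea (types and helpers are placeholders; signal_used() stands in for vhost_add_used_and_signal_n()):

    #define BATCH 64	/* mirrors VHOST_RX_BATCH, value illustrative */

    struct example_vq {
    	int done_idx;	/* completed but not yet signalled heads */
    };

    static void flush_used(struct example_vq *vq)
    {
    	if (!vq->done_idx)
    		return;
    	signal_used(vq, vq->done_idx);	/* one notification for the batch */
    	vq->done_idx = 0;
    }

    static void rx_one(struct example_vq *vq, int headcount)
    {
    	vq->done_idx += headcount;
    	if (vq->done_idx > BATCH)	/* threshold reached: notify guest */
    		flush_used(vq);
    }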

+ 3 - 7
fs/afs/security.c

@@ -372,18 +372,14 @@ int afs_permission(struct inode *inode, int mask)
 	       mask, access, S_ISDIR(inode->i_mode) ? "dir" : "file");
 
 	if (S_ISDIR(inode->i_mode)) {
-		if (mask & MAY_EXEC) {
+		if (mask & (MAY_EXEC | MAY_READ | MAY_CHDIR)) {
 			if (!(access & AFS_ACE_LOOKUP))
 				goto permission_denied;
-		} else if (mask & MAY_READ) {
-			if (!(access & AFS_ACE_LOOKUP))
-				goto permission_denied;
-		} else if (mask & MAY_WRITE) {
+		}
+		if (mask & MAY_WRITE) {
 			if (!(access & (AFS_ACE_DELETE | /* rmdir, unlink, rename from */
 					AFS_ACE_INSERT))) /* create, mkdir, symlink, rename to */
 				goto permission_denied;
-		} else {
-			BUG();
 		}
 	} else {
 		if (!(access & AFS_ACE_LOOKUP))

+ 10 - 9
fs/afs/vlclient.c

@@ -23,7 +23,7 @@ static int afs_deliver_vl_get_entry_by_name_u(struct afs_call *call)
 	struct afs_uvldbentry__xdr *uvldb;
 	struct afs_vldb_entry *entry;
 	bool new_only = false;
-	u32 tmp, nr_servers;
+	u32 tmp, nr_servers, vlflags;
 	int i, ret;
 
 	_enter("");
@@ -55,6 +55,7 @@ static int afs_deliver_vl_get_entry_by_name_u(struct afs_call *call)
 			new_only = true;
 	}
 
+	vlflags = ntohl(uvldb->flags);
 	for (i = 0; i < nr_servers; i++) {
 		struct afs_uuid__xdr *xdr;
 		struct afs_uuid *uuid;
@@ -64,12 +65,13 @@ static int afs_deliver_vl_get_entry_by_name_u(struct afs_call *call)
 		if (tmp & AFS_VLSF_DONTUSE ||
 		    (new_only && !(tmp & AFS_VLSF_NEWREPSITE)))
 			continue;
-		if (tmp & AFS_VLSF_RWVOL)
+		if (tmp & AFS_VLSF_RWVOL) {
 			entry->fs_mask[i] |= AFS_VOL_VTM_RW;
+			if (vlflags & AFS_VLF_BACKEXISTS)
+				entry->fs_mask[i] |= AFS_VOL_VTM_BAK;
+		}
 		if (tmp & AFS_VLSF_ROVOL)
 			entry->fs_mask[i] |= AFS_VOL_VTM_RO;
-		if (tmp & AFS_VLSF_BACKVOL)
-			entry->fs_mask[i] |= AFS_VOL_VTM_BAK;
 		if (!entry->fs_mask[i])
 			continue;
 
@@ -89,15 +91,14 @@ static int afs_deliver_vl_get_entry_by_name_u(struct afs_call *call)
 	for (i = 0; i < AFS_MAXTYPES; i++)
 		entry->vid[i] = ntohl(uvldb->volumeId[i]);
 
-	tmp = ntohl(uvldb->flags);
-	if (tmp & AFS_VLF_RWEXISTS)
+	if (vlflags & AFS_VLF_RWEXISTS)
 		__set_bit(AFS_VLDB_HAS_RW, &entry->flags);
-	if (tmp & AFS_VLF_ROEXISTS)
+	if (vlflags & AFS_VLF_ROEXISTS)
 		__set_bit(AFS_VLDB_HAS_RO, &entry->flags);
-	if (tmp & AFS_VLF_BACKEXISTS)
+	if (vlflags & AFS_VLF_BACKEXISTS)
 		__set_bit(AFS_VLDB_HAS_BAK, &entry->flags);
 
-	if (!(tmp & (AFS_VLF_RWEXISTS | AFS_VLF_ROEXISTS | AFS_VLF_BACKEXISTS))) {
+	if (!(vlflags & (AFS_VLF_RWEXISTS | AFS_VLF_ROEXISTS | AFS_VLF_BACKEXISTS))) {
 		entry->error = -ENOMEDIUM;
 		__set_bit(AFS_VLDB_QUERY_ERROR, &entry->flags);
 	}

+ 1 - 1
fs/cifs/Kconfig

@@ -197,7 +197,7 @@ config CIFS_SMB311
 
 config CIFS_SMB_DIRECT
 	bool "SMB Direct support (Experimental)"
-	depends on CIFS=m && INFINIBAND_ADDR_TRANS || CIFS=y && INFINIBAND_ADDR_TRANS=y
+	depends on CIFS=m && INFINIBAND && INFINIBAND_ADDR_TRANS || CIFS=y && INFINIBAND=y && INFINIBAND_ADDR_TRANS=y
 	help
 	  Enables SMB Direct experimental support for SMB 3.0, 3.02 and 3.1.1.
 	  SMB Direct allows transferring SMB packets over RDMA. If unsure,

+ 1 - 0
fs/inode.c

@@ -178,6 +178,7 @@ int inode_init_always(struct super_block *sb, struct inode *inode)
 	mapping->a_ops = &empty_aops;
 	mapping->host = inode;
 	mapping->flags = 0;
+	mapping->wb_err = 0;
 	atomic_set(&mapping->i_mmap_writable, 0);
 	mapping_set_gfp_mask(mapping, GFP_HIGHUSER_MOVABLE);
 	mapping->private_data = NULL;

+ 1 - 1
include/drm/bridge/dw_hdmi.h

@@ -151,7 +151,7 @@ struct dw_hdmi *dw_hdmi_bind(struct platform_device *pdev,
 			     struct drm_encoder *encoder,
 			     const struct dw_hdmi_plat_data *plat_data);
 
-void dw_hdmi_setup_rx_sense(struct device *dev, bool hpd, bool rx_sense);
+void dw_hdmi_setup_rx_sense(struct dw_hdmi *hdmi, bool hpd, bool rx_sense);
 
 void dw_hdmi_set_sample_rate(struct dw_hdmi *hdmi, unsigned int rate);
 void dw_hdmi_audio_enable(struct dw_hdmi *hdmi);

+ 3 - 3
include/linux/iio/buffer_impl.h

@@ -53,7 +53,7 @@ struct iio_buffer_access_funcs {
 	int (*request_update)(struct iio_buffer *buffer);
 
 	int (*set_bytes_per_datum)(struct iio_buffer *buffer, size_t bpd);
-	int (*set_length)(struct iio_buffer *buffer, int length);
+	int (*set_length)(struct iio_buffer *buffer, unsigned int length);
 
 	int (*enable)(struct iio_buffer *buffer, struct iio_dev *indio_dev);
 	int (*disable)(struct iio_buffer *buffer, struct iio_dev *indio_dev);
@@ -72,10 +72,10 @@ struct iio_buffer_access_funcs {
 */
 struct iio_buffer {
 	/** @length: Number of datums in buffer. */
-	int length;
+	unsigned int length;
 
 	/**  @bytes_per_datum: Size of individual datum including timestamp. */
-	int bytes_per_datum;
+	size_t bytes_per_datum;
 
 	/**
 	 * @access: Buffer access functions associated with the

+ 2 - 0
include/uapi/linux/bpf.h

@@ -1017,6 +1017,7 @@ struct bpf_prog_info {
 	__aligned_u64 map_ids;
 	char name[BPF_OBJ_NAME_LEN];
 	__u32 ifindex;
+	__u32 :32;
 	__u64 netns_dev;
 	__u64 netns_ino;
 } __attribute__((aligned(8)));
@@ -1030,6 +1031,7 @@ struct bpf_map_info {
 	__u32 map_flags;
 	char  name[BPF_OBJ_NAME_LEN];
 	__u32 ifindex;
+	__u32 :32;
 	__u64 netns_dev;
 	__u64 netns_ino;
 } __attribute__((aligned(8)));
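
On the two bpf.h hunks above: without the anonymous `__u32 :32` member, a 32-bit build places netns_dev 4 bytes after ifindex while a 64-bit build inserts invisible alignment padding, giving the same UAPI struct two different layouts. Spelling the pad out pins one layout for every ABI. A small user-space check of the principle (the struct here is an illustrative mirror, not the real bpf_prog_info):

    #include <stdint.h>
    #include <stddef.h>
    #include <stdio.h>

    struct info {
    	uint32_t ifindex;
    	uint32_t :32;		/* explicit pad: same offset everywhere */
    	uint64_t netns_dev;
    } __attribute__((aligned(8)));

    int main(void)
    {
    	/* Prints 8 on both i386 and x86-64; without the pad it would
    	 * be 4 on i386, where uint64_t is only 4-byte aligned.
    	 */
    	printf("netns_dev at offset %zu\n",
    	       offsetof(struct info, netns_dev));
    	return 0;
    }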

+ 31 - 14
kernel/sched/core.c

@@ -881,6 +881,33 @@ void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
 }
 
 #ifdef CONFIG_SMP
+
+static inline bool is_per_cpu_kthread(struct task_struct *p)
+{
+	if (!(p->flags & PF_KTHREAD))
+		return false;
+
+	if (p->nr_cpus_allowed != 1)
+		return false;
+
+	return true;
+}
+
+/*
+ * Per-CPU kthreads are allowed to run on !active && online CPUs, see
+ * __set_cpus_allowed_ptr() and select_fallback_rq().
+ */
+static inline bool is_cpu_allowed(struct task_struct *p, int cpu)
+{
+	if (!cpumask_test_cpu(cpu, &p->cpus_allowed))
+		return false;
+
+	if (is_per_cpu_kthread(p))
+		return cpu_online(cpu);
+
+	return cpu_active(cpu);
+}
+
 /*
  * This is how migration works:
 *
@@ -938,16 +965,8 @@ struct migration_arg {
 static struct rq *__migrate_task(struct rq *rq, struct rq_flags *rf,
 				 struct task_struct *p, int dest_cpu)
 {
-	if (p->flags & PF_KTHREAD) {
-		if (unlikely(!cpu_online(dest_cpu)))
-			return rq;
-	} else {
-		if (unlikely(!cpu_active(dest_cpu)))
-			return rq;
-	}
-
 	/* Affinity changed (again). */
-	if (!cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
+	if (!is_cpu_allowed(p, dest_cpu))
 		return rq;
 
 	update_rq_clock(rq);
@@ -1476,10 +1495,9 @@ static int select_fallback_rq(int cpu, struct task_struct *p)
 	for (;;) {
 		/* Any allowed, online CPU? */
 		for_each_cpu(dest_cpu, &p->cpus_allowed) {
-			if (!(p->flags & PF_KTHREAD) && !cpu_active(dest_cpu))
-				continue;
-			if (!cpu_online(dest_cpu))
+			if (!is_cpu_allowed(p, dest_cpu))
 				continue;
+
 			goto out;
 		}
 
@@ -1542,8 +1560,7 @@ int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags)
 	 * [ this allows ->select_task() to simply return task_cpu(p) and
 	 *   not worry about this generic constraint ]
 	 */
-	if (unlikely(!cpumask_test_cpu(cpu, &p->cpus_allowed) ||
-		     !cpu_online(cpu)))
+	if (unlikely(!is_cpu_allowed(p, cpu)))
 		cpu = select_fallback_rq(task_cpu(p), p);
 
 	return cpu;

+ 3 - 3
kernel/sched/deadline.c

@@ -1259,6 +1259,9 @@ static enum hrtimer_restart inactive_task_timer(struct hrtimer *timer)
 
 	rq = task_rq_lock(p, &rf);
 
+	sched_clock_tick();
+	update_rq_clock(rq);
+
 	if (!dl_task(p) || p->state == TASK_DEAD) {
 		struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
 
@@ -1278,9 +1281,6 @@ static enum hrtimer_restart inactive_task_timer(struct hrtimer *timer)
 	if (dl_se->dl_non_contending == 0)
 		goto unlock;
 
-	sched_clock_tick();
-	update_rq_clock(rq);
-
 	sub_running_bw(dl_se, &rq->dl);
 	dl_se->dl_non_contending = 0;
 unlock:

+ 1 - 1
kernel/sched/sched.h

@@ -983,7 +983,7 @@ static inline void rq_clock_skip_update(struct rq *rq)
 }
 
 /*
- * See rt task throttoling, which is the only time a skip
+ * See rt task throttling, which is the only time a skip
 * request is cancelled.
 */
 static inline void rq_clock_cancel_skipupdate(struct rq *rq)

+ 6 - 6
kernel/trace/trace.c

@@ -893,7 +893,7 @@ int __trace_bputs(unsigned long ip, const char *str)
 EXPORT_SYMBOL_GPL(__trace_bputs);
 
 #ifdef CONFIG_TRACER_SNAPSHOT
-static void tracing_snapshot_instance(struct trace_array *tr)
+void tracing_snapshot_instance(struct trace_array *tr)
 {
 	struct tracer *tracer = tr->current_trace;
 	unsigned long flags;
@@ -949,7 +949,7 @@ static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
 					struct trace_buffer *size_buf, int cpu_id);
 static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);
 
-static int alloc_snapshot(struct trace_array *tr)
+int tracing_alloc_snapshot_instance(struct trace_array *tr)
 {
 	int ret;
 
@@ -995,7 +995,7 @@ int tracing_alloc_snapshot(void)
 	struct trace_array *tr = &global_trace;
 	int ret;
 
-	ret = alloc_snapshot(tr);
+	ret = tracing_alloc_snapshot_instance(tr);
 	WARN_ON(ret < 0);
 
 	return ret;
@@ -5408,7 +5408,7 @@ static int tracing_set_tracer(struct trace_array *tr, const char *buf)
 
 #ifdef CONFIG_TRACER_MAX_TRACE
 	if (t->use_max_tr && !had_max_tr) {
-		ret = alloc_snapshot(tr);
+		ret = tracing_alloc_snapshot_instance(tr);
 		if (ret < 0)
 			goto out;
 	}
@@ -6451,7 +6451,7 @@ tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
 		}
 #endif
 		if (!tr->allocated_snapshot) {
-			ret = alloc_snapshot(tr);
+			ret = tracing_alloc_snapshot_instance(tr);
 			if (ret < 0)
 				break;
 		}
@@ -7179,7 +7179,7 @@ ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash,
 		return ret;
 
  out_reg:
-	ret = alloc_snapshot(tr);
+	ret = tracing_alloc_snapshot_instance(tr);
 	if (ret < 0)
 		goto out;
 

+ 11 - 0
kernel/trace/trace.h

@@ -1817,6 +1817,17 @@ static inline void __init trace_event_init(void) { }
 static inline void trace_event_eval_update(struct trace_eval_map **map, int len) { }
 #endif
 
+#ifdef CONFIG_TRACER_SNAPSHOT
+void tracing_snapshot_instance(struct trace_array *tr);
+int tracing_alloc_snapshot_instance(struct trace_array *tr);
+#else
+static inline void tracing_snapshot_instance(struct trace_array *tr) { }
+static inline int tracing_alloc_snapshot_instance(struct trace_array *tr)
+{
+	return 0;
+}
+#endif
+
 extern struct trace_iterator *tracepoint_print_iter;
 
 #endif /* _LINUX_KERNEL_TRACE_H */

+ 11 - 4
kernel/trace/trace_events_trigger.c

@@ -483,9 +483,10 @@ clear_event_triggers(struct trace_array *tr)
 	struct trace_event_file *file;
 
 	list_for_each_entry(file, &tr->events, list) {
-		struct event_trigger_data *data;
-		list_for_each_entry_rcu(data, &file->triggers, list) {
+		struct event_trigger_data *data, *n;
+		list_for_each_entry_safe(data, n, &file->triggers, list) {
 			trace_event_trigger_enable_disable(file, 0);
+			list_del_rcu(&data->list);
 			if (data->ops->free)
 				data->ops->free(data->ops, data);
 		}
@@ -642,6 +643,7 @@ event_trigger_callback(struct event_command *cmd_ops,
 	trigger_data->count = -1;
 	trigger_data->ops = trigger_ops;
 	trigger_data->cmd_ops = cmd_ops;
+	trigger_data->private_data = file;
 	INIT_LIST_HEAD(&trigger_data->list);
 	INIT_LIST_HEAD(&trigger_data->named_list);
 
@@ -1053,7 +1055,12 @@ static void
 snapshot_trigger(struct event_trigger_data *data, void *rec,
 		 struct ring_buffer_event *event)
 {
-	tracing_snapshot();
+	struct trace_event_file *file = data->private_data;
+
+	if (file)
+		tracing_snapshot_instance(file->tr);
+	else
+		tracing_snapshot();
 }
 
 static void
@@ -1076,7 +1083,7 @@ register_snapshot_trigger(char *glob, struct event_trigger_ops *ops,
 {
 	int ret = register_trigger(glob, ops, data, file);
 
-	if (ret > 0 && tracing_alloc_snapshot() != 0) {
+	if (ret > 0 && tracing_alloc_snapshot_instance(file->tr) != 0) {
 		unregister_trigger(glob, ops, data, file);
 		ret = 0;
 	}

+ 1 - 1
mm/huge_memory.c

@@ -2431,7 +2431,7 @@ static void __split_huge_page(struct page *page, struct list_head *list,
 		__split_huge_page_tail(head, i, lruvec, list);
 		/* Some pages can be beyond i_size: drop them from page cache */
 		if (head[i].index >= end) {
-			__ClearPageDirty(head + i);
+			ClearPageDirty(head + i);
 			__delete_from_page_cache(head + i, NULL);
 			if (IS_ENABLED(CONFIG_SHMEM) && PageSwapBacked(head))
 				shmem_uncharge(head->mapping->host, 1);

+ 1 - 1
mm/vmscan.c

@@ -1418,7 +1418,7 @@ int __isolate_lru_page(struct page *page, isolate_mode_t mode)
 				return ret;
 
 			mapping = page_mapping(page);
-			migrate_dirty = mapping && mapping->a_ops->migratepage;
+			migrate_dirty = !mapping || mapping->a_ops->migratepage;
 			unlock_page(page);
 			if (!migrate_dirty)
 				return ret;

+ 1 - 1
net/9p/Kconfig

@@ -32,7 +32,7 @@ config NET_9P_XEN
 
 
 config NET_9P_RDMA
-	depends on INET && INFINIBAND_ADDR_TRANS
+	depends on INET && INFINIBAND && INFINIBAND_ADDR_TRANS
 	tristate "9P RDMA Transport (Experimental)"
 	help
 	  This builds support for an RDMA transport.

+ 2 - 1
net/bridge/netfilter/ebtables.c

@@ -1954,7 +1954,8 @@ static int compat_mtw_from_user(struct compat_ebt_entry_mwt *mwt,
 	int off, pad = 0;
 	unsigned int size_kern, match_size = mwt->match_size;
 
-	strlcpy(name, mwt->u.name, sizeof(name));
+	if (strscpy(name, mwt->u.name, sizeof(name)) < 0)
+		return -EINVAL;
 
 	if (state->buf_kern_start)
 		dst = state->buf_kern_start + state->buf_kern_offset;
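
The ebtables change above is about the return contract: strlcpy() returns strlen(src) and truncates silently, so an over-long, user-controlled extension name was quietly clipped before being used for a lookup; strscpy() returns -E2BIG on truncation, letting the caller reject the input outright. Sketch of the pattern (the buffer size is caller-defined):

    #include <linux/string.h>
    #include <linux/errno.h>

    static int example_copy_name(char *dst, size_t dst_size, const char *src)
    {
    	/* strscpy() always NUL-terminates and reports truncation;
    	 * strlcpy() reports strlen(src) and may read past an
    	 * unterminated source.
    	 */
    	if (strscpy(dst, src, dst_size) < 0)
    		return -EINVAL;		/* reject oversized input */
    	return 0;
    }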

+ 3 - 3
net/core/net-sysfs.c

@@ -1214,9 +1214,6 @@ static ssize_t xps_cpus_show(struct netdev_queue *queue,
 	cpumask_var_t mask;
 	unsigned long index;
 
-	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
-		return -ENOMEM;
-
 	index = get_netdev_queue_index(queue);
 
 	if (dev->num_tc) {
@@ -1226,6 +1223,9 @@ static ssize_t xps_cpus_show(struct netdev_queue *queue,
 			return -EINVAL;
 	}
 
+	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
+		return -ENOMEM;
+
 	rcu_read_lock();
 	dev_maps = rcu_dereference(dev->xps_maps);
 	if (dev_maps) {

+ 4 - 4
net/ipv4/ip_tunnel.c

@@ -328,7 +328,7 @@ static int ip_tunnel_bind_dev(struct net_device *dev)
 
 	if (tdev) {
 		hlen = tdev->hard_header_len + tdev->needed_headroom;
-		mtu = tdev->mtu;
+		mtu = min(tdev->mtu, IP_MAX_MTU);
 	}
 
 	dev->needed_headroom = t_hlen + hlen;
@@ -362,7 +362,7 @@ static struct ip_tunnel *ip_tunnel_create(struct net *net,
 	nt = netdev_priv(dev);
 	t_hlen = nt->hlen + sizeof(struct iphdr);
 	dev->min_mtu = ETH_MIN_MTU;
-	dev->max_mtu = 0xFFF8 - dev->hard_header_len - t_hlen;
+	dev->max_mtu = IP_MAX_MTU - dev->hard_header_len - t_hlen;
 	ip_tunnel_add(itn, nt);
 	return nt;
 
@@ -930,7 +930,7 @@ int __ip_tunnel_change_mtu(struct net_device *dev, int new_mtu, bool strict)
 {
 	struct ip_tunnel *tunnel = netdev_priv(dev);
 	int t_hlen = tunnel->hlen + sizeof(struct iphdr);
-	int max_mtu = 0xFFF8 - dev->hard_header_len - t_hlen;
+	int max_mtu = IP_MAX_MTU - dev->hard_header_len - t_hlen;
 
 	if (new_mtu < ETH_MIN_MTU)
 		return -EINVAL;
@@ -1107,7 +1107,7 @@ int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[],
 
 	mtu = ip_tunnel_bind_dev(dev);
 	if (tb[IFLA_MTU]) {
-		unsigned int max = 0xfff8 - dev->hard_header_len - nt->hlen;
+		unsigned int max = IP_MAX_MTU - dev->hard_header_len - nt->hlen;
 
 		mtu = clamp(dev->mtu, (unsigned int)ETH_MIN_MTU,
 			    (unsigned int)(max - sizeof(struct iphdr)));

+ 8 - 3
net/ipv6/ip6_tunnel.c

@@ -1692,8 +1692,13 @@ int ip6_tnl_change_mtu(struct net_device *dev, int new_mtu)
 		if (new_mtu < ETH_MIN_MTU)
 			return -EINVAL;
 	}
-	if (new_mtu > 0xFFF8 - dev->hard_header_len)
-		return -EINVAL;
+	if (tnl->parms.proto == IPPROTO_IPV6 || tnl->parms.proto == 0) {
+		if (new_mtu > IP6_MAX_MTU - dev->hard_header_len)
+			return -EINVAL;
+	} else {
+		if (new_mtu > IP_MAX_MTU - dev->hard_header_len)
+			return -EINVAL;
+	}
 	dev->mtu = new_mtu;
 	return 0;
 }
@@ -1841,7 +1846,7 @@ ip6_tnl_dev_init_gen(struct net_device *dev)
 	if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
 		dev->mtu -= 8;
 	dev->min_mtu = ETH_MIN_MTU;
-	dev->max_mtu = 0xFFF8 - dev->hard_header_len;
+	dev->max_mtu = IP6_MAX_MTU - dev->hard_header_len;
 
 	return 0;
 

+ 2 - 2
net/ipv6/seg6_iptunnel.c

@@ -103,7 +103,7 @@ int seg6_do_srh_encap(struct sk_buff *skb, struct ipv6_sr_hdr *osrh, int proto)
 	hdrlen = (osrh->hdrlen + 1) << 3;
 	tot_len = hdrlen + sizeof(*hdr);
 
-	err = skb_cow_head(skb, tot_len);
+	err = skb_cow_head(skb, tot_len + skb->mac_len);
 	if (unlikely(err))
 		return err;
 
@@ -161,7 +161,7 @@ int seg6_do_srh_inline(struct sk_buff *skb, struct ipv6_sr_hdr *osrh)
 
 	hdrlen = (osrh->hdrlen + 1) << 3;
 
-	err = skb_cow_head(skb, hdrlen);
+	err = skb_cow_head(skb, hdrlen + skb->mac_len);
 	if (unlikely(err))
 		return err;
 

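Both seg6 hunks fix the same headroom accounting: when the MAC header will be pushed back after the SRH is inserted, skb_cow_head() has to reserve room for it as well, otherwise the later push can land before the start of the reallocated head. A toy userspace model of the check, assuming a simplified packet structure (not the skb API):

    #include <assert.h>
    #include <stddef.h>

    struct pkt { size_t headroom; };    /* toy model, not struct sk_buff */

    static int cow_head(struct pkt *p, size_t need)
    {
        return p->headroom >= need ? 0 : -1;    /* real code reallocates */
    }

    static int encap(struct pkt *p, size_t tot_len, size_t mac_len)
    {
        /* reserving only tot_len would leave no room to push the MAC
         * header back; the sum covers both pushes */
        if (cow_head(p, tot_len + mac_len))
            return -1;
        p->headroom -= tot_len + mac_len;
        return 0;
    }

    int main(void)
    {
        struct pkt p = { .headroom = 64 };

        assert(encap(&p, 40, 14) == 0);     /* SRH + MAC both fit */
        return 0;
    }
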
+ 3 - 2
net/ipv6/sit.c

@@ -1371,7 +1371,7 @@ static void ipip6_tunnel_setup(struct net_device *dev)
 	dev->hard_header_len	= LL_MAX_HEADER + t_hlen;
 	dev->mtu		= ETH_DATA_LEN - t_hlen;
 	dev->min_mtu		= IPV6_MIN_MTU;
-	dev->max_mtu		= 0xFFF8 - t_hlen;
+	dev->max_mtu		= IP6_MAX_MTU - t_hlen;
 	dev->flags		= IFF_NOARP;
 	netif_keep_dst(dev);
 	dev->addr_len		= 4;
@@ -1583,7 +1583,8 @@ static int ipip6_newlink(struct net *src_net, struct net_device *dev,
 	if (tb[IFLA_MTU]) {
 		u32 mtu = nla_get_u32(tb[IFLA_MTU]);
 
-		if (mtu >= IPV6_MIN_MTU && mtu <= 0xFFF8 - dev->hard_header_len)
+		if (mtu >= IPV6_MIN_MTU &&
+		    mtu <= IP6_MAX_MTU - dev->hard_header_len)
 			dev->mtu = mtu;
 	}
 

+ 1 - 1
net/ipv6/xfrm6_policy.c

@@ -126,7 +126,7 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
 	struct flowi6 *fl6 = &fl->u.ip6;
 	int onlyproto = 0;
 	const struct ipv6hdr *hdr = ipv6_hdr(skb);
-	u16 offset = sizeof(*hdr);
+	u32 offset = sizeof(*hdr);
 	struct ipv6_opt_hdr *exthdr;
 	const unsigned char *nh = skb_network_header(skb);
 	u16 nhoff = IP6CB(skb)->nhoff;

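Widening offset matters because an skb can carry more than 64 KiB of data (e.g. after GRO), so a u16 running offset over the extension-header chain can wrap and send the parser backwards into the packet, looping forever. The wrap itself, demonstrated in two lines:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint16_t off16 = 65528;     /* near the end of a >64KiB skb */
        uint32_t off32 = 65528;

        off16 += 16;    /* wraps to 8: parser jumps back into the packet */
        off32 += 16;    /* 65544: keeps moving forward */
        printf("u16: %u  u32: %u\n", off16, (unsigned)off32);
        return 0;
    }
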
+ 1 - 1
net/kcm/kcmsock.c

@@ -1671,7 +1671,7 @@ static struct file *kcm_clone(struct socket *osock)
 	__module_get(newsock->ops->owner);
 
 	newsk = sk_alloc(sock_net(osock->sk), PF_KCM, GFP_KERNEL,
-			 &kcm_proto, true);
+			 &kcm_proto, false);
 	if (!newsk) {
 		sock_release(newsock);
 		return ERR_PTR(-ENOMEM);

+ 1 - 1
net/ncsi/ncsi-netlink.c

@@ -215,7 +215,7 @@ err:
 static int ncsi_pkg_info_all_nl(struct sk_buff *skb,
 				struct netlink_callback *cb)
 {
-	struct nlattr *attrs[NCSI_ATTR_MAX];
+	struct nlattr *attrs[NCSI_ATTR_MAX + 1];
 	struct ncsi_package *np, *package;
 	struct ncsi_dev_priv *ndp;
 	unsigned int package_id;

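The fix above is the standard netlink off-by-one: attribute parsing writes attrs[0] through attrs[MAX] inclusive, so the array needs MAX + 1 entries. Minimal model of the sizing rule, with ATTR_MAX as a stand-in constant:

    #include <stdio.h>

    #define ATTR_MAX 5                  /* stand-in for NCSI_ATTR_MAX */

    int main(void)
    {
        int attrs[ATTR_MAX + 1] = { 0 };/* valid indices 0..ATTR_MAX */
        int i;

        for (i = 0; i <= ATTR_MAX; i++) /* a parser fills every slot */
            attrs[i] = i;
        printf("attrs[%d] = %d\n", ATTR_MAX, attrs[ATTR_MAX]);
        return 0;
    }
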
+ 15 - 6
net/netfilter/ipvs/ip_vs_ctl.c

@@ -2381,8 +2381,10 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
 			struct ipvs_sync_daemon_cfg cfg;
 
 			memset(&cfg, 0, sizeof(cfg));
-			strlcpy(cfg.mcast_ifn, dm->mcast_ifn,
-				sizeof(cfg.mcast_ifn));
+			ret = -EINVAL;
+			if (strscpy(cfg.mcast_ifn, dm->mcast_ifn,
+				    sizeof(cfg.mcast_ifn)) <= 0)
+				goto out_dec;
 			cfg.syncid = dm->syncid;
 			ret = start_sync_thread(ipvs, &cfg, dm->state);
 		} else {
@@ -2420,12 +2422,19 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
 		}
 	}
 
+	if ((cmd == IP_VS_SO_SET_ADD || cmd == IP_VS_SO_SET_EDIT) &&
+	    strnlen(usvc.sched_name, IP_VS_SCHEDNAME_MAXLEN) ==
+	    IP_VS_SCHEDNAME_MAXLEN) {
+		ret = -EINVAL;
+		goto out_unlock;
+	}
+
 	/* Check for valid protocol: TCP or UDP or SCTP, even for fwmark!=0 */
 	if (usvc.protocol != IPPROTO_TCP && usvc.protocol != IPPROTO_UDP &&
 	    usvc.protocol != IPPROTO_SCTP) {
-		pr_err("set_ctl: invalid protocol: %d %pI4:%d %s\n",
+		pr_err("set_ctl: invalid protocol: %d %pI4:%d\n",
 		       usvc.protocol, &usvc.addr.ip,
-		       ntohs(usvc.port), usvc.sched_name);
+		       ntohs(usvc.port));
 		ret = -EFAULT;
 		goto out_unlock;
 	}
@@ -2847,7 +2856,7 @@ static const struct nla_policy ip_vs_cmd_policy[IPVS_CMD_ATTR_MAX + 1] = {
 static const struct nla_policy ip_vs_daemon_policy[IPVS_DAEMON_ATTR_MAX + 1] = {
 	[IPVS_DAEMON_ATTR_STATE]	= { .type = NLA_U32 },
 	[IPVS_DAEMON_ATTR_MCAST_IFN]	= { .type = NLA_NUL_STRING,
-					    .len = IP_VS_IFNAME_MAXLEN },
+					    .len = IP_VS_IFNAME_MAXLEN - 1 },
 	[IPVS_DAEMON_ATTR_SYNC_ID]	= { .type = NLA_U32 },
 	[IPVS_DAEMON_ATTR_SYNC_MAXLEN]	= { .type = NLA_U16 },
 	[IPVS_DAEMON_ATTR_MCAST_GROUP]	= { .type = NLA_U32 },
@@ -2865,7 +2874,7 @@ static const struct nla_policy ip_vs_svc_policy[IPVS_SVC_ATTR_MAX + 1] = {
 	[IPVS_SVC_ATTR_PORT]		= { .type = NLA_U16 },
 	[IPVS_SVC_ATTR_FWMARK]		= { .type = NLA_U32 },
 	[IPVS_SVC_ATTR_SCHED_NAME]	= { .type = NLA_NUL_STRING,
-					    .len = IP_VS_SCHEDNAME_MAXLEN },
+					    .len = IP_VS_SCHEDNAME_MAXLEN - 1 },
 	[IPVS_SVC_ATTR_PE_NAME]		= { .type = NLA_NUL_STRING,
 					    .len = IP_VS_PENAME_MAXLEN },
 	[IPVS_SVC_ATTR_FLAGS]		= { .type = NLA_BINARY,

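The two .len changes encode the NLA_NUL_STRING rule: for a destination buffer of MAXLEN bytes, the longest acceptable string is MAXLEN - 1 characters, so the terminating NUL always fits; the new strnlen() check in do_ip_vs_set_ctl() enforces the same bound on the sockopt path. Userspace mirror of that bound, with a stand-in constant:

    #include <stdio.h>
    #include <string.h>

    #define IFNAME_MAXLEN 16            /* stand-in for IP_VS_IFNAME_MAXLEN */

    /* Longest accepted string: MAXLEN - 1 chars, leaving a byte for '\0'. */
    static int ifname_ok(const char *s)
    {
        return strnlen(s, IFNAME_MAXLEN) <= IFNAME_MAXLEN - 1;
    }

    int main(void)
    {
        printf("%d\n", ifname_ok("eth0"));                    /* 1: fits */
        printf("%d\n", ifname_ok("a-much-too-long-ifname"));  /* 0: rejected */
        return 0;
    }
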
+ 5 - 3
net/netfilter/nf_tables_api.c

@@ -1298,8 +1298,10 @@ static void nft_chain_stats_replace(struct nft_base_chain *chain,
 		rcu_assign_pointer(chain->stats, newstats);
 		synchronize_rcu();
 		free_percpu(oldstats);
-	} else
+	} else {
 		rcu_assign_pointer(chain->stats, newstats);
+		static_branch_inc(&nft_counters_enabled);
+	}
 }
 
 static void nf_tables_chain_destroy(struct nft_ctx *ctx)
@@ -4706,7 +4708,7 @@ static int nf_tables_dump_obj(struct sk_buff *skb, struct netlink_callback *cb)
 			if (idx > s_idx)
 				memset(&cb->args[1], 0,
 				       sizeof(cb->args) - sizeof(cb->args[0]));
-			if (filter && filter->table[0] &&
+			if (filter && filter->table &&
 			    strcmp(filter->table, table->name))
 				goto cont;
 			if (filter &&
@@ -5380,7 +5382,7 @@ static int nf_tables_dump_flowtable(struct sk_buff *skb,
 			if (idx > s_idx)
 				memset(&cb->args[1], 0,
 				       sizeof(cb->args) - sizeof(cb->args[0]));
-			if (filter && filter->table[0] &&
+			if (filter && filter->table &&
 			    strcmp(filter->table, table->name))
 				goto cont;
 

+ 2 - 2
net/netfilter/nf_tables_core.c

@@ -126,15 +126,15 @@ static noinline void nft_update_chain_stats(const struct nft_chain *chain,
 	if (!base_chain->stats)
 		return;
 
+	local_bh_disable();
 	stats = this_cpu_ptr(rcu_dereference(base_chain->stats));
 	if (stats) {
-		local_bh_disable();
 		u64_stats_update_begin(&stats->syncp);
 		stats->pkts++;
 		stats->bytes += pkt->skb->len;
 		u64_stats_update_end(&stats->syncp);
-		local_bh_enable();
 	}
+	local_bh_enable();
 }
 
 struct nft_jumpstack {

+ 1 - 1
net/netfilter/nfnetlink_acct.c

@@ -115,7 +115,7 @@ static int nfnl_acct_new(struct net *net, struct sock *nfnl,
 		nfacct->flags = flags;
 	}
 
-	nla_strlcpy(nfacct->name, nla_data(tb[NFACCT_NAME]), NFACCT_NAME_MAX);
+	nla_strlcpy(nfacct->name, tb[NFACCT_NAME], NFACCT_NAME_MAX);
 
 	if (tb[NFACCT_BYTES]) {
 		atomic64_set(&nfacct->bytes,

+ 2 - 2
net/netfilter/nfnetlink_cthelper.c

@@ -150,7 +150,7 @@ nfnl_cthelper_expect_policy(struct nf_conntrack_expect_policy *expect_policy,
 		return -EINVAL;
 
 	nla_strlcpy(expect_policy->name,
-		    nla_data(tb[NFCTH_POLICY_NAME]), NF_CT_HELPER_NAME_LEN);
+		    tb[NFCTH_POLICY_NAME], NF_CT_HELPER_NAME_LEN);
 	expect_policy->max_expected =
 		ntohl(nla_get_be32(tb[NFCTH_POLICY_EXPECT_MAX]));
 	if (expect_policy->max_expected > NF_CT_EXPECT_MAX_CNT)
@@ -235,7 +235,7 @@ nfnl_cthelper_create(const struct nlattr * const tb[],
 		goto err1;
 
 	nla_strlcpy(helper->name,
-		    nla_data(tb[NFCTH_NAME]), NF_CT_HELPER_NAME_LEN);
+		    tb[NFCTH_NAME], NF_CT_HELPER_NAME_LEN);
 	size = ntohl(nla_get_be32(tb[NFCTH_PRIV_DATA_LEN]));
 	if (size > FIELD_SIZEOF(struct nf_conn_help, data)) {
 		ret = -ENOMEM;

+ 12 - 8
net/netfilter/nft_ct.c

@@ -880,22 +880,26 @@ static int nft_ct_helper_obj_dump(struct sk_buff *skb,
 				  struct nft_object *obj, bool reset)
 {
 	const struct nft_ct_helper_obj *priv = nft_obj_data(obj);
-	const struct nf_conntrack_helper *helper = priv->helper4;
+	const struct nf_conntrack_helper *helper;
 	u16 family;
 
+	if (priv->helper4 && priv->helper6) {
+		family = NFPROTO_INET;
+		helper = priv->helper4;
+	} else if (priv->helper6) {
+		family = NFPROTO_IPV6;
+		helper = priv->helper6;
+	} else {
+		family = NFPROTO_IPV4;
+		helper = priv->helper4;
+	}
+
 	if (nla_put_string(skb, NFTA_CT_HELPER_NAME, helper->name))
 		return -1;
 
 	if (nla_put_u8(skb, NFTA_CT_HELPER_L4PROTO, priv->l4proto))
 		return -1;
 
-	if (priv->helper4 && priv->helper6)
-		family = NFPROTO_INET;
-	else if (priv->helper6)
-		family = NFPROTO_IPV6;
-	else
-		family = NFPROTO_IPV4;
-
 	if (nla_put_be16(skb, NFTA_CT_HELPER_L3PROTO, htons(family)))
 		return -1;
 

+ 24 - 14
net/netfilter/nft_limit.c

@@ -51,10 +51,13 @@ static inline bool nft_limit_eval(struct nft_limit *limit, u64 cost)
 	return !limit->invert;
 }
 
+/* Use same default as in iptables. */
+#define NFT_LIMIT_PKT_BURST_DEFAULT	5
+
 static int nft_limit_init(struct nft_limit *limit,
-			  const struct nlattr * const tb[])
+			  const struct nlattr * const tb[], bool pkts)
 {
-	u64 unit;
+	u64 unit, tokens;
 
 	if (tb[NFTA_LIMIT_RATE] == NULL ||
 	    tb[NFTA_LIMIT_UNIT] == NULL)
@@ -68,18 +71,25 @@ static int nft_limit_init(struct nft_limit *limit,
 
 	if (tb[NFTA_LIMIT_BURST])
 		limit->burst = ntohl(nla_get_be32(tb[NFTA_LIMIT_BURST]));
-	else
-		limit->burst = 0;
+
+	if (pkts && limit->burst == 0)
+		limit->burst = NFT_LIMIT_PKT_BURST_DEFAULT;
 
 	if (limit->rate + limit->burst < limit->rate)
 		return -EOVERFLOW;
 
-	/* The token bucket size limits the number of tokens can be
-	 * accumulated. tokens_max specifies the bucket size.
-	 * tokens_max = unit * (rate + burst) / rate.
-	 */
-	limit->tokens = div_u64(limit->nsecs * (limit->rate + limit->burst),
-				limit->rate);
+	if (pkts) {
+		tokens = div_u64(limit->nsecs, limit->rate) * limit->burst;
+	} else {
+		/* The token bucket size limits the number of tokens can be
+		 * accumulated. tokens_max specifies the bucket size.
+		 * tokens_max = unit * (rate + burst) / rate.
+		 */
+		tokens = div_u64(limit->nsecs * (limit->rate + limit->burst),
+				 limit->rate);
+	}
+
+	limit->tokens = tokens;
 	limit->tokens_max = limit->tokens;
 
 	if (tb[NFTA_LIMIT_FLAGS]) {
@@ -144,7 +154,7 @@ static int nft_limit_pkts_init(const struct nft_ctx *ctx,
 	struct nft_limit_pkts *priv = nft_expr_priv(expr);
 	int err;
 
-	err = nft_limit_init(&priv->limit, tb);
+	err = nft_limit_init(&priv->limit, tb, true);
 	if (err < 0)
 		return err;
 
@@ -185,7 +195,7 @@ static int nft_limit_bytes_init(const struct nft_ctx *ctx,
 {
 	struct nft_limit *priv = nft_expr_priv(expr);
 
-	return nft_limit_init(priv, tb);
+	return nft_limit_init(priv, tb, false);
 }
 
 static int nft_limit_bytes_dump(struct sk_buff *skb,
@@ -246,7 +256,7 @@ static int nft_limit_obj_pkts_init(const struct nft_ctx *ctx,
 	struct nft_limit_pkts *priv = nft_obj_data(obj);
 	int err;
 
-	err = nft_limit_init(&priv->limit, tb);
+	err = nft_limit_init(&priv->limit, tb, true);
 	if (err < 0)
 		return err;
 
@@ -289,7 +299,7 @@ static int nft_limit_obj_bytes_init(const struct nft_ctx *ctx,
 {
 	struct nft_limit *priv = nft_obj_data(obj);
 
-	return nft_limit_init(priv, tb);
+	return nft_limit_init(priv, tb, false);
 }
 
 static int nft_limit_obj_bytes_dump(struct sk_buff *skb,

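The nft_limit rework sizes the token bucket differently for packet and byte limits: byte limits keep the old unit * (rate + burst) / rate formula, while packet limits use (unit / rate) * burst so that burst is counted in whole packets, with an iptables-style default of 5. Worked numbers in plain C, illustrative values only:

    #include <inttypes.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t nsecs = 1000000000ULL; /* unit: one second, in ns */
        uint64_t rate  = 10;            /* 10 per second */
        uint64_t burst = 5;             /* the iptables-style default */

        uint64_t bytes_bucket = nsecs * (rate + burst) / rate;  /* 1.5e9 */
        uint64_t pkt_bucket   = nsecs / rate * burst;           /* 5.0e8 */

        printf("bytes bucket: %" PRIu64 " token-ns\n", bytes_bucket);
        printf("pkt   bucket: %" PRIu64 " token-ns\n", pkt_bucket);
        return 0;
    }
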
+ 8 - 6
net/netfilter/nft_meta.c

@@ -234,7 +234,7 @@ void nft_meta_set_eval(const struct nft_expr *expr,
 	struct sk_buff *skb = pkt->skb;
 	u32 *sreg = &regs->data[meta->sreg];
 	u32 value = *sreg;
-	u8 pkt_type;
+	u8 value8;
 
 	switch (meta->key) {
 	case NFT_META_MARK:
@@ -244,15 +244,17 @@ void nft_meta_set_eval(const struct nft_expr *expr,
 		skb->priority = value;
 		break;
 	case NFT_META_PKTTYPE:
-		pkt_type = nft_reg_load8(sreg);
+		value8 = nft_reg_load8(sreg);
 
-		if (skb->pkt_type != pkt_type &&
-		    skb_pkt_type_ok(pkt_type) &&
+		if (skb->pkt_type != value8 &&
+		    skb_pkt_type_ok(value8) &&
 		    skb_pkt_type_ok(skb->pkt_type))
-			skb->pkt_type = pkt_type;
+			skb->pkt_type = value8;
 		break;
 	case NFT_META_NFTRACE:
-		skb->nf_trace = !!value;
+		value8 = nft_reg_load8(sreg);
+
+		skb->nf_trace = !!value8;
 		break;
 	default:
 		WARN_ON(1);

+ 1 - 1
net/rds/Kconfig

@@ -8,7 +8,7 @@ config RDS
 
 config RDS_RDMA
 	tristate "RDS over Infiniband"
-	depends on RDS && INFINIBAND_ADDR_TRANS
+	depends on RDS && INFINIBAND && INFINIBAND_ADDR_TRANS
 	---help---
 	  Allow RDS to use Infiniband as a transport.
 	  This transport supports RDMA operations.

+ 1 - 1
net/sched/cls_flower.c

@@ -977,7 +977,7 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
 	return 0;
 
 errout_idr:
-	if (fnew->handle)
+	if (!fold)
 		idr_remove(&head->handle_idr, fnew->handle);
 errout:
 	tcf_exts_destroy(&fnew->exts);

+ 1 - 1
net/sunrpc/Kconfig

@@ -50,7 +50,7 @@ config SUNRPC_DEBUG
 
 config SUNRPC_XPRT_RDMA
 	tristate "RPC-over-RDMA transport"
-	depends on SUNRPC && INFINIBAND_ADDR_TRANS
+	depends on SUNRPC && INFINIBAND && INFINIBAND_ADDR_TRANS
 	default SUNRPC && INFINIBAND
 	select SG_POOL
 	help

+ 2 - 3
net/xfrm/xfrm_policy.c

@@ -1658,7 +1658,6 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
 		trailer_len -= xdst_prev->u.dst.xfrm->props.trailer_len;
 	}
 
-out:
 	return &xdst0->u.dst;
 
 put_states:
@@ -1667,8 +1666,8 @@ put_states:
 free_dst:
 	if (xdst0)
 		dst_release_immediate(&xdst0->u.dst);
-	xdst0 = ERR_PTR(err);
-	goto out;
+
+	return ERR_PTR(err);
 }
 
 static int xfrm_expand_policies(const struct flowi *fl, u16 family,

+ 1 - 1
security/selinux/ss/services.c

@@ -1494,7 +1494,7 @@ static int security_context_to_sid_core(struct selinux_state *state,
 				      scontext_len, &context, def_sid);
 	if (rc == -EINVAL && force) {
 		context.str = str;
-		context.len = scontext_len;
+		context.len = strlen(str) + 1;
 		str = NULL;
 	} else if (rc)
 		goto out_unlock;

+ 14 - 6
tools/arch/x86/include/asm/cpufeatures.h

@@ -198,7 +198,6 @@
 #define X86_FEATURE_CAT_L2		( 7*32+ 5) /* Cache Allocation Technology L2 */
 #define X86_FEATURE_CDP_L3		( 7*32+ 6) /* Code and Data Prioritization L3 */
 #define X86_FEATURE_INVPCID_SINGLE	( 7*32+ 7) /* Effectively INVPCID && CR4.PCIDE=1 */
-
 #define X86_FEATURE_HW_PSTATE		( 7*32+ 8) /* AMD HW-PState */
 #define X86_FEATURE_PROC_FEEDBACK	( 7*32+ 9) /* AMD ProcFeedbackInterface */
 #define X86_FEATURE_SME			( 7*32+10) /* AMD Secure Memory Encryption */
@@ -207,13 +206,19 @@
 #define X86_FEATURE_RETPOLINE_AMD	( 7*32+13) /* "" AMD Retpoline mitigation for Spectre variant 2 */
 #define X86_FEATURE_INTEL_PPIN		( 7*32+14) /* Intel Processor Inventory Number */
 #define X86_FEATURE_CDP_L2		( 7*32+15) /* Code and Data Prioritization L2 */
-
+#define X86_FEATURE_MSR_SPEC_CTRL	( 7*32+16) /* "" MSR SPEC_CTRL is implemented */
+#define X86_FEATURE_SSBD		( 7*32+17) /* Speculative Store Bypass Disable */
 #define X86_FEATURE_MBA			( 7*32+18) /* Memory Bandwidth Allocation */
 #define X86_FEATURE_RSB_CTXSW		( 7*32+19) /* "" Fill RSB on context switches */
 #define X86_FEATURE_SEV			( 7*32+20) /* AMD Secure Encrypted Virtualization */
-
 #define X86_FEATURE_USE_IBPB		( 7*32+21) /* "" Indirect Branch Prediction Barrier enabled */
 #define X86_FEATURE_USE_IBRS_FW		( 7*32+22) /* "" Use IBRS during runtime firmware calls */
+#define X86_FEATURE_SPEC_STORE_BYPASS_DISABLE	( 7*32+23) /* "" Disable Speculative Store Bypass. */
+#define X86_FEATURE_LS_CFG_SSBD		( 7*32+24)  /* "" AMD SSBD implementation via LS_CFG MSR */
+#define X86_FEATURE_IBRS		( 7*32+25) /* Indirect Branch Restricted Speculation */
+#define X86_FEATURE_IBPB		( 7*32+26) /* Indirect Branch Prediction Barrier */
+#define X86_FEATURE_STIBP		( 7*32+27) /* Single Thread Indirect Branch Predictors */
+#define X86_FEATURE_ZEN			( 7*32+28) /* "" CPU is AMD family 0x17 (Zen) */
 
 /* Virtualization flags: Linux defined, word 8 */
 #define X86_FEATURE_TPR_SHADOW		( 8*32+ 0) /* Intel TPR Shadow */
@@ -274,9 +279,10 @@
 #define X86_FEATURE_CLZERO		(13*32+ 0) /* CLZERO instruction */
 #define X86_FEATURE_IRPERF		(13*32+ 1) /* Instructions Retired Count */
 #define X86_FEATURE_XSAVEERPTR		(13*32+ 2) /* Always save/restore FP error pointers */
-#define X86_FEATURE_IBPB		(13*32+12) /* Indirect Branch Prediction Barrier */
-#define X86_FEATURE_IBRS		(13*32+14) /* Indirect Branch Restricted Speculation */
-#define X86_FEATURE_STIBP		(13*32+15) /* Single Thread Indirect Branch Predictors */
+#define X86_FEATURE_AMD_IBPB		(13*32+12) /* "" Indirect Branch Prediction Barrier */
+#define X86_FEATURE_AMD_IBRS		(13*32+14) /* "" Indirect Branch Restricted Speculation */
+#define X86_FEATURE_AMD_STIBP		(13*32+15) /* "" Single Thread Indirect Branch Predictors */
+#define X86_FEATURE_VIRT_SSBD		(13*32+25) /* Virtualized Speculative Store Bypass Disable */
 
 /* Thermal and Power Management Leaf, CPUID level 0x00000006 (EAX), word 14 */
 #define X86_FEATURE_DTHERM		(14*32+ 0) /* Digital Thermal Sensor */
@@ -334,6 +340,7 @@
 #define X86_FEATURE_SPEC_CTRL		(18*32+26) /* "" Speculation Control (IBRS + IBPB) */
 #define X86_FEATURE_INTEL_STIBP		(18*32+27) /* "" Single Thread Indirect Branch Predictors */
 #define X86_FEATURE_ARCH_CAPABILITIES	(18*32+29) /* IA32_ARCH_CAPABILITIES MSR (Intel) */
+#define X86_FEATURE_SPEC_CTRL_SSBD	(18*32+31) /* "" Speculative Store Bypass Disable */
 
 /*
  * BUG word(s)
@@ -363,5 +370,6 @@
 #define X86_BUG_CPU_MELTDOWN		X86_BUG(14) /* CPU is affected by meltdown attack and needs kernel page table isolation */
 #define X86_BUG_SPECTRE_V1		X86_BUG(15) /* CPU is affected by Spectre variant 1 attack with conditional branches */
 #define X86_BUG_SPECTRE_V2		X86_BUG(16) /* CPU is affected by Spectre variant 2 attack with indirect branches */
+#define X86_BUG_SPEC_STORE_BYPASS	X86_BUG(17) /* CPU is affected by speculative store bypass attack */
 
 #endif /* _ASM_X86_CPUFEATURES_H */

+ 2 - 0
tools/include/uapi/linux/bpf.h

@@ -1017,6 +1017,7 @@ struct bpf_prog_info {
 	__aligned_u64 map_ids;
 	char name[BPF_OBJ_NAME_LEN];
 	__u32 ifindex;
+	__u32 :32;
 	__u64 netns_dev;
 	__u64 netns_ino;
 } __attribute__((aligned(8)));
@@ -1030,6 +1031,7 @@
 	__u32 map_flags;
 	char  name[BPF_OBJ_NAME_LEN];
 	__u32 ifindex;
+	__u32 :32;
 	__u64 netns_dev;
 	__u64 netns_ino;
 } __attribute__((aligned(8)));

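The `__u32 :32` members added above are anonymous padding: a 64-bit compiler already leaves a 4-byte hole before the __u64 fields, but naming the hole forces the same layout for 32-bit userspace, where __u64 may only be 4-byte aligned by default. A quick check of the resulting offset (layout is compiler-dependent in principle, so this is a sketch for GCC/Clang):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    struct info {               /* shaped like the patched uapi structs */
        uint32_t ifindex;
        uint32_t :32;           /* explicit hole, as in the patch */
        uint64_t netns_dev;
    } __attribute__((aligned(8)));

    int main(void)
    {
        /* 8 on both 32- and 64-bit targets once the pad is named */
        printf("netns_dev at offset %zu\n", offsetof(struct info, netns_dev));
        return 0;
    }
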
Some files were not shown because too many files changed in this diff