
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Simple cases of overlapping changes in the packet scheduler.

Much easier to resolve this time.

Which probably means that I screwed it up somehow.

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller, 7 years ago
Parent
Commit
4dc6758d78
100 files changed, 478 insertions and 301 deletions
  1. 3 0
      .mailmap
  2. 16 6
      Documentation/process/kernel-enforcement-statement.rst
  3. 9 5
      MAINTAINERS
  4. 1 1
      Makefile
  5. 2 0
      arch/arm/Makefile
  6. 9 0
      arch/arm/boot/compressed/vmlinux.lds.S
  7. 2 2
      arch/arm/boot/dts/armada-375.dtsi
  8. 2 2
      arch/arm/boot/dts/armada-38x.dtsi
  9. 2 2
      arch/arm/boot/dts/armada-39x.dtsi
  10. 6 3
      arch/arm/boot/dts/uniphier-ld4.dtsi
  11. 4 2
      arch/arm/boot/dts/uniphier-pro4.dtsi
  12. 6 3
      arch/arm/boot/dts/uniphier-sld8.dtsi
  13. 0 1
      arch/arm/include/asm/Kbuild
  14. 27 0
      arch/arm/include/asm/unaligned.h
  15. 2 4
      arch/arm/kvm/emulate.c
  16. 1 1
      arch/arm/kvm/hyp/Makefile
  17. 6 3
      arch/arm64/boot/dts/socionext/uniphier-ld11.dtsi
  18. 1 1
      arch/arm64/kvm/hyp/Makefile
  19. 15 1
      arch/arm64/kvm/inject_fault.c
  20. 2 0
      arch/ia64/include/asm/acpi.h
  21. 1 1
      arch/mips/generic/board-ni169445.its.S
  22. 1 1
      arch/mips/generic/init.c
  23. 1 1
      arch/mips/generic/kexec.c
  24. 2 2
      arch/mips/include/asm/mips-cm.h
  25. 4 4
      arch/mips/include/asm/stackframe.h
  26. 1 1
      arch/mips/kernel/probes-common.h
  27. 3 3
      arch/mips/kernel/smp-cmp.c
  28. 1 1
      arch/mips/kernel/smp-cps.c
  29. 17 7
      arch/mips/kernel/smp.c
  30. 1 1
      arch/mips/mm/uasm-micromips.c
  31. 1 1
      arch/mips/net/ebpf_jit.c
  32. 6 6
      arch/x86/crypto/sha1-mb/sha1_mb_mgr_flush_avx2.S
  33. 6 6
      arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S
  34. 2 0
      arch/x86/include/asm/acpi.h
  35. 27 94
      arch/x86/kernel/cpu/mcheck/dev-mcelog.c
  36. 1 1
      arch/x86/kernel/kvmclock.c
  37. 13 0
      arch/x86/kernel/module.c
  38. 5 0
      arch/x86/kvm/lapic.c
  39. 0 3
      arch/x86/kvm/vmx.c
  40. 1 1
      arch/x86/mm/mem_encrypt.c
  41. 14 3
      arch/x86/mm/tlb.c
  42. 3 1
      crypto/ccm.c
  43. 2 0
      drivers/acpi/processor_idle.c
  44. 28 0
      drivers/acpi/sleep.c
  45. 3 3
      drivers/firmware/efi/libstub/Makefile
  46. 5 2
      drivers/firmware/efi/libstub/arm-stub.c
  47. 6 1
      drivers/ide/ide-cd.c
  48. 5 4
      drivers/idle/intel_idle.c
  49. 1 0
      drivers/input/sparse-keymap.c
  50. 1 0
      drivers/input/touchscreen/ar1021_i2c.c
  51. 1 0
      drivers/irqchip/irq-mvebu-gicp.c
  52. 2 1
      drivers/net/bonding/bond_main.c
  53. 3 3
      drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
  54. 4 0
      drivers/net/ethernet/marvell/mvpp2.c
  55. 8 5
      drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
  56. 2 2
      drivers/net/usb/asix_devices.c
  57. 1 1
      drivers/net/usb/cdc_ether.c
  58. 2 1
      drivers/net/usb/qmi_wwan.c
  59. 0 3
      drivers/scsi/scsi_lib.c
  60. 1 4
      drivers/scsi/scsi_transport_srp.c
  61. 2 2
      include/linux/compiler.h
  62. 7 0
      include/linux/skbuff.h
  63. 1 3
      include/net/act_api.h
  64. 24 0
      include/net/pkt_cls.h
  65. 2 1
      include/sound/seq_kernel.h
  66. 2 0
      include/sound/timer.h
  67. 3 3
      include/uapi/sound/asound.h
  68. 4 2
      kernel/events/core.c
  69. 20 3
      kernel/futex.c
  70. 1 5
      kernel/sched/cpufreq_schedutil.c
  71. 10 5
      kernel/watchdog_hld.c
  72. 2 1
      kernel/workqueue_internal.h
  73. 2 2
      lib/asn1_decoder.c
  74. 1 0
      net/core/skbuff.c
  75. 1 1
      net/ipv4/tcp_input.c
  76. 9 15
      net/l2tp/l2tp_ip.c
  77. 9 15
      net/l2tp/l2tp_ip6.c
  78. 1 1
      net/qrtr/qrtr.c
  79. 0 2
      net/sched/act_api.c
  80. 1 1
      net/sched/act_bpf.c
  81. 1 1
      net/sched/act_connmark.c
  82. 1 1
      net/sched/act_csum.c
  83. 1 1
      net/sched/act_gact.c
  84. 1 1
      net/sched/act_ife.c
  85. 2 2
      net/sched/act_ipt.c
  86. 1 1
      net/sched/act_mirred.c
  87. 1 1
      net/sched/act_nat.c
  88. 1 1
      net/sched/act_pedit.c
  89. 1 1
      net/sched/act_police.c
  90. 1 1
      net/sched/act_sample.c
  91. 1 1
      net/sched/act_simple.c
  92. 1 1
      net/sched/act_skbedit.c
  93. 1 1
      net/sched/act_skbmod.c
  94. 1 1
      net/sched/act_tunnel_key.c
  95. 1 1
      net/sched/act_vlan.c
  96. 1 0
      net/sched/cls_api.c
  97. 15 5
      net/sched/cls_basic.c
  98. 6 1
      net/sched/cls_bpf.c
  99. 18 6
      net/sched/cls_cgroup.c
  100. 18 6
      net/sched/cls_flow.c

+ 3 - 0
.mailmap

@@ -15,6 +15,7 @@ Adriana Reus <adi.reus@gmail.com> <adriana.reus@intel.com>
 Alan Cox <alan@lxorguk.ukuu.org.uk>
 Alan Cox <root@hraefn.swansea.linux.org.uk>
 Aleksey Gorelov <aleksey_gorelov@phoenix.com>
+Aleksandar Markovic <aleksandar.markovic@mips.com> <aleksandar.markovic@imgtec.com>
 Al Viro <viro@ftp.linux.org.uk>
 Al Viro <viro@zenIV.linux.org.uk>
 Andreas Herrmann <aherrman@de.ibm.com>
@@ -101,6 +102,7 @@ Leonid I Ananiev <leonid.i.ananiev@intel.com>
 Linas Vepstas <linas@austin.ibm.com>
 Linus Lüssing <linus.luessing@c0d3.blue> <linus.luessing@web.de>
 Linus Lüssing <linus.luessing@c0d3.blue> <linus.luessing@ascom.ch>
+Marcin Nowakowski <marcin.nowakowski@mips.com> <marcin.nowakowski@imgtec.com>
 Mark Brown <broonie@sirena.org.uk>
 Martin Kepplinger <martink@posteo.de> <martin.kepplinger@theobroma-systems.com>
 Martin Kepplinger <martink@posteo.de> <martin.kepplinger@ginzinger.com>
@@ -119,6 +121,7 @@ Matt Redfearn <matt.redfearn@mips.com> <matt.redfearn@imgtec.com>
 Mayuresh Janorkar <mayur@ti.com>
 Michael Buesch <m@bues.ch>
 Michel Dänzer <michel@tungstengraphics.com>
+Miodrag Dinic <miodrag.dinic@mips.com> <miodrag.dinic@imgtec.com>
 Mitesh shah <mshah@teja.com>
 Mohit Kumar <mohit.kumar@st.com> <mohit.kumar.dhaka@gmail.com>
 Morten Welinder <terra@gnome.org>

+ 16 - 6
Documentation/process/kernel-enforcement-statement.rst

@@ -50,8 +50,9 @@ be stronger.
 Except where noted below, we speak only for ourselves, and not for any company
 we might work for today, have in the past, or will in the future.

+  - Laura Abbott
   - Bjorn Andersson (Linaro)
-  - Andrea Arcangeli (Red Hat)
+  - Andrea Arcangeli
   - Neil Armstrong
   - Jens Axboe
   - Pablo Neira Ayuso
@@ -60,15 +61,17 @@ we might work for today, have in the past, or will in the future.
   - Felipe Balbi
   - Arnd Bergmann
   - Ard Biesheuvel
-  - Paolo Bonzini (Red Hat)
+  - Tim Bird
+  - Paolo Bonzini
   - Christian Borntraeger
   - Mark Brown (Linaro)
   - Paul Burton
   - Javier Martinez Canillas
   - Rob Clark
   - Jonathan Corbet
+  - Dennis Dalessandro
   - Vivien Didelot (Savoir-faire Linux)
-  - Hans de Goede (Red Hat)
+  - Hans de Goede
   - Mel Gorman (SUSE)
   - Sven Eckelmann
   - Alex Elder (Linaro)
@@ -79,6 +82,7 @@ we might work for today, have in the past, or will in the future.
   - Juergen Gross
   - Shawn Guo
   - Ulf Hansson
+  - Stephen Hemminger (Microsoft)
   - Tejun Heo
   - Rob Herring
   - Masami Hiramatsu
@@ -104,18 +108,21 @@ we might work for today, have in the past, or will in the future.
   - Viresh Kumar
   - Aneesh Kumar K.V
   - Julia Lawall
-  - Doug Ledford (Red Hat)
+  - Doug Ledford
   - Chuck Lever (Oracle)
   - Daniel Lezcano
   - Shaohua Li
-  - Xin Long (Red Hat)
+  - Xin Long
   - Tony Luck
+  - Catalin Marinas (Arm Ltd)
   - Mike Marshall
   - Chris Mason
   - Paul E. McKenney
   - David S. Miller
   - Ingo Molnar
   - Kuninori Morimoto
+  - Trond Myklebust
+  - Martin K. Petersen (Oracle)
   - Borislav Petkov
   - Jiri Pirko
   - Josh Poimboeuf
@@ -124,18 +131,20 @@ we might work for today, have in the past, or will in the future.
   - Joerg Roedel
   - Leon Romanovsky
   - Steven Rostedt (VMware)
-  - Ivan Safonov
+  - Frank Rowand
   - Ivan Safonov
   - Anna Schumaker
   - Jes Sorensen
   - K.Y. Srinivasan
   - Heiko Stuebner
   - Jiri Kosina (SUSE)
+  - Willy Tarreau
   - Dmitry Torokhov
   - Linus Torvalds
   - Thierry Reding
   - Rik van Riel
   - Geert Uytterhoeven (Glider bvba)
+  - Eduardo Valentin (Amazon.com)
   - Daniel Vetter
   - Linus Walleij
   - Richard Weinberger
@@ -145,3 +154,4 @@ we might work for today, have in the past, or will in the future.
   - Masahiro Yamada
   - Wei Yongjun
   - Lv Zheng
+  - Marc Zyngier (Arm Ltd)

+ 9 - 5
MAINTAINERS

@@ -873,7 +873,7 @@ F:	drivers/android/
 F:	drivers/staging/android/

 ANDROID GOLDFISH RTC DRIVER
-M:	Miodrag Dinic <miodrag.dinic@imgtec.com>
+M:	Miodrag Dinic <miodrag.dinic@mips.com>
 S:	Supported
 F:	Documentation/devicetree/bindings/rtc/google,goldfish-rtc.txt
 F:	drivers/rtc/rtc-goldfish.c
@@ -7752,6 +7752,11 @@ S:	Maintained
 F:	Documentation/scsi/53c700.txt
 F:	drivers/scsi/53c700*

+LEAKING_ADDRESSES
+M:	Tobin C. Harding <me@tobin.cc>
+S:	Maintained
+F:	scripts/leaking_addresses.pl
+
 LED SUBSYSTEM
 M:	Richard Purdie <rpurdie@rpsys.net>
 M:	Jacek Anaszewski <jacek.anaszewski@gmail.com>
@@ -9027,7 +9032,7 @@ F:	drivers/*/*loongson1*
 F:	drivers/*/*/*loongson1*

 MIPS RINT INSTRUCTION EMULATION
-M:	Aleksandar Markovic <aleksandar.markovic@imgtec.com>
+M:	Aleksandar Markovic <aleksandar.markovic@mips.com>
 L:	linux-mips@linux-mips.org
 S:	Supported
 F:	arch/mips/math-emu/sp_rint.c
@@ -10692,10 +10697,9 @@ S:	Maintained
 F:	drivers/pinctrl/spear/

 PISTACHIO SOC SUPPORT
-M:	James Hartley <james.hartley@imgtec.com>
-M:	Ionela Voinescu <ionela.voinescu@imgtec.com>
+M:	James Hartley <james.hartley@sondrel.com>
 L:	linux-mips@linux-mips.org
-S:	Maintained
+S:	Odd Fixes
 F:	arch/mips/pistachio/
 F:	arch/mips/include/asm/mach-pistachio/
 F:	arch/mips/boot/dts/img/pistachio*

+ 1 - 1
Makefile

@@ -2,7 +2,7 @@
 VERSION = 4
 PATCHLEVEL = 14
 SUBLEVEL = 0
-EXTRAVERSION = -rc7
+EXTRAVERSION = -rc8
 NAME = Fearless Coyote

 # *DOCUMENTATION*

+ 2 - 0
arch/arm/Makefile

@@ -44,10 +44,12 @@ endif
 
 
 ifeq ($(CONFIG_CPU_BIG_ENDIAN),y)
 KBUILD_CPPFLAGS	+= -mbig-endian
+CHECKFLAGS	+= -D__ARMEB__
 AS		+= -EB
 LD		+= -EB
 else
 KBUILD_CPPFLAGS	+= -mlittle-endian
+CHECKFLAGS	+= -D__ARMEL__
 AS		+= -EL
 LD		+= -EL
 endif

+ 9 - 0
arch/arm/boot/compressed/vmlinux.lds.S

@@ -85,6 +85,15 @@ SECTIONS
 
 
   _edata = .;

+  /*
+   * The image_end section appears after any additional loadable sections
+   * that the linker may decide to insert in the binary image.  Having
+   * this symbol allows further debug in the near future.
+   */
+  .image_end (NOLOAD) : {
+    _edata_real = .;
+  }
+
   _magic_sig = ZIMAGE_MAGIC(0x016f2818);
   _magic_start = ZIMAGE_MAGIC(_start);
   _magic_end = ZIMAGE_MAGIC(_edata);

+ 2 - 2
arch/arm/boot/dts/armada-375.dtsi

@@ -178,9 +178,9 @@
 				reg = <0x8000 0x1000>;
 				cache-unified;
 				cache-level = <2>;
-				arm,double-linefill-incr = <1>;
+				arm,double-linefill-incr = <0>;
 				arm,double-linefill-wrap = <0>;
-				arm,double-linefill = <1>;
+				arm,double-linefill = <0>;
 				prefetch-data = <1>;
 			};


+ 2 - 2
arch/arm/boot/dts/armada-38x.dtsi

@@ -143,9 +143,9 @@
 				reg = <0x8000 0x1000>;
 				cache-unified;
 				cache-level = <2>;
-				arm,double-linefill-incr = <1>;
+				arm,double-linefill-incr = <0>;
 				arm,double-linefill-wrap = <0>;
-				arm,double-linefill = <1>;
+				arm,double-linefill = <0>;
 				prefetch-data = <1>;
 			};


+ 2 - 2
arch/arm/boot/dts/armada-39x.dtsi

@@ -111,9 +111,9 @@
 				reg = <0x8000 0x1000>;
 				cache-unified;
 				cache-level = <2>;
-				arm,double-linefill-incr = <1>;
+				arm,double-linefill-incr = <0>;
 				arm,double-linefill-wrap = <0>;
-				arm,double-linefill = <1>;
+				arm,double-linefill = <0>;
 				prefetch-data = <1>;
 			};


+ 6 - 3
arch/arm/boot/dts/uniphier-ld4.dtsi

@@ -209,7 +209,8 @@
 			interrupts = <0 80 4>;
 			pinctrl-names = "default";
 			pinctrl-0 = <&pinctrl_usb0>;
-			clocks = <&mio_clk 7>, <&mio_clk 8>, <&mio_clk 12>;
+			clocks = <&sys_clk 8>, <&mio_clk 7>, <&mio_clk 8>,
+				 <&mio_clk 12>;
 			resets = <&sys_rst 8>, <&mio_rst 7>, <&mio_rst 8>,
 				 <&mio_rst 12>;
 		};
@@ -221,7 +222,8 @@
 			interrupts = <0 81 4>;
 			pinctrl-names = "default";
 			pinctrl-0 = <&pinctrl_usb1>;
-			clocks = <&mio_clk 7>, <&mio_clk 9>, <&mio_clk 13>;
+			clocks = <&sys_clk 8>, <&mio_clk 7>, <&mio_clk 9>,
+				 <&mio_clk 13>;
 			resets = <&sys_rst 8>, <&mio_rst 7>, <&mio_rst 9>,
 				 <&mio_rst 13>;
 		};
@@ -233,7 +235,8 @@
 			interrupts = <0 82 4>;
 			pinctrl-names = "default";
 			pinctrl-0 = <&pinctrl_usb2>;
-			clocks = <&mio_clk 7>, <&mio_clk 10>, <&mio_clk 14>;
+			clocks = <&sys_clk 8>, <&mio_clk 7>, <&mio_clk 10>,
+				 <&mio_clk 14>;
 			resets = <&sys_rst 8>, <&mio_rst 7>, <&mio_rst 10>,
 				 <&mio_rst 14>;
 		};

+ 4 - 2
arch/arm/boot/dts/uniphier-pro4.dtsi

@@ -241,7 +241,8 @@
 			interrupts = <0 80 4>;
 			pinctrl-names = "default";
 			pinctrl-0 = <&pinctrl_usb2>;
-			clocks = <&mio_clk 7>, <&mio_clk 8>, <&mio_clk 12>;
+			clocks = <&sys_clk 8>, <&mio_clk 7>, <&mio_clk 8>,
+				 <&mio_clk 12>;
 			resets = <&sys_rst 8>, <&mio_rst 7>, <&mio_rst 8>,
 				 <&mio_rst 12>;
 		};
@@ -253,7 +254,8 @@
 			interrupts = <0 81 4>;
 			pinctrl-names = "default";
 			pinctrl-0 = <&pinctrl_usb3>;
-			clocks = <&mio_clk 7>, <&mio_clk 9>, <&mio_clk 13>;
+			clocks = <&sys_clk 8>, <&mio_clk 7>, <&mio_clk 9>,
+				 <&mio_clk 13>;
 			resets = <&sys_rst 8>, <&mio_rst 7>, <&mio_rst 9>,
 				 <&mio_rst 13>;
 		};

+ 6 - 3
arch/arm/boot/dts/uniphier-sld8.dtsi

@@ -209,7 +209,8 @@
 			interrupts = <0 80 4>;
 			pinctrl-names = "default";
 			pinctrl-0 = <&pinctrl_usb0>;
-			clocks = <&mio_clk 7>, <&mio_clk 8>, <&mio_clk 12>;
+			clocks = <&sys_clk 8>, <&mio_clk 7>, <&mio_clk 8>,
+				 <&mio_clk 12>;
 			resets = <&sys_rst 8>, <&mio_rst 7>, <&mio_rst 8>,
 				 <&mio_rst 12>;
 		};
@@ -221,7 +222,8 @@
 			interrupts = <0 81 4>;
 			pinctrl-names = "default";
 			pinctrl-0 = <&pinctrl_usb1>;
-			clocks = <&mio_clk 7>, <&mio_clk 9>, <&mio_clk 13>;
+			clocks = <&sys_clk 8>, <&mio_clk 7>, <&mio_clk 9>,
+				 <&mio_clk 13>;
 			resets = <&sys_rst 8>, <&mio_rst 7>, <&mio_rst 9>,
 				 <&mio_rst 13>;
 		};
@@ -233,7 +235,8 @@
 			interrupts = <0 82 4>;
 			pinctrl-names = "default";
 			pinctrl-0 = <&pinctrl_usb2>;
-			clocks = <&mio_clk 7>, <&mio_clk 10>, <&mio_clk 14>;
+			clocks = <&sys_clk 8>, <&mio_clk 7>, <&mio_clk 10>,
+				 <&mio_clk 14>;
 			resets = <&sys_rst 8>, <&mio_rst 7>, <&mio_rst 10>,
 				 <&mio_rst 14>;
 		};

+ 0 - 1
arch/arm/include/asm/Kbuild

@@ -20,7 +20,6 @@ generic-y += simd.h
 generic-y += sizes.h
 generic-y += timex.h
 generic-y += trace_clock.h
-generic-y += unaligned.h
 
 
 generated-y += mach-types.h
 generated-y += unistd-nr.h

+ 27 - 0
arch/arm/include/asm/unaligned.h

@@ -0,0 +1,27 @@
+#ifndef __ASM_ARM_UNALIGNED_H
+#define __ASM_ARM_UNALIGNED_H
+
+/*
+ * We generally want to set CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS on ARMv6+,
+ * but we don't want to use linux/unaligned/access_ok.h since that can lead
+ * to traps on unaligned stm/ldm or strd/ldrd.
+ */
+#include <asm/byteorder.h>
+
+#if defined(__LITTLE_ENDIAN)
+# include <linux/unaligned/le_struct.h>
+# include <linux/unaligned/be_byteshift.h>
+# include <linux/unaligned/generic.h>
+# define get_unaligned	__get_unaligned_le
+# define put_unaligned	__put_unaligned_le
+#elif defined(__BIG_ENDIAN)
+# include <linux/unaligned/be_struct.h>
+# include <linux/unaligned/le_byteshift.h>
+# include <linux/unaligned/generic.h>
+# define get_unaligned	__get_unaligned_be
+# define put_unaligned	__put_unaligned_be
+#else
+# error need to define endianess
+#endif
+
+#endif /* __ASM_ARM_UNALIGNED_H */

+ 2 - 4
arch/arm/kvm/emulate.c

@@ -227,7 +227,7 @@ void kvm_inject_undefined(struct kvm_vcpu *vcpu)
 	u32 return_offset = (is_thumb) ? 2 : 4;

 	kvm_update_psr(vcpu, UND_MODE);
-	*vcpu_reg(vcpu, 14) = *vcpu_pc(vcpu) - return_offset;
+	*vcpu_reg(vcpu, 14) = *vcpu_pc(vcpu) + return_offset;
 
 
 	/* Branch to exception vector */
 	*vcpu_pc(vcpu) = exc_vector_base(vcpu) + vect_offset;
@@ -239,10 +239,8 @@ void kvm_inject_undefined(struct kvm_vcpu *vcpu)
  */
 static void inject_abt(struct kvm_vcpu *vcpu, bool is_pabt, unsigned long addr)
 {
-	unsigned long cpsr = *vcpu_cpsr(vcpu);
-	bool is_thumb = (cpsr & PSR_T_BIT);
 	u32 vect_offset;
-	u32 return_offset = (is_thumb) ? 4 : 0;
+	u32 return_offset = (is_pabt) ? 4 : 8;
 	bool is_lpae;

 	kvm_update_psr(vcpu, ABT_MODE);

+ 1 - 1
arch/arm/kvm/hyp/Makefile

@@ -3,7 +3,7 @@
 # Makefile for Kernel-based Virtual Machine module, HYP part
 #

-ccflags-y += -fno-stack-protector
+ccflags-y += -fno-stack-protector -DDISABLE_BRANCH_PROFILING
 
 
 KVM=../../../../virt/kvm


+ 6 - 3
arch/arm64/boot/dts/socionext/uniphier-ld11.dtsi

@@ -299,7 +299,8 @@
 			interrupts = <0 243 4>;
 			pinctrl-names = "default";
 			pinctrl-0 = <&pinctrl_usb0>;
-			clocks = <&mio_clk 7>, <&mio_clk 8>, <&mio_clk 12>;
+			clocks = <&sys_clk 8>, <&mio_clk 7>, <&mio_clk 8>,
+				 <&mio_clk 12>;
 			resets = <&sys_rst 8>, <&mio_rst 7>, <&mio_rst 8>,
 				 <&mio_rst 12>;
 		};
@@ -311,7 +312,8 @@
 			interrupts = <0 244 4>;
 			pinctrl-names = "default";
 			pinctrl-0 = <&pinctrl_usb1>;
-			clocks = <&mio_clk 7>, <&mio_clk 9>, <&mio_clk 13>;
+			clocks = <&sys_clk 8>, <&mio_clk 7>, <&mio_clk 9>,
+				 <&mio_clk 13>;
 			resets = <&sys_rst 8>, <&mio_rst 7>, <&mio_rst 9>,
 				 <&mio_rst 13>;
 		};
@@ -323,7 +325,8 @@
 			interrupts = <0 245 4>;
 			pinctrl-names = "default";
 			pinctrl-0 = <&pinctrl_usb2>;
-			clocks = <&mio_clk 7>, <&mio_clk 10>, <&mio_clk 14>;
+			clocks = <&sys_clk 8>, <&mio_clk 7>, <&mio_clk 10>,
+				 <&mio_clk 14>;
 			resets = <&sys_rst 8>, <&mio_rst 7>, <&mio_rst 10>,
 				 <&mio_rst 14>;
 		};

+ 1 - 1
arch/arm64/kvm/hyp/Makefile

@@ -3,7 +3,7 @@
 # Makefile for Kernel-based Virtual Machine module, HYP part
 #

-ccflags-y += -fno-stack-protector
+ccflags-y += -fno-stack-protector -DDISABLE_BRANCH_PROFILING
 
 
 KVM=../../../../virt/kvm


+ 15 - 1
arch/arm64/kvm/inject_fault.c

@@ -33,12 +33,26 @@
 #define LOWER_EL_AArch64_VECTOR		0x400
 #define LOWER_EL_AArch32_VECTOR		0x600

+/*
+ * Table taken from ARMv8 ARM DDI0487B-B, table G1-10.
+ */
+static const u8 return_offsets[8][2] = {
+	[0] = { 0, 0 },		/* Reset, unused */
+	[1] = { 4, 2 },		/* Undefined */
+	[2] = { 0, 0 },		/* SVC, unused */
+	[3] = { 4, 4 },		/* Prefetch abort */
+	[4] = { 8, 8 },		/* Data abort */
+	[5] = { 0, 0 },		/* HVC, unused */
+	[6] = { 4, 4 },		/* IRQ, unused */
+	[7] = { 4, 4 },		/* FIQ, unused */
+};
+
 static void prepare_fault32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset)
 {
 	unsigned long cpsr;
 	unsigned long new_spsr_value = *vcpu_cpsr(vcpu);
 	bool is_thumb = (new_spsr_value & COMPAT_PSR_T_BIT);
-	u32 return_offset = (is_thumb) ? 4 : 0;
+	u32 return_offset = return_offsets[vect_offset >> 2][is_thumb];
 	u32 sctlr = vcpu_cp15(vcpu, c1_SCTLR);

 	cpsr = mode | COMPAT_PSR_I_BIT;

+ 2 - 0
arch/ia64/include/asm/acpi.h

@@ -112,6 +112,8 @@ static inline void arch_acpi_set_pdc_bits(u32 *buf)
 	buf[2] |= ACPI_PDC_EST_CAPABILITY_SMP;
 }

+#define acpi_unlazy_tlb(x)
+
 #ifdef CONFIG_ACPI_NUMA
 extern cpumask_t early_cpu_possible_map;
 #define for_each_possible_early_cpu(cpu)  \

+ 1 - 1
arch/mips/generic/board-ni169445.its.S

@@ -1,4 +1,4 @@
-{
+/ {
 	images {
 		fdt@ni169445 {
 			description = "NI 169445 device tree";

+ 1 - 1
arch/mips/generic/init.c

@@ -20,7 +20,7 @@
 #include <asm/fw/fw.h>
 #include <asm/irq_cpu.h>
 #include <asm/machine.h>
-#include <asm/mips-cpc.h>
+#include <asm/mips-cps.h>
 #include <asm/prom.h>
 #include <asm/smp-ops.h>
 #include <asm/time.h>

+ 1 - 1
arch/mips/generic/kexec.c

@@ -1,6 +1,6 @@
 /*
  * Copyright (C) 2016 Imagination Technologies
- * Author: Marcin Nowakowski <marcin.nowakowski@imgtec.com>
+ * Author: Marcin Nowakowski <marcin.nowakowski@mips.com>
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the

+ 2 - 2
arch/mips/include/asm/mips-cm.h

@@ -142,8 +142,8 @@ GCR_ACCESSOR_RO(64, 0x000, config)
 GCR_ACCESSOR_RW(64, 0x008, base)
 #define CM_GCR_BASE_GCRBASE			GENMASK_ULL(47, 15)
 #define CM_GCR_BASE_CMDEFTGT			GENMASK(1, 0)
-#define  CM_GCR_BASE_CMDEFTGT_DISABLED		0
-#define  CM_GCR_BASE_CMDEFTGT_MEM		1
+#define  CM_GCR_BASE_CMDEFTGT_MEM		0
+#define  CM_GCR_BASE_CMDEFTGT_RESERVED		1
 #define  CM_GCR_BASE_CMDEFTGT_IOCU0		2
 #define  CM_GCR_BASE_CMDEFTGT_IOCU1		3


+ 4 - 4
arch/mips/include/asm/stackframe.h

@@ -199,6 +199,10 @@
 		sll	k0, 3		/* extract cu0 bit */
 		sll	k0, 3		/* extract cu0 bit */
 		.set	noreorder
 		.set	noreorder
 		bltz	k0, 8f
 		bltz	k0, 8f
+		 move	k0, sp
+		.if \docfi
+		.cfi_register sp, k0
+		.endif
 #ifdef CONFIG_EVA
 #ifdef CONFIG_EVA
 		/*
 		/*
 		 * Flush interAptiv's Return Prediction Stack (RPS) by writing
 		 * Flush interAptiv's Return Prediction Stack (RPS) by writing
@@ -225,10 +229,6 @@
 		MTC0	k0, CP0_ENTRYHI
 		MTC0	k0, CP0_ENTRYHI
 #endif
 #endif
 		.set	reorder
 		.set	reorder
-		 move	k0, sp
-		.if \docfi
-		.cfi_register sp, k0
-		.endif
 		/* Called from user mode, new stack. */
 		/* Called from user mode, new stack. */
 		get_saved_sp docfi=\docfi tosp=1
 		get_saved_sp docfi=\docfi tosp=1
 8:
 8:

+ 1 - 1
arch/mips/kernel/probes-common.h

@@ -1,6 +1,6 @@
 /*
 /*
  * Copyright (C) 2016 Imagination Technologies
  * Copyright (C) 2016 Imagination Technologies
- * Author: Marcin Nowakowski <marcin.nowakowski@imgtec.com>
+ * Author: Marcin Nowakowski <marcin.nowakowski@mips.com>
  *
  *
  * This program is free software; you can redistribute it and/or modify it
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the
  * under the terms of the GNU General Public License as published by the

+ 3 - 3
arch/mips/kernel/smp-cmp.c

@@ -19,7 +19,7 @@
 #undef DEBUG
 #undef DEBUG
 
 
 #include <linux/kernel.h>
 #include <linux/kernel.h>
-#include <linux/sched.h>
+#include <linux/sched/task_stack.h>
 #include <linux/smp.h>
 #include <linux/smp.h>
 #include <linux/cpumask.h>
 #include <linux/cpumask.h>
 #include <linux/interrupt.h>
 #include <linux/interrupt.h>
@@ -50,8 +50,8 @@ static void cmp_init_secondary(void)
 
 
 #ifdef CONFIG_MIPS_MT_SMP
 #ifdef CONFIG_MIPS_MT_SMP
 	if (cpu_has_mipsmt)
 	if (cpu_has_mipsmt)
-		c->vpe_id = (read_c0_tcbind() >> TCBIND_CURVPE_SHIFT) &
-			TCBIND_CURVPE;
+		cpu_set_vpe_id(c, (read_c0_tcbind() >> TCBIND_CURVPE_SHIFT) &
+				  TCBIND_CURVPE);
 #endif
 #endif
 }
 }
 
 

+ 1 - 1
arch/mips/kernel/smp-cps.c

@@ -306,7 +306,7 @@ static int cps_boot_secondary(int cpu, struct task_struct *idle)
 	int err;
 	int err;
 
 
 	/* We don't yet support booting CPUs in other clusters */
 	/* We don't yet support booting CPUs in other clusters */
-	if (cpu_cluster(&cpu_data[cpu]) != cpu_cluster(&current_cpu_data))
+	if (cpu_cluster(&cpu_data[cpu]) != cpu_cluster(&raw_current_cpu_data))
 		return -ENOSYS;
 		return -ENOSYS;
 
 
 	vpe_cfg->pc = (unsigned long)&smp_bootstrap;
 	vpe_cfg->pc = (unsigned long)&smp_bootstrap;

+ 17 - 7
arch/mips/kernel/smp.c

@@ -42,7 +42,7 @@
 #include <asm/processor.h>
 #include <asm/processor.h>
 #include <asm/idle.h>
 #include <asm/idle.h>
 #include <asm/r4k-timer.h>
 #include <asm/r4k-timer.h>
-#include <asm/mips-cpc.h>
+#include <asm/mips-cps.h>
 #include <asm/mmu_context.h>
 #include <asm/mmu_context.h>
 #include <asm/time.h>
 #include <asm/time.h>
 #include <asm/setup.h>
 #include <asm/setup.h>
@@ -66,6 +66,7 @@ EXPORT_SYMBOL(cpu_sibling_map);
 cpumask_t cpu_core_map[NR_CPUS] __read_mostly;
 cpumask_t cpu_core_map[NR_CPUS] __read_mostly;
 EXPORT_SYMBOL(cpu_core_map);
 EXPORT_SYMBOL(cpu_core_map);
 
 
+static DECLARE_COMPLETION(cpu_starting);
 static DECLARE_COMPLETION(cpu_running);
 static DECLARE_COMPLETION(cpu_running);
 
 
 /*
 /*
@@ -374,6 +375,12 @@ asmlinkage void start_secondary(void)
 	cpumask_set_cpu(cpu, &cpu_coherent_mask);
 	cpumask_set_cpu(cpu, &cpu_coherent_mask);
 	notify_cpu_starting(cpu);
 	notify_cpu_starting(cpu);
 
 
+	/* Notify boot CPU that we're starting & ready to sync counters */
+	complete(&cpu_starting);
+
+	synchronise_count_slave(cpu);
+
+	/* The CPU is running and counters synchronised, now mark it online */
 	set_cpu_online(cpu, true);
 	set_cpu_online(cpu, true);
 
 
 	set_cpu_sibling_map(cpu);
 	set_cpu_sibling_map(cpu);
@@ -381,8 +388,11 @@ asmlinkage void start_secondary(void)
 
 
 	calculate_cpu_foreign_map();
 	calculate_cpu_foreign_map();
 
 
+	/*
+	 * Notify boot CPU that we're up & online and it can safely return
+	 * from __cpu_up
+	 */
 	complete(&cpu_running);
 	complete(&cpu_running);
-	synchronise_count_slave(cpu);
 
 
 	/*
 	/*
 	 * irq will be enabled in ->smp_finish(), enabling it too early
 	 * irq will be enabled in ->smp_finish(), enabling it too early
@@ -445,17 +455,17 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle)
 	if (err)
 	if (err)
 		return err;
 		return err;
 
 
-	/*
-	 * We must check for timeout here, as the CPU will not be marked
-	 * online until the counters are synchronised.
-	 */
-	if (!wait_for_completion_timeout(&cpu_running,
+	/* Wait for CPU to start and be ready to sync counters */
+	if (!wait_for_completion_timeout(&cpu_starting,
 					 msecs_to_jiffies(1000))) {
 					 msecs_to_jiffies(1000))) {
 		pr_crit("CPU%u: failed to start\n", cpu);
 		pr_crit("CPU%u: failed to start\n", cpu);
 		return -EIO;
 		return -EIO;
 	}
 	}
 
 
 	synchronise_count_master(cpu);
 	synchronise_count_master(cpu);
+
+	/* Wait for CPU to finish startup & mark itself online before return */
+	wait_for_completion(&cpu_running);
 	return 0;
 	return 0;
 }
 }
 
 

+ 1 - 1
arch/mips/mm/uasm-micromips.c

@@ -80,7 +80,7 @@ static const struct insn const insn_table_MM[insn_invalid] = {
 	[insn_jr]	= {M(mm_pool32a_op, 0, 0, 0, mm_jalr_op, mm_pool32axf_op), RS},
 	[insn_jr]	= {M(mm_pool32a_op, 0, 0, 0, mm_jalr_op, mm_pool32axf_op), RS},
 	[insn_lb]	= {M(mm_lb32_op, 0, 0, 0, 0, 0), RT | RS | SIMM},
 	[insn_lb]	= {M(mm_lb32_op, 0, 0, 0, 0, 0), RT | RS | SIMM},
 	[insn_ld]	= {0, 0},
 	[insn_ld]	= {0, 0},
-	[insn_lh]	= {M(mm_lh32_op, 0, 0, 0, 0, 0), RS | RS | SIMM},
+	[insn_lh]	= {M(mm_lh32_op, 0, 0, 0, 0, 0), RT | RS | SIMM},
 	[insn_ll]	= {M(mm_pool32c_op, 0, 0, (mm_ll_func << 1), 0, 0), RS | RT | SIMM},
 	[insn_ll]	= {M(mm_pool32c_op, 0, 0, (mm_ll_func << 1), 0, 0), RS | RT | SIMM},
 	[insn_lld]	= {0, 0},
 	[insn_lld]	= {0, 0},
 	[insn_lui]	= {M(mm_pool32i_op, mm_lui_op, 0, 0, 0, 0), RS | SIMM},
 	[insn_lui]	= {M(mm_pool32i_op, mm_lui_op, 0, 0, 0, 0), RS | SIMM},

+ 1 - 1
arch/mips/net/ebpf_jit.c

@@ -1513,7 +1513,7 @@ ld_skb_common:
 		}
 		}
 		src = ebpf_to_mips_reg(ctx, insn, src_reg_no_fp);
 		src = ebpf_to_mips_reg(ctx, insn, src_reg_no_fp);
 		if (src < 0)
 		if (src < 0)
-			return dst;
+			return src;
 		if (BPF_MODE(insn->code) == BPF_XADD) {
 		if (BPF_MODE(insn->code) == BPF_XADD) {
 			switch (BPF_SIZE(insn->code)) {
 			switch (BPF_SIZE(insn->code)) {
 			case BPF_W:
 			case BPF_W:

+ 6 - 6
arch/x86/crypto/sha1-mb/sha1_mb_mgr_flush_avx2.S

@@ -157,8 +157,8 @@ LABEL skip_ %I
 .endr
 .endr
 
 
 	# Find min length
 	# Find min length
-	vmovdqa _lens+0*16(state), %xmm0
-	vmovdqa _lens+1*16(state), %xmm1
+	vmovdqu _lens+0*16(state), %xmm0
+	vmovdqu _lens+1*16(state), %xmm1
 
 
 	vpminud %xmm1, %xmm0, %xmm2     # xmm2 has {D,C,B,A}
 	vpminud %xmm1, %xmm0, %xmm2     # xmm2 has {D,C,B,A}
 	vpalignr $8, %xmm2, %xmm3, %xmm3   # xmm3 has {x,x,D,C}
 	vpalignr $8, %xmm2, %xmm3, %xmm3   # xmm3 has {x,x,D,C}
@@ -178,8 +178,8 @@ LABEL skip_ %I
 	vpsubd  %xmm2, %xmm0, %xmm0
 	vpsubd  %xmm2, %xmm0, %xmm0
 	vpsubd  %xmm2, %xmm1, %xmm1
 	vpsubd  %xmm2, %xmm1, %xmm1
 
 
-	vmovdqa %xmm0, _lens+0*16(state)
-	vmovdqa %xmm1, _lens+1*16(state)
+	vmovdqu %xmm0, _lens+0*16(state)
+	vmovdqu %xmm1, _lens+1*16(state)
 
 
 	# "state" and "args" are the same address, arg1
 	# "state" and "args" are the same address, arg1
 	# len is arg2
 	# len is arg2
@@ -235,8 +235,8 @@ ENTRY(sha1_mb_mgr_get_comp_job_avx2)
 	jc      .return_null
 	jc      .return_null
 
 
 	# Find min length
 	# Find min length
-	vmovdqa _lens(state), %xmm0
-	vmovdqa _lens+1*16(state), %xmm1
+	vmovdqu _lens(state), %xmm0
+	vmovdqu _lens+1*16(state), %xmm1
 
 
 	vpminud %xmm1, %xmm0, %xmm2        # xmm2 has {D,C,B,A}
 	vpminud %xmm1, %xmm0, %xmm2        # xmm2 has {D,C,B,A}
 	vpalignr $8, %xmm2, %xmm3, %xmm3   # xmm3 has {x,x,D,C}
 	vpalignr $8, %xmm2, %xmm3, %xmm3   # xmm3 has {x,x,D,C}

+ 6 - 6
arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S

@@ -155,8 +155,8 @@ LABEL skip_ %I
 .endr
 .endr
 
 
 	# Find min length
 	# Find min length
-	vmovdqa _lens+0*16(state), %xmm0
-	vmovdqa _lens+1*16(state), %xmm1
+	vmovdqu _lens+0*16(state), %xmm0
+	vmovdqu _lens+1*16(state), %xmm1
 
 
 	vpminud %xmm1, %xmm0, %xmm2		# xmm2 has {D,C,B,A}
 	vpminud %xmm1, %xmm0, %xmm2		# xmm2 has {D,C,B,A}
 	vpalignr $8, %xmm2, %xmm3, %xmm3	# xmm3 has {x,x,D,C}
 	vpalignr $8, %xmm2, %xmm3, %xmm3	# xmm3 has {x,x,D,C}
@@ -176,8 +176,8 @@ LABEL skip_ %I
 	vpsubd	%xmm2, %xmm0, %xmm0
 	vpsubd	%xmm2, %xmm0, %xmm0
 	vpsubd	%xmm2, %xmm1, %xmm1
 	vpsubd	%xmm2, %xmm1, %xmm1
 
 
-	vmovdqa	%xmm0, _lens+0*16(state)
-	vmovdqa	%xmm1, _lens+1*16(state)
+	vmovdqu	%xmm0, _lens+0*16(state)
+	vmovdqu	%xmm1, _lens+1*16(state)
 
 
 	# "state" and "args" are the same address, arg1
 	# "state" and "args" are the same address, arg1
 	# len is arg2
 	# len is arg2
@@ -234,8 +234,8 @@ ENTRY(sha256_mb_mgr_get_comp_job_avx2)
 	jc	.return_null
 	jc	.return_null
 
 
 	# Find min length
 	# Find min length
-	vmovdqa	_lens(state), %xmm0
-	vmovdqa	_lens+1*16(state), %xmm1
+	vmovdqu	_lens(state), %xmm0
+	vmovdqu	_lens+1*16(state), %xmm1
 
 
 	vpminud	%xmm1, %xmm0, %xmm2		# xmm2 has {D,C,B,A}
 	vpminud	%xmm1, %xmm0, %xmm2		# xmm2 has {D,C,B,A}
 	vpalignr $8, %xmm2, %xmm3, %xmm3	# xmm3 has {x,x,D,C}
 	vpalignr $8, %xmm2, %xmm3, %xmm3	# xmm3 has {x,x,D,C}

+ 2 - 0
arch/x86/include/asm/acpi.h

@@ -150,6 +150,8 @@ static inline void disable_acpi(void) { }
 extern int x86_acpi_numa_init(void);
 extern int x86_acpi_numa_init(void);
 #endif /* CONFIG_ACPI_NUMA */
 #endif /* CONFIG_ACPI_NUMA */
 
 
+#define acpi_unlazy_tlb(x)	leave_mm(x)
+
 #ifdef CONFIG_ACPI_APEI
 #ifdef CONFIG_ACPI_APEI
 static inline pgprot_t arch_apei_get_mem_attribute(phys_addr_t addr)
 static inline pgprot_t arch_apei_get_mem_attribute(phys_addr_t addr)
 {
 {

+ 27 - 94
arch/x86/kernel/cpu/mcheck/dev-mcelog.c

@@ -24,14 +24,6 @@ static DEFINE_MUTEX(mce_chrdev_read_mutex);
 static char mce_helper[128];
 static char mce_helper[128];
 static char *mce_helper_argv[2] = { mce_helper, NULL };
 static char *mce_helper_argv[2] = { mce_helper, NULL };
 
 
-#define mce_log_get_idx_check(p) \
-({ \
-	RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held() && \
-			 !lockdep_is_held(&mce_chrdev_read_mutex), \
-			 "suspicious mce_log_get_idx_check() usage"); \
-	smp_load_acquire(&(p)); \
-})
-
 /*
 /*
  * Lockless MCE logging infrastructure.
  * Lockless MCE logging infrastructure.
  * This avoids deadlocks on printk locks without having to break locks. Also
  * This avoids deadlocks on printk locks without having to break locks. Also
@@ -53,43 +45,32 @@ static int dev_mce_log(struct notifier_block *nb, unsigned long val,
 				void *data)
 				void *data)
 {
 {
 	struct mce *mce = (struct mce *)data;
 	struct mce *mce = (struct mce *)data;
-	unsigned int next, entry;
-
-	wmb();
-	for (;;) {
-		entry = mce_log_get_idx_check(mcelog.next);
-		for (;;) {
-
-			/*
-			 * When the buffer fills up discard new entries.
-			 * Assume that the earlier errors are the more
-			 * interesting ones:
-			 */
-			if (entry >= MCE_LOG_LEN) {
-				set_bit(MCE_OVERFLOW,
-					(unsigned long *)&mcelog.flags);
-				return NOTIFY_OK;
-			}
-			/* Old left over entry. Skip: */
-			if (mcelog.entry[entry].finished) {
-				entry++;
-				continue;
-			}
-			break;
-		}
-		smp_rmb();
-		next = entry + 1;
-		if (cmpxchg(&mcelog.next, entry, next) == entry)
-			break;
+	unsigned int entry;
+
+	mutex_lock(&mce_chrdev_read_mutex);
+
+	entry = mcelog.next;
+
+	/*
+	 * When the buffer fills up discard new entries. Assume that the
+	 * earlier errors are the more interesting ones:
+	 */
+	if (entry >= MCE_LOG_LEN) {
+		set_bit(MCE_OVERFLOW, (unsigned long *)&mcelog.flags);
+		goto unlock;
 	}
 	}
+
+	mcelog.next = entry + 1;
+
 	memcpy(mcelog.entry + entry, mce, sizeof(struct mce));
 	memcpy(mcelog.entry + entry, mce, sizeof(struct mce));
-	wmb();
 	mcelog.entry[entry].finished = 1;
 	mcelog.entry[entry].finished = 1;
-	wmb();
 
 
 	/* wake processes polling /dev/mcelog */
 	/* wake processes polling /dev/mcelog */
 	wake_up_interruptible(&mce_chrdev_wait);
 	wake_up_interruptible(&mce_chrdev_wait);
 
 
+unlock:
+	mutex_unlock(&mce_chrdev_read_mutex);
+
 	return NOTIFY_OK;
 	return NOTIFY_OK;
 }
 }
 
 
@@ -177,13 +158,6 @@ static int mce_chrdev_release(struct inode *inode, struct file *file)
 	return 0;
 	return 0;
 }
 }
 
 
-static void collect_tscs(void *data)
-{
-	unsigned long *cpu_tsc = (unsigned long *)data;
-
-	cpu_tsc[smp_processor_id()] = rdtsc();
-}
-
 static int mce_apei_read_done;
 static int mce_apei_read_done;
 
 
 /* Collect MCE record of previous boot in persistent storage via APEI ERST. */
 /* Collect MCE record of previous boot in persistent storage via APEI ERST. */
@@ -231,14 +205,9 @@ static ssize_t mce_chrdev_read(struct file *filp, char __user *ubuf,
 				size_t usize, loff_t *off)
 				size_t usize, loff_t *off)
 {
 {
 	char __user *buf = ubuf;
 	char __user *buf = ubuf;
-	unsigned long *cpu_tsc;
-	unsigned prev, next;
+	unsigned next;
 	int i, err;
 	int i, err;
 
 
-	cpu_tsc = kmalloc(nr_cpu_ids * sizeof(long), GFP_KERNEL);
-	if (!cpu_tsc)
-		return -ENOMEM;
-
 	mutex_lock(&mce_chrdev_read_mutex);
 	mutex_lock(&mce_chrdev_read_mutex);
 
 
 	if (!mce_apei_read_done) {
 	if (!mce_apei_read_done) {
@@ -247,65 +216,29 @@ static ssize_t mce_chrdev_read(struct file *filp, char __user *ubuf,
 			goto out;
 			goto out;
 	}
 	}
 
 
-	next = mce_log_get_idx_check(mcelog.next);
-
 	/* Only supports full reads right now */
 	/* Only supports full reads right now */
 	err = -EINVAL;
 	err = -EINVAL;
 	if (*off != 0 || usize < MCE_LOG_LEN*sizeof(struct mce))
 	if (*off != 0 || usize < MCE_LOG_LEN*sizeof(struct mce))
 		goto out;
 		goto out;
 
 
+	next = mcelog.next;
 	err = 0;
 	err = 0;
-	prev = 0;
-	do {
-		for (i = prev; i < next; i++) {
-			unsigned long start = jiffies;
-			struct mce *m = &mcelog.entry[i];
-
-			while (!m->finished) {
-				if (time_after_eq(jiffies, start + 2)) {
-					memset(m, 0, sizeof(*m));
-					goto timeout;
-				}
-				cpu_relax();
-			}
-			smp_rmb();
-			err |= copy_to_user(buf, m, sizeof(*m));
-			buf += sizeof(*m);
-timeout:
-			;
-		}
-
-		memset(mcelog.entry + prev, 0,
-		       (next - prev) * sizeof(struct mce));
-		prev = next;
-		next = cmpxchg(&mcelog.next, prev, 0);
-	} while (next != prev);
-
-	synchronize_sched();
 
 
-	/*
-	 * Collect entries that were still getting written before the
-	 * synchronize.
-	 */
-	on_each_cpu(collect_tscs, cpu_tsc, 1);
-
-	for (i = next; i < MCE_LOG_LEN; i++) {
+	for (i = 0; i < next; i++) {
 		struct mce *m = &mcelog.entry[i];
 		struct mce *m = &mcelog.entry[i];
 
 
-		if (m->finished && m->tsc < cpu_tsc[m->cpu]) {
-			err |= copy_to_user(buf, m, sizeof(*m));
-			smp_rmb();
-			buf += sizeof(*m);
-			memset(m, 0, sizeof(*m));
-		}
+		err |= copy_to_user(buf, m, sizeof(*m));
+		buf += sizeof(*m);
 	}
 	}
 
 
+	memset(mcelog.entry, 0, next * sizeof(struct mce));
+	mcelog.next = 0;
+
 	if (err)
 	if (err)
 		err = -EFAULT;
 		err = -EFAULT;
 
 
 out:
 out:
 	mutex_unlock(&mce_chrdev_read_mutex);
 	mutex_unlock(&mce_chrdev_read_mutex);
-	kfree(cpu_tsc);
 
 
 	return err ? err : buf - ubuf;
 	return err ? err : buf - ubuf;
 }
 }

+ 1 - 1
arch/x86/kernel/kvmclock.c

@@ -79,7 +79,7 @@ static void kvm_get_wallclock(struct timespec *now)
 
 
 static int kvm_set_wallclock(const struct timespec *now)
 static int kvm_set_wallclock(const struct timespec *now)
 {
 {
-	return -1;
+	return -ENODEV;
 }
 }
 
 
 static u64 kvm_clock_read(void)
 static u64 kvm_clock_read(void)

+ 13 - 0
arch/x86/kernel/module.c

@@ -172,19 +172,27 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
 		case R_X86_64_NONE:
 		case R_X86_64_NONE:
 			break;
 			break;
 		case R_X86_64_64:
 		case R_X86_64_64:
+			if (*(u64 *)loc != 0)
+				goto invalid_relocation;
 			*(u64 *)loc = val;
 			*(u64 *)loc = val;
 			break;
 			break;
 		case R_X86_64_32:
 		case R_X86_64_32:
+			if (*(u32 *)loc != 0)
+				goto invalid_relocation;
 			*(u32 *)loc = val;
 			*(u32 *)loc = val;
 			if (val != *(u32 *)loc)
 			if (val != *(u32 *)loc)
 				goto overflow;
 				goto overflow;
 			break;
 			break;
 		case R_X86_64_32S:
 		case R_X86_64_32S:
+			if (*(s32 *)loc != 0)
+				goto invalid_relocation;
 			*(s32 *)loc = val;
 			*(s32 *)loc = val;
 			if ((s64)val != *(s32 *)loc)
 			if ((s64)val != *(s32 *)loc)
 				goto overflow;
 				goto overflow;
 			break;
 			break;
 		case R_X86_64_PC32:
 		case R_X86_64_PC32:
+			if (*(u32 *)loc != 0)
+				goto invalid_relocation;
 			val -= (u64)loc;
 			val -= (u64)loc;
 			*(u32 *)loc = val;
 			*(u32 *)loc = val;
 #if 0
 #if 0
@@ -200,6 +208,11 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
 	}
 	}
 	return 0;
 	return 0;
 
 
+invalid_relocation:
+	pr_err("x86/modules: Skipping invalid relocation target, existing value is nonzero for type %d, loc %p, val %Lx\n",
+	       (int)ELF64_R_TYPE(rel[i].r_info), loc, val);
+	return -ENOEXEC;
+
 overflow:
 overflow:
 	pr_err("overflow in relocation type %d val %Lx\n",
 	pr_err("overflow in relocation type %d val %Lx\n",
 	       (int)ELF64_R_TYPE(rel[i].r_info), val);
 	       (int)ELF64_R_TYPE(rel[i].r_info), val);

+ 5 - 0
arch/x86/kvm/lapic.c

@@ -1992,6 +1992,11 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
 				vcpu->arch.apic_base | MSR_IA32_APICBASE_BSP);
 				vcpu->arch.apic_base | MSR_IA32_APICBASE_BSP);
 	vcpu->arch.pv_eoi.msr_val = 0;
 	vcpu->arch.pv_eoi.msr_val = 0;
 	apic_update_ppr(apic);
 	apic_update_ppr(apic);
+	if (vcpu->arch.apicv_active) {
+		kvm_x86_ops->apicv_post_state_restore(vcpu);
+		kvm_x86_ops->hwapic_irr_update(vcpu, -1);
+		kvm_x86_ops->hwapic_isr_update(vcpu, -1);
+	}
 
 
 	vcpu->arch.apic_arb_prio = 0;
 	vcpu->arch.apic_arb_prio = 0;
 	vcpu->arch.apic_attention = 0;
 	vcpu->arch.apic_attention = 0;

+ 0 - 3
arch/x86/kvm/vmx.c

@@ -5619,9 +5619,6 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
 
 
 	kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu);
 	kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu);
 
 
-	if (kvm_vcpu_apicv_active(vcpu))
-		memset(&vmx->pi_desc, 0, sizeof(struct pi_desc));
-
 	if (vmx->vpid != 0)
 	if (vmx->vpid != 0)
 		vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid);
 		vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid);
 
 

+ 1 - 1
arch/x86/mm/mem_encrypt.c

@@ -40,7 +40,7 @@ static char sme_cmdline_off[] __initdata = "off";
  * section is later cleared.
  * section is later cleared.
  */
  */
 u64 sme_me_mask __section(.data) = 0;
 u64 sme_me_mask __section(.data) = 0;
-EXPORT_SYMBOL_GPL(sme_me_mask);
+EXPORT_SYMBOL(sme_me_mask);
 
 
 /* Buffer used for early in-place encryption by BSP, no locking needed */
 /* Buffer used for early in-place encryption by BSP, no locking needed */
 static char sme_early_buffer[PAGE_SIZE] __aligned(PAGE_SIZE);
 static char sme_early_buffer[PAGE_SIZE] __aligned(PAGE_SIZE);

+ 14 - 3
arch/x86/mm/tlb.c

@@ -85,6 +85,7 @@ void leave_mm(int cpu)
 
 
 	switch_mm(NULL, &init_mm, NULL);
 	switch_mm(NULL, &init_mm, NULL);
 }
 }
+EXPORT_SYMBOL_GPL(leave_mm);
 
 
 void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 	       struct task_struct *tsk)
 	       struct task_struct *tsk)
@@ -195,12 +196,22 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
 			this_cpu_write(cpu_tlbstate.ctxs[new_asid].ctx_id, next->context.ctx_id);
 			this_cpu_write(cpu_tlbstate.ctxs[new_asid].ctx_id, next->context.ctx_id);
 			this_cpu_write(cpu_tlbstate.ctxs[new_asid].tlb_gen, next_tlb_gen);
 			this_cpu_write(cpu_tlbstate.ctxs[new_asid].tlb_gen, next_tlb_gen);
 			write_cr3(build_cr3(next, new_asid));
 			write_cr3(build_cr3(next, new_asid));
-			trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH,
-					TLB_FLUSH_ALL);
+
+			/*
+			 * NB: This gets called via leave_mm() in the idle path
+			 * where RCU functions differently.  Tracing normally
+			 * uses RCU, so we need to use the _rcuidle variant.
+			 *
+			 * (There is no good reason for this.  The idle code should
+			 *  be rearranged to call this before rcu_idle_enter().)
+			 */
+			trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
 		} else {
 		} else {
 			/* The new ASID is already up to date. */
 			/* The new ASID is already up to date. */
 			write_cr3(build_cr3_noflush(next, new_asid));
 			write_cr3(build_cr3_noflush(next, new_asid));
-			trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, 0);
+
+			/* See above wrt _rcuidle. */
+			trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, 0);
 		}
 		}
 
 
 		this_cpu_write(cpu_tlbstate.loaded_mm, next);
 		this_cpu_write(cpu_tlbstate.loaded_mm, next);

+ 3 - 1
crypto/ccm.c

@@ -363,7 +363,7 @@ static int crypto_ccm_decrypt(struct aead_request *req)
 	unsigned int cryptlen = req->cryptlen;
 	unsigned int cryptlen = req->cryptlen;
 	u8 *authtag = pctx->auth_tag;
 	u8 *authtag = pctx->auth_tag;
 	u8 *odata = pctx->odata;
 	u8 *odata = pctx->odata;
-	u8 *iv = req->iv;
+	u8 *iv = pctx->idata;
 	int err;
 	int err;
 
 
 	cryptlen -= authsize;
 	cryptlen -= authsize;
@@ -379,6 +379,8 @@ static int crypto_ccm_decrypt(struct aead_request *req)
 	if (req->src != req->dst)
 	if (req->src != req->dst)
 		dst = pctx->dst;
 		dst = pctx->dst;
 
 
+	memcpy(iv, req->iv, 16);
+
 	skcipher_request_set_tfm(skreq, ctx->ctr);
 	skcipher_request_set_tfm(skreq, ctx->ctr);
 	skcipher_request_set_callback(skreq, pctx->flags,
 	skcipher_request_set_callback(skreq, pctx->flags,
 				      crypto_ccm_decrypt_done, req);
 				      crypto_ccm_decrypt_done, req);

+ 2 - 0
drivers/acpi/processor_idle.c

@@ -710,6 +710,8 @@ static DEFINE_RAW_SPINLOCK(c3_lock);
 static void acpi_idle_enter_bm(struct acpi_processor *pr,
 static void acpi_idle_enter_bm(struct acpi_processor *pr,
 			       struct acpi_processor_cx *cx, bool timer_bc)
 			       struct acpi_processor_cx *cx, bool timer_bc)
 {
 {
+	acpi_unlazy_tlb(smp_processor_id());
+
 	/*
 	/*
 	 * Must be done before busmaster disable as we might need to
 	 * Must be done before busmaster disable as we might need to
 	 * access HPET !
 	 * access HPET !

+ 28 - 0
drivers/acpi/sleep.c

@@ -160,6 +160,14 @@ static int __init init_nvs_nosave(const struct dmi_system_id *d)
 	return 0;
 	return 0;
 }
 }
 
 
+static bool acpi_sleep_no_lps0;
+
+static int __init init_no_lps0(const struct dmi_system_id *d)
+{
+	acpi_sleep_no_lps0 = true;
+	return 0;
+}
+
 static const struct dmi_system_id acpisleep_dmi_table[] __initconst = {
 static const struct dmi_system_id acpisleep_dmi_table[] __initconst = {
 	{
 	{
 	.callback = init_old_suspend_ordering,
 	.callback = init_old_suspend_ordering,
@@ -343,6 +351,19 @@ static const struct dmi_system_id acpisleep_dmi_table[] __initconst = {
 		DMI_MATCH(DMI_PRODUCT_NAME, "80E3"),
 		DMI_MATCH(DMI_PRODUCT_NAME, "80E3"),
 		},
 		},
 	},
 	},
+	/*
+	 * https://bugzilla.kernel.org/show_bug.cgi?id=196907
+	 * Some Dell XPS13 9360 cannot do suspend-to-idle using the Low Power
+	 * S0 Idle firmware interface.
+	 */
+	{
+	.callback = init_no_lps0,
+	.ident = "Dell XPS13 9360",
+	.matches = {
+		DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+		DMI_MATCH(DMI_PRODUCT_NAME, "XPS 13 9360"),
+		},
+	},
 	{},
 	{},
 };
 };
 
 
@@ -485,6 +506,7 @@ static void acpi_pm_end(void)
 }
 }
 #else /* !CONFIG_ACPI_SLEEP */
 #else /* !CONFIG_ACPI_SLEEP */
 #define acpi_target_sleep_state	ACPI_STATE_S0
 #define acpi_target_sleep_state	ACPI_STATE_S0
+#define acpi_sleep_no_lps0	(false)
 static inline void acpi_sleep_dmi_check(void) {}
 static inline void acpi_sleep_dmi_check(void) {}
 #endif /* CONFIG_ACPI_SLEEP */
 #endif /* CONFIG_ACPI_SLEEP */
 
 
@@ -863,6 +885,12 @@ static int lps0_device_attach(struct acpi_device *adev,
 	if (lps0_device_handle)
 	if (lps0_device_handle)
 		return 0;
 		return 0;
 
 
+	if (acpi_sleep_no_lps0) {
+		acpi_handle_info(adev->handle,
+				 "Low Power S0 Idle interface disabled\n");
+		return 0;
+	}
+
 	if (!(acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0))
 	if (!(acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0))
 		return 0;
 		return 0;
 
 

+ 3 - 3
drivers/firmware/efi/libstub/Makefile

@@ -34,13 +34,14 @@ lib-y				:= efi-stub-helper.o gop.o secureboot.o
 lib-$(CONFIG_RESET_ATTACK_MITIGATION) += tpm.o
 lib-$(CONFIG_RESET_ATTACK_MITIGATION) += tpm.o
 
 
 # include the stub's generic dependencies from lib/ when building for ARM/arm64
 # include the stub's generic dependencies from lib/ when building for ARM/arm64
-arm-deps := fdt_rw.c fdt_ro.c fdt_wip.c fdt.c fdt_empty_tree.c fdt_sw.c sort.c
+arm-deps-y := fdt_rw.c fdt_ro.c fdt_wip.c fdt.c fdt_empty_tree.c fdt_sw.c
+arm-deps-$(CONFIG_ARM64) += sort.c
 
 
 $(obj)/lib-%.o: $(srctree)/lib/%.c FORCE
 $(obj)/lib-%.o: $(srctree)/lib/%.c FORCE
 	$(call if_changed_rule,cc_o_c)
 	$(call if_changed_rule,cc_o_c)
 
 
 lib-$(CONFIG_EFI_ARMSTUB)	+= arm-stub.o fdt.o string.o random.o \
 lib-$(CONFIG_EFI_ARMSTUB)	+= arm-stub.o fdt.o string.o random.o \
-				   $(patsubst %.c,lib-%.o,$(arm-deps))
+				   $(patsubst %.c,lib-%.o,$(arm-deps-y))
 
 
 lib-$(CONFIG_ARM)		+= arm32-stub.o
 lib-$(CONFIG_ARM)		+= arm32-stub.o
 lib-$(CONFIG_ARM64)		+= arm64-stub.o
 lib-$(CONFIG_ARM64)		+= arm64-stub.o
@@ -91,5 +92,4 @@ quiet_cmd_stubcopy = STUBCPY $@
 # explicitly by the decompressor linker script.
 # explicitly by the decompressor linker script.
 #
 #
 STUBCOPY_FLAGS-$(CONFIG_ARM)	+= --rename-section .data=.data.efistub
 STUBCOPY_FLAGS-$(CONFIG_ARM)	+= --rename-section .data=.data.efistub
-STUBCOPY_RM-$(CONFIG_ARM)	+= -R ___ksymtab+sort -R ___kcrctab+sort
 STUBCOPY_RELOC-$(CONFIG_ARM)	:= R_ARM_ABS
 STUBCOPY_RELOC-$(CONFIG_ARM)	:= R_ARM_ABS

+ 5 - 2
drivers/firmware/efi/libstub/arm-stub.c

@@ -350,7 +350,9 @@ void efi_get_virtmap(efi_memory_desc_t *memory_map, unsigned long map_size,
 	 * The easiest way to find adjacent regions is to sort the memory map
 	 * The easiest way to find adjacent regions is to sort the memory map
 	 * before traversing it.
 	 * before traversing it.
 	 */
 	 */
-	sort(memory_map, map_size / desc_size, desc_size, cmp_mem_desc, NULL);
+	if (IS_ENABLED(CONFIG_ARM64))
+		sort(memory_map, map_size / desc_size, desc_size, cmp_mem_desc,
+		     NULL);
 
 
 	for (l = 0; l < map_size; l += desc_size, prev = in) {
 	for (l = 0; l < map_size; l += desc_size, prev = in) {
 		u64 paddr, size;
 		u64 paddr, size;
@@ -367,7 +369,8 @@ void efi_get_virtmap(efi_memory_desc_t *memory_map, unsigned long map_size,
 		 * a 4k page size kernel to kexec a 64k page size kernel and
 		 * a 4k page size kernel to kexec a 64k page size kernel and
 		 * vice versa.
 		 * vice versa.
 		 */
 		 */
-		if (!regions_are_adjacent(prev, in) ||
+		if ((IS_ENABLED(CONFIG_ARM64) &&
+		     !regions_are_adjacent(prev, in)) ||
 		    !regions_have_compatible_memory_type_attrs(prev, in)) {
 		    !regions_have_compatible_memory_type_attrs(prev, in)) {
 
 
 			paddr = round_down(in->phys_addr, SZ_64K);
 			paddr = round_down(in->phys_addr, SZ_64K);

+ 6 - 1
drivers/ide/ide-cd.c

@@ -867,11 +867,16 @@ static void msf_from_bcd(struct atapi_msf *msf)
 int cdrom_check_status(ide_drive_t *drive, struct request_sense *sense)
 int cdrom_check_status(ide_drive_t *drive, struct request_sense *sense)
 {
 {
 	struct cdrom_info *info = drive->driver_data;
 	struct cdrom_info *info = drive->driver_data;
-	struct cdrom_device_info *cdi = &info->devinfo;
+	struct cdrom_device_info *cdi;
 	unsigned char cmd[BLK_MAX_CDB];
 	unsigned char cmd[BLK_MAX_CDB];
 
 
 	ide_debug_log(IDE_DBG_FUNC, "enter");
 	ide_debug_log(IDE_DBG_FUNC, "enter");
 
 
+	if (!info)
+		return -EIO;
+
+	cdi = &info->devinfo;
+
 	memset(cmd, 0, BLK_MAX_CDB);
 	memset(cmd, 0, BLK_MAX_CDB);
 	cmd[0] = GPCMD_TEST_UNIT_READY;
 	cmd[0] = GPCMD_TEST_UNIT_READY;
 
 

+ 5 - 4
drivers/idle/intel_idle.c

@@ -913,15 +913,16 @@ static __cpuidle int intel_idle(struct cpuidle_device *dev,
 	struct cpuidle_state *state = &drv->states[index];
 	struct cpuidle_state *state = &drv->states[index];
 	unsigned long eax = flg2MWAIT(state->flags);
 	unsigned long eax = flg2MWAIT(state->flags);
 	unsigned int cstate;
 	unsigned int cstate;
+	int cpu = smp_processor_id();
 
 
 	cstate = (((eax) >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK) + 1;
 	cstate = (((eax) >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK) + 1;
 
 
 	/*
 	/*
-	 * NB: if CPUIDLE_FLAG_TLB_FLUSHED is set, this idle transition
-	 * will probably flush the TLB.  It's not guaranteed to flush
-	 * the TLB, though, so it's not clear that we can do anything
-	 * useful with this knowledge.
+	 * leave_mm() to avoid costly and often unnecessary wakeups
+	 * for flushing the user TLB's associated with the active mm.
 	 */
 	 */
+	if (state->flags & CPUIDLE_FLAG_TLB_FLUSHED)
+		leave_mm(cpu);
 
 
 	if (!(lapic_timer_reliable_states & (1 << (cstate))))
 	if (!(lapic_timer_reliable_states & (1 << (cstate))))
 		tick_broadcast_enter();
 		tick_broadcast_enter();

+ 1 - 0
drivers/input/sparse-keymap.c

@@ -255,6 +255,7 @@ void sparse_keymap_report_entry(struct input_dev *dev, const struct key_entry *k
 
 	case KE_VSW:
 		input_report_switch(dev, ke->sw.code, value);
+		input_sync(dev);
 		break;
 	}
 }

+ 1 - 0
drivers/input/touchscreen/ar1021_i2c.c

@@ -117,6 +117,7 @@ static int ar1021_i2c_probe(struct i2c_client *client,
 	input->open = ar1021_i2c_open;
 	input->close = ar1021_i2c_close;
 
+	__set_bit(INPUT_PROP_DIRECT, input->propbit);
 	input_set_capability(input, EV_KEY, BTN_TOUCH);
 	input_set_abs_params(input, ABS_X, 0, AR1021_MAX_X, 0, 0);
 	input_set_abs_params(input, ABS_Y, 0, AR1021_MAX_Y, 0, 0);

+ 1 - 0
drivers/irqchip/irq-mvebu-gicp.c

@@ -194,6 +194,7 @@ static int mvebu_gicp_probe(struct platform_device *pdev)
 		return -ENOMEM;
 
 	gicp->dev = &pdev->dev;
+	spin_lock_init(&gicp->spi_lock);
 
 	gicp->res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	if (!gicp->res)

+ 2 - 1
drivers/net/bonding/bond_main.c

@@ -2046,6 +2046,7 @@ static int bond_miimon_inspect(struct bonding *bond)
 
 	bond_for_each_slave_rcu(bond, slave, iter) {
 		slave->new_link = BOND_LINK_NOCHANGE;
+		slave->link_new_state = slave->link;
 
 		link_state = bond_check_dev_link(bond, slave->dev, 0);
 
@@ -3267,7 +3268,7 @@ u32 bond_xmit_hash(struct bonding *bond, struct sk_buff *skb)
 	hash ^= (hash >> 16);
 	hash ^= (hash >> 8);
 
-	return hash;
+	return hash >> 1;
 }
 
 /*-------------------------- Device entry points ----------------------------*/

+ 3 - 3
drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h

@@ -37,7 +37,7 @@
 
 #define T4FW_VERSION_MAJOR 0x01
 #define T4FW_VERSION_MINOR 0x10
-#define T4FW_VERSION_MICRO 0x2D
+#define T4FW_VERSION_MICRO 0x3F
 #define T4FW_VERSION_BUILD 0x00
 
 #define T4FW_MIN_VERSION_MAJOR 0x01
@@ -46,7 +46,7 @@
 
 #define T5FW_VERSION_MAJOR 0x01
 #define T5FW_VERSION_MINOR 0x10
-#define T5FW_VERSION_MICRO 0x2D
+#define T5FW_VERSION_MICRO 0x3F
 #define T5FW_VERSION_BUILD 0x00
 
 #define T5FW_MIN_VERSION_MAJOR 0x00
@@ -55,7 +55,7 @@
 
 #define T6FW_VERSION_MAJOR 0x01
 #define T6FW_VERSION_MINOR 0x10
-#define T6FW_VERSION_MICRO 0x2D
+#define T6FW_VERSION_MICRO 0x3F
 #define T6FW_VERSION_BUILD 0x00
 
 #define T6FW_MIN_VERSION_MAJOR 0x00

+ 4 - 0
drivers/net/ethernet/marvell/mvpp2.c

@@ -6941,6 +6941,9 @@ static int mvpp2_irqs_init(struct mvpp2_port *port)
 	for (i = 0; i < port->nqvecs; i++) {
 		struct mvpp2_queue_vector *qv = port->qvecs + i;
 
+		if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE)
+			irq_set_status_flags(qv->irq, IRQ_NO_BALANCING);
+
 		err = request_irq(qv->irq, mvpp2_isr, 0, port->dev->name, qv);
 		if (err)
 			goto err;
@@ -6970,6 +6973,7 @@ static void mvpp2_irqs_deinit(struct mvpp2_port *port)
 		struct mvpp2_queue_vector *qv = port->qvecs + i;
 
 		irq_set_affinity_hint(qv->irq, NULL);
+		irq_clear_status_flags(qv->irq, IRQ_NO_BALANCING);
 		free_irq(qv->irq, qv);
 	}
 }

+ 8 - 5
drivers/net/ethernet/mellanox/mlx5/core/en_fs.c

@@ -365,21 +365,24 @@ static void mlx5e_execute_l2_action(struct mlx5e_priv *priv,
 				    struct mlx5e_l2_hash_node *hn)
 {
 	u8 action = hn->action;
+	u8 mac_addr[ETH_ALEN];
 	int l2_err = 0;
 
+	ether_addr_copy(mac_addr, hn->ai.addr);
+
 	switch (action) {
 	case MLX5E_ACTION_ADD:
 		mlx5e_add_l2_flow_rule(priv, &hn->ai, MLX5E_FULLMATCH);
-		if (!is_multicast_ether_addr(hn->ai.addr)) {
-			l2_err = mlx5_mpfs_add_mac(priv->mdev, hn->ai.addr);
+		if (!is_multicast_ether_addr(mac_addr)) {
+			l2_err = mlx5_mpfs_add_mac(priv->mdev, mac_addr);
 			hn->mpfs = !l2_err;
 		}
 		hn->action = MLX5E_ACTION_NONE;
 		break;
 
 	case MLX5E_ACTION_DEL:
-		if (!is_multicast_ether_addr(hn->ai.addr) && hn->mpfs)
-			l2_err = mlx5_mpfs_del_mac(priv->mdev, hn->ai.addr);
+		if (!is_multicast_ether_addr(mac_addr) && hn->mpfs)
+			l2_err = mlx5_mpfs_del_mac(priv->mdev, mac_addr);
 		mlx5e_del_l2_flow_rule(priv, &hn->ai);
 		mlx5e_del_l2_from_hash(hn);
 		break;
@@ -387,7 +390,7 @@ static void mlx5e_execute_l2_action(struct mlx5e_priv *priv,
 
 	if (l2_err)
 		netdev_warn(priv->netdev, "MPFS, failed to %s mac %pM, err(%d)\n",
-			    action == MLX5E_ACTION_ADD ? "add" : "del", hn->ai.addr, l2_err);
+			    action == MLX5E_ACTION_ADD ? "add" : "del", mac_addr, l2_err);
 }
 
 static void mlx5e_sync_netdev_addr(struct mlx5e_priv *priv)

+ 2 - 2
drivers/net/usb/asix_devices.c

@@ -626,7 +626,7 @@ static int asix_suspend(struct usb_interface *intf, pm_message_t message)
 	struct usbnet *dev = usb_get_intfdata(intf);
 	struct asix_common_private *priv = dev->driver_priv;
 
-	if (priv->suspend)
+	if (priv && priv->suspend)
 		priv->suspend(dev);
 
 	return usbnet_suspend(intf, message);
@@ -678,7 +678,7 @@ static int asix_resume(struct usb_interface *intf)
 	struct usbnet *dev = usb_get_intfdata(intf);
 	struct asix_common_private *priv = dev->driver_priv;
 
-	if (priv->resume)
+	if (priv && priv->resume)
 		priv->resume(dev);
 
 	return usbnet_resume(intf);

+ 1 - 1
drivers/net/usb/cdc_ether.c

@@ -230,7 +230,7 @@ skip:
 			goto bad_desc;
 	}
 
-	if (header.usb_cdc_ether_desc) {
+	if (header.usb_cdc_ether_desc && info->ether->wMaxSegmentSize) {
 		dev->hard_mtu = le16_to_cpu(info->ether->wMaxSegmentSize);
 		/* because of Zaurus, we may be ignoring the host
 		 * side link address we were given.

+ 2 - 1
drivers/net/usb/qmi_wwan.c

@@ -499,6 +499,7 @@ static int qmi_wwan_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
 		return 1;
 	}
 	if (rawip) {
+		skb_reset_mac_header(skb);
 		skb->dev = dev->net; /* normally set by eth_type_trans */
 		skb->protocol = proto;
 		return 1;
@@ -681,7 +682,7 @@ static int qmi_wwan_bind(struct usbnet *dev, struct usb_interface *intf)
 	}
 
 	/* errors aren't fatal - we can live with the dynamic address */
-	if (cdc_ether) {
+	if (cdc_ether && cdc_ether->wMaxSegmentSize) {
 		dev->hard_mtu = le16_to_cpu(cdc_ether->wMaxSegmentSize);
 		usbnet_get_ethernet_addr(dev, cdc_ether->iMACAddress);
 	}

+ 0 - 3
drivers/scsi/scsi_lib.c

@@ -2685,7 +2685,6 @@ scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
 
 	}
 	sdev->sdev_state = state;
-	sysfs_notify(&sdev->sdev_gendev.kobj, NULL, "state");
 	return 0;
 
  illegal:
@@ -3109,7 +3108,6 @@ int scsi_internal_device_unblock_nowait(struct scsi_device *sdev,
 	case SDEV_BLOCK:
 	case SDEV_TRANSPORT_OFFLINE:
 		sdev->sdev_state = new_state;
-		sysfs_notify(&sdev->sdev_gendev.kobj, NULL, "state");
 		break;
 	case SDEV_CREATED_BLOCK:
 		if (new_state == SDEV_TRANSPORT_OFFLINE ||
@@ -3117,7 +3115,6 @@ int scsi_internal_device_unblock_nowait(struct scsi_device *sdev,
 			sdev->sdev_state = new_state;
 		else
 			sdev->sdev_state = SDEV_CREATED;
-		sysfs_notify(&sdev->sdev_gendev.kobj, NULL, "state");
 		break;
 	case SDEV_CANCEL:
 	case SDEV_OFFLINE:

+ 1 - 4
drivers/scsi/scsi_transport_srp.c

@@ -556,11 +556,8 @@ int srp_reconnect_rport(struct srp_rport *rport)
 		 */
 		shost_for_each_device(sdev, shost) {
 			mutex_lock(&sdev->state_mutex);
-			if (sdev->sdev_state == SDEV_OFFLINE) {
+			if (sdev->sdev_state == SDEV_OFFLINE)
 				sdev->sdev_state = SDEV_RUNNING;
-				sysfs_notify(&sdev->sdev_gendev.kobj,
-					     NULL, "state");
-			}
 			mutex_unlock(&sdev->state_mutex);
 		}
 	} else if (rport->state == SRP_RPORT_RUNNING) {

+ 2 - 2
include/linux/compiler.h

@@ -191,13 +191,13 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
 	asm("%c0:\n\t"							\
 	    ".pushsection .discard.reachable\n\t"			\
 	    ".long %c0b - .\n\t"					\
-	    ".popsection\n\t" : : "i" (__LINE__));			\
+	    ".popsection\n\t" : : "i" (__COUNTER__));			\
 })
 #define annotate_unreachable() ({					\
 	asm("%c0:\n\t"							\
 	    ".pushsection .discard.unreachable\n\t"			\
 	    ".long %c0b - .\n\t"					\
-	    ".popsection\n\t" : : "i" (__LINE__));			\
+	    ".popsection\n\t" : : "i" (__COUNTER__));			\
 })
 #define ASM_UNREACHABLE							\
 	"999:\n\t"							\

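Note: the switch from __LINE__ to __COUNTER__ above is about uniqueness: two annotate_*() expansions on the same source line would otherwise emit the same asm label. A quick stand-alone illustration (__COUNTER__ is a GCC/Clang extension; the CAT()/MARK_*() helpers are invented for the demo):

/* Demo: __LINE__ repeats when a macro is expanded twice on one line, while
 * __COUNTER__ increments on every expansion, so identifiers built from it
 * stay unique.  Build with gcc or clang.
 */
#include <stdio.h>

#define CAT_(a, b) a##b
#define CAT(a, b) CAT_(a, b)

/* A hypothetical helper that needs a unique name per expansion. */
#define MARK_COUNTER() int CAT(mark_, __COUNTER__)
/* The __LINE__ variant collides if used twice on one line:
 *   MARK_LINE(); MARK_LINE();   ->  redefinition of 'mark_NN'
 */
#define MARK_LINE()    int CAT(mark_, __LINE__)

MARK_COUNTER(); MARK_COUNTER();   /* fine: two distinct globals */

int main(void)
{
	printf("__COUNTER__ now expands to %d\n", __COUNTER__);
	return 0;
}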
+ 7 - 0
include/linux/skbuff.h

@@ -3841,6 +3841,13 @@ static inline void nf_reset_trace(struct sk_buff *skb)
 #endif
 }
 
+static inline void ipvs_reset(struct sk_buff *skb)
+{
+#if IS_ENABLED(CONFIG_IP_VS)
+	skb->ipvs_property = 0;
+#endif
+}
+
 /* Note: This doesn't put any conntrack and bridge info in dst. */
 static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src,
 			     bool copy)

+ 1 - 3
include/net/act_api.h

@@ -14,7 +14,6 @@
 struct tcf_idrinfo {
 	spinlock_t	lock;
 	struct idr	action_idr;
-	struct net	*net;
 };
 
 struct tc_action_ops;
@@ -105,7 +104,7 @@ struct tc_action_net {
 
 static inline
 int tc_action_net_init(struct tc_action_net *tn,
-		       const struct tc_action_ops *ops, struct net *net)
+		       const struct tc_action_ops *ops)
 {
 	int err = 0;
 
@@ -113,7 +112,6 @@ int tc_action_net_init(struct tc_action_net *tn,
 	if (!tn->idrinfo)
 		return -ENOMEM;
 	tn->ops = ops;
-	tn->idrinfo->net = net;
 	spin_lock_init(&tn->idrinfo->lock);
 	idr_init(&tn->idrinfo->action_idr);
 	return err;

+ 24 - 0
include/net/pkt_cls.h

@@ -231,6 +231,7 @@ struct tcf_exts {
 	__u32	type; /* for backward compat(TCA_OLD_COMPAT) */
 	int nr_actions;
 	struct tc_action **actions;
+	struct net *net;
 #endif
 	/* Map to export classifier specific extension TLV types to the
 	 * generic extensions API. Unsupported extensions must be set to 0.
@@ -244,6 +245,7 @@ static inline int tcf_exts_init(struct tcf_exts *exts, int action, int police)
 #ifdef CONFIG_NET_CLS_ACT
 	exts->type = 0;
 	exts->nr_actions = 0;
+	exts->net = NULL;
 	exts->actions = kcalloc(TCA_ACT_MAX_PRIO, sizeof(struct tc_action *),
 				GFP_KERNEL);
 	if (!exts->actions)
@@ -254,6 +256,28 @@ static inline int tcf_exts_init(struct tcf_exts *exts, int action, int police)
 	return 0;
 }
 
+/* Return false if the netns is being destroyed in cleanup_net(). Callers
+ * need to do cleanup synchronously in this case, otherwise may race with
+ * tc_action_net_exit(). Return true for other cases.
+ */
+static inline bool tcf_exts_get_net(struct tcf_exts *exts)
+{
+#ifdef CONFIG_NET_CLS_ACT
+	exts->net = maybe_get_net(exts->net);
+	return exts->net != NULL;
+#else
+	return true;
+#endif
+}
+
+static inline void tcf_exts_put_net(struct tcf_exts *exts)
+{
+#ifdef CONFIG_NET_CLS_ACT
+	if (exts->net)
+		put_net(exts->net);
+#endif
+}
+
 static inline void tcf_exts_to_list(const struct tcf_exts *exts,
 				    struct list_head *actions)
 {
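Note: the new tcf_exts_get_net()/tcf_exts_put_net() pair implements a conditional deferred-free: take a netns reference only while the namespace is still alive, and if that fails (cleanup_net() already running) free synchronously instead of queueing RCU work that could outlive the namespace. A rough userspace sketch of the same shape using C11 atomics; ref_get_unless_zero() and struct owner are made-up names for the illustration, not kernel APIs:

/* Sketch: take a reference only if the owner is still alive; if that fails,
 * the caller cleans up synchronously instead of deferring work that could
 * outlive the owner.  Illustrative only; the kernel uses maybe_get_net(),
 * put_net() and call_rcu() for the real thing.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct owner {
	atomic_int refcount;   /* 0 means the owner is being torn down */
};

/* Increment the refcount unless it already dropped to zero. */
static bool ref_get_unless_zero(struct owner *o)
{
	int old = atomic_load(&o->refcount);

	while (old != 0) {
		if (atomic_compare_exchange_weak(&o->refcount, &old, old + 1))
			return true;   /* got a reference */
	}
	return false;                  /* owner already going away */
}

static void ref_put(struct owner *o)
{
	atomic_fetch_sub(&o->refcount, 1);
}

static void destroy_filter(struct owner *o, bool deferred)
{
	printf("freeing filter %s\n", deferred ? "later (deferred)" : "now (synchronously)");
	if (deferred)
		ref_put(o);    /* released by the deferred callback in the real code */
}

int main(void)
{
	struct owner live = { 1 };
	struct owner dying = { 0 };

	/* Mirrors: if (tcf_exts_get_net(&f->exts)) call_rcu(...); else __delete(f); */
	destroy_filter(&live, ref_get_unless_zero(&live));
	destroy_filter(&dying, ref_get_unless_zero(&dying));
	return 0;
}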

+ 2 - 1
include/sound/seq_kernel.h

@@ -49,7 +49,8 @@ typedef union snd_seq_timestamp snd_seq_timestamp_t;
 #define SNDRV_SEQ_DEFAULT_CLIENT_EVENTS	200
 
 /* max delivery path length */
-#define SNDRV_SEQ_MAX_HOPS		10
+/* NOTE: this shouldn't be greater than MAX_LOCKDEP_SUBCLASSES */
+#define SNDRV_SEQ_MAX_HOPS		8
 
 /* max size of event size */
 #define SNDRV_SEQ_MAX_EVENT_LEN		0x3fffffff

+ 2 - 0
include/sound/timer.h

@@ -90,6 +90,8 @@ struct snd_timer {
 	struct list_head ack_list_head;
 	struct list_head sack_list_head; /* slow ack list head */
 	struct tasklet_struct task_queue;
+	int max_instances;	/* upper limit of timer instances */
+	int num_instances;	/* current number of timer instances */
 };
 
 struct snd_timer_instance {

+ 3 - 3
include/uapi/sound/asound.h

@@ -94,7 +94,7 @@ enum {
 	SNDRV_HWDEP_IFACE_VX,		/* Digigram VX cards */
 	SNDRV_HWDEP_IFACE_MIXART,	/* Digigram miXart cards */
 	SNDRV_HWDEP_IFACE_USX2Y,	/* Tascam US122, US224 & US428 usb */
-	SNDRV_HWDEP_IFACE_EMUX_WAVETABLE, /* EmuX wavetable */	
+	SNDRV_HWDEP_IFACE_EMUX_WAVETABLE, /* EmuX wavetable */
 	SNDRV_HWDEP_IFACE_BLUETOOTH,	/* Bluetooth audio */
 	SNDRV_HWDEP_IFACE_USX2Y_PCM,	/* Tascam US122, US224 & US428 rawusb pcm */
 	SNDRV_HWDEP_IFACE_PCXHR,	/* Digigram PCXHR */
@@ -384,7 +384,7 @@ struct snd_mask {
 
 struct snd_pcm_hw_params {
 	unsigned int flags;
-	struct snd_mask masks[SNDRV_PCM_HW_PARAM_LAST_MASK - 
+	struct snd_mask masks[SNDRV_PCM_HW_PARAM_LAST_MASK -
 			       SNDRV_PCM_HW_PARAM_FIRST_MASK + 1];
 	struct snd_mask mres[5];	/* reserved masks */
 	struct snd_interval intervals[SNDRV_PCM_HW_PARAM_LAST_INTERVAL -
@@ -857,7 +857,7 @@ typedef int __bitwise snd_ctl_elem_iface_t;
 #define SNDRV_CTL_ELEM_ACCESS_INACTIVE		(1<<8)	/* control does actually nothing, but may be updated */
 #define SNDRV_CTL_ELEM_ACCESS_LOCK		(1<<9)	/* write lock */
 #define SNDRV_CTL_ELEM_ACCESS_OWNER		(1<<10)	/* write lock owner */
-#define SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK	(1<<28)	/* kernel use a TLV callback */ 
+#define SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK	(1<<28)	/* kernel use a TLV callback */
 #define SNDRV_CTL_ELEM_ACCESS_USER		(1<<29) /* user space element */
 /* bits 30 and 31 are obsoleted (for indirect access) */
 

+ 4 - 2
kernel/events/core.c

@@ -901,9 +901,11 @@ list_update_cgroup_event(struct perf_event *event,
 	cpuctx_entry = &cpuctx->cgrp_cpuctx_entry;
 	/* cpuctx->cgrp is NULL unless a cgroup event is active in this CPU .*/
 	if (add) {
+		struct perf_cgroup *cgrp = perf_cgroup_from_task(current, ctx);
+
 		list_add(cpuctx_entry, this_cpu_ptr(&cgrp_cpuctx_list));
-		if (perf_cgroup_from_task(current, ctx) == event->cgrp)
-			cpuctx->cgrp = event->cgrp;
+		if (cgroup_is_descendant(cgrp->css.cgroup, event->cgrp->css.cgroup))
+			cpuctx->cgrp = cgrp;
 	} else {
 		list_del(cpuctx_entry);
 		cpuctx->cgrp = NULL;

+ 20 - 3
kernel/futex.c

@@ -903,11 +903,27 @@ void exit_pi_state_list(struct task_struct *curr)
 	 */
 	raw_spin_lock_irq(&curr->pi_lock);
 	while (!list_empty(head)) {
-
 		next = head->next;
 		pi_state = list_entry(next, struct futex_pi_state, list);
 		key = pi_state->key;
 		hb = hash_futex(&key);
+
+		/*
+		 * We can race against put_pi_state() removing itself from the
+		 * list (a waiter going away). put_pi_state() will first
+		 * decrement the reference count and then modify the list, so
+		 * its possible to see the list entry but fail this reference
+		 * acquire.
+		 *
+		 * In that case; drop the locks to let put_pi_state() make
+		 * progress and retry the loop.
+		 */
+		if (!atomic_inc_not_zero(&pi_state->refcount)) {
+			raw_spin_unlock_irq(&curr->pi_lock);
+			cpu_relax();
+			raw_spin_lock_irq(&curr->pi_lock);
+			continue;
+		}
 		raw_spin_unlock_irq(&curr->pi_lock);
 
 		spin_lock(&hb->lock);
@@ -918,8 +934,10 @@ void exit_pi_state_list(struct task_struct *curr)
 		 * task still owns the PI-state:
 		 */
 		if (head->next != next) {
+			/* retain curr->pi_lock for the loop invariant */
 			raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
 			spin_unlock(&hb->lock);
+			put_pi_state(pi_state);
 			continue;
 		}
 
@@ -927,9 +945,8 @@ void exit_pi_state_list(struct task_struct *curr)
 		WARN_ON(list_empty(&pi_state->list));
 		list_del_init(&pi_state->list);
 		pi_state->owner = NULL;
-		raw_spin_unlock(&curr->pi_lock);
 
-		get_pi_state(pi_state);
+		raw_spin_unlock(&curr->pi_lock);
 		raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
 		spin_unlock(&hb->lock);
 
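Note: the comment block added above is the heart of the fix: put_pi_state() drops its reference before unlinking, so the exit path may see a list entry it can no longer pin and has to back off and rescan rather than reuse a dying pi_state. A compact sketch of that acquire-or-back-off loop in plain C (a pthread mutex and sched_yield() stand in for pi_lock and cpu_relax(); in the kernel the unpinnable entry disappears from the list once its owner finishes, which is what lets the rescan terminate):

/* Sketch of the back-off-and-retry shape: an entry we cannot pin is being
 * torn down by its owner, so drop the lock, let the owner run, retake the
 * lock and rescan.  Illustration only; the kernel uses raw_spin_lock_irq(),
 * atomic_inc_not_zero() and cpu_relax().
 */
#include <pthread.h>
#include <sched.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for atomic_inc_not_zero(&pi_state->refcount). */
static bool try_pin(int *refcount)
{
	if (*refcount == 0)
		return false;          /* owner already dropped its reference */
	(*refcount)++;
	return true;
}

static void drain(int *refcounts, int n)
{
	pthread_mutex_lock(&list_lock);
	for (int i = 0; i < n; ) {
		if (!try_pin(&refcounts[i])) {
			/* Let the owner finish unlinking, then rescan. */
			pthread_mutex_unlock(&list_lock);
			sched_yield();                 /* ~ cpu_relax() */
			pthread_mutex_lock(&list_lock);
			continue;
		}
		printf("pinned entry %d (refcount now %d)\n", i, refcounts[i]);
		i++;
	}
	pthread_mutex_unlock(&list_lock);
}

int main(void)
{
	int refcounts[] = { 1, 2, 1 };   /* all live, so the demo terminates */

	drain(refcounts, 3);
	return 0;
}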

+ 1 - 5
kernel/sched/cpufreq_schedutil.c

@@ -649,6 +649,7 @@ static int sugov_start(struct cpufreq_policy *policy)
 		struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);
 
 		memset(sg_cpu, 0, sizeof(*sg_cpu));
+		sg_cpu->cpu = cpu;
 		sg_cpu->sg_policy = sg_policy;
 		sg_cpu->flags = SCHED_CPUFREQ_RT;
 		sg_cpu->iowait_boost_max = policy->cpuinfo.max_freq;
@@ -714,11 +715,6 @@ struct cpufreq_governor *cpufreq_default_governor(void)
 
 static int __init sugov_register(void)
 {
-	int cpu;
-
-	for_each_possible_cpu(cpu)
-		per_cpu(sugov_cpu, cpu).cpu = cpu;
-
 	return cpufreq_register_governor(&schedutil_gov);
 }
 fs_initcall(sugov_register);

+ 10 - 5
kernel/watchdog_hld.c

@@ -13,6 +13,7 @@
 #define pr_fmt(fmt) "NMI watchdog: " fmt
 
 #include <linux/nmi.h>
+#include <linux/atomic.h>
 #include <linux/module.h>
 #include <linux/sched/debug.h>
 
@@ -22,10 +23,11 @@
 static DEFINE_PER_CPU(bool, hard_watchdog_warn);
 static DEFINE_PER_CPU(bool, watchdog_nmi_touch);
 static DEFINE_PER_CPU(struct perf_event *, watchdog_ev);
+static DEFINE_PER_CPU(struct perf_event *, dead_event);
 static struct cpumask dead_events_mask;
 
 static unsigned long hardlockup_allcpu_dumped;
-static unsigned int watchdog_cpus;
+static atomic_t watchdog_cpus = ATOMIC_INIT(0);
 
 void arch_touch_nmi_watchdog(void)
 {
@@ -189,7 +191,8 @@ void hardlockup_detector_perf_enable(void)
 	if (hardlockup_detector_event_create())
 		return;
 
-	if (!watchdog_cpus++)
+	/* use original value for check */
+	if (!atomic_fetch_inc(&watchdog_cpus))
 		pr_info("Enabled. Permanently consumes one hw-PMU counter.\n");
 
 	perf_event_enable(this_cpu_read(watchdog_ev));
@@ -204,8 +207,10 @@ void hardlockup_detector_perf_disable(void)
 
 	if (event) {
 		perf_event_disable(event);
+		this_cpu_write(watchdog_ev, NULL);
+		this_cpu_write(dead_event, event);
 		cpumask_set_cpu(smp_processor_id(), &dead_events_mask);
-		watchdog_cpus--;
+		atomic_dec(&watchdog_cpus);
 	}
 }
 
@@ -219,7 +224,7 @@ void hardlockup_detector_perf_cleanup(void)
 	int cpu;
 
 	for_each_cpu(cpu, &dead_events_mask) {
-		struct perf_event *event = per_cpu(watchdog_ev, cpu);
+		struct perf_event *event = per_cpu(dead_event, cpu);
 
 		/*
 		 * Required because for_each_cpu() reports  unconditionally
@@ -227,7 +232,7 @@ void hardlockup_detector_perf_cleanup(void)
 		 */
 		if (event)
 			perf_event_release_kernel(event);
-		per_cpu(watchdog_ev, cpu) = NULL;
+		per_cpu(dead_event, cpu) = NULL;
 	}
 	cpumask_clear(&dead_events_mask);
 }
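Note: two things make the hunks above safe against concurrent enable/disable: the event being torn down is parked in a separate per-CPU dead_event slot for later release, and the CPU count becomes an atomic_t whose pre-increment value decides who prints the banner. The second point in toy form (the thread count is arbitrary; build with -pthread):

/* Demo: atomic_fetch_add() returns the pre-increment value, so exactly one
 * of many concurrent "enable" calls sees 0 and prints the banner, which is
 * why the patch tests !atomic_fetch_inc(&watchdog_cpus).
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int enabled_cpus;

static void *enable_one(void *arg)
{
	(void)arg;
	if (atomic_fetch_add(&enabled_cpus, 1) == 0)
		puts("Enabled. Permanently consumes one hw-PMU counter.");
	return NULL;
}

int main(void)
{
	pthread_t t[8];

	for (int i = 0; i < 8; i++)
		pthread_create(&t[i], NULL, enable_one, NULL);
	for (int i = 0; i < 8; i++)
		pthread_join(t[i], NULL);

	printf("%d CPUs enabled, banner printed once\n",
	       atomic_load(&enabled_cpus));
	return 0;
}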

+ 2 - 1
kernel/workqueue_internal.h

@@ -10,6 +10,7 @@
 
 #include <linux/workqueue.h>
 #include <linux/kthread.h>
+#include <linux/preempt.h>
 
 struct worker_pool;
 
@@ -60,7 +61,7 @@ struct worker {
  */
 static inline struct worker *current_wq_worker(void)
{
-	if (current->flags & PF_WQ_WORKER)
+	if (in_task() && (current->flags & PF_WQ_WORKER))
 		return kthread_data(current);
 	return NULL;
 }

+ 2 - 2
lib/asn1_decoder.c

@@ -228,7 +228,7 @@ next_op:
 		hdr = 2;
 
 		/* Extract a tag from the data */
-		if (unlikely(dp >= datalen - 1))
+		if (unlikely(datalen - dp < 2))
 			goto data_overrun_error;
 		tag = data[dp++];
 		if (unlikely((tag & 0x1f) == ASN1_LONG_TAG))
@@ -274,7 +274,7 @@ next_op:
 				int n = len - 0x80;
 				if (unlikely(n > 2))
 					goto length_too_long;
-				if (unlikely(dp >= datalen - n))
+				if (unlikely(n > datalen - dp))
 					goto data_overrun_error;
 				hdr += n;
 				for (len = 0; n > 0; n--) {
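Note: both rewritten checks avoid unsigned wrap-around: dp and datalen are size_t, so the old form's datalen - 1 underflows to SIZE_MAX when datalen is 0 and the overrun test never fires, while the new form only subtracts dp, which the decoder keeps at or below datalen at this point. A tiny check of the two forms (the harness is made up; variable names mirror the decoder):

/* Shows why the old bounds test was unsafe with unsigned arithmetic: for
 * datalen == 0 the expression datalen - 1 wraps to SIZE_MAX, so
 * "dp >= datalen - 1" fails to reject the read, while "datalen - dp < 2"
 * (with dp <= datalen) rejects it as intended.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

static bool old_check_rejects(size_t dp, size_t datalen)
{
	return dp >= datalen - 1;      /* wraps when datalen == 0 */
}

static bool new_check_rejects(size_t dp, size_t datalen)
{
	return datalen - dp < 2;       /* safe while dp <= datalen */
}

int main(void)
{
	size_t dp = 0, datalen = 0;    /* empty input, nothing to read */

	printf("old check rejects empty input: %s\n",
	       old_check_rejects(dp, datalen) ? "yes" : "no (bug)");
	printf("new check rejects empty input: %s\n",
	       new_check_rejects(dp, datalen) ? "yes" : "no");
	return 0;
}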

+ 1 - 0
net/core/skbuff.c

@@ -4869,6 +4869,7 @@ void skb_scrub_packet(struct sk_buff *skb, bool xnet)
 	if (!xnet)
 		return;
 
+	ipvs_reset(skb);
 	skb_orphan(skb);
 	skb->mark = 0;
 }

+ 1 - 1
net/ipv4/tcp_input.c

@@ -100,7 +100,7 @@ int sysctl_tcp_max_orphans __read_mostly = NR_FILE;
 
 #define FLAG_ACKED		(FLAG_DATA_ACKED|FLAG_SYN_ACKED)
 #define FLAG_NOT_DUP		(FLAG_DATA|FLAG_WIN_UPDATE|FLAG_ACKED)
-#define FLAG_CA_ALERT		(FLAG_DATA_SACKED|FLAG_ECE)
+#define FLAG_CA_ALERT		(FLAG_DATA_SACKED|FLAG_ECE|FLAG_DSACKING_ACK)
 #define FLAG_FORWARD_PROGRESS	(FLAG_ACKED|FLAG_DATA_SACKED)
 
 #define TCP_REMNANT (TCP_FLAG_FIN|TCP_FLAG_URG|TCP_FLAG_SYN|TCP_FLAG_PSH)

+ 9 - 15
net/l2tp/l2tp_ip.c

@@ -123,6 +123,7 @@ static int l2tp_ip_recv(struct sk_buff *skb)
 	unsigned char *ptr, *optr;
 	struct l2tp_session *session;
 	struct l2tp_tunnel *tunnel = NULL;
+	struct iphdr *iph;
 	int length;
 
 	if (!pskb_may_pull(skb, 4))
@@ -178,24 +179,17 @@ pass_up:
 		goto discard;
 
 	tunnel_id = ntohl(*(__be32 *) &skb->data[4]);
-	tunnel = l2tp_tunnel_find(net, tunnel_id);
-	if (tunnel) {
-		sk = tunnel->sock;
-		sock_hold(sk);
-	} else {
-		struct iphdr *iph = (struct iphdr *) skb_network_header(skb);
-
-		read_lock_bh(&l2tp_ip_lock);
-		sk = __l2tp_ip_bind_lookup(net, iph->daddr, iph->saddr,
-					   inet_iif(skb), tunnel_id);
-		if (!sk) {
-			read_unlock_bh(&l2tp_ip_lock);
-			goto discard;
-		}
+	iph = (struct iphdr *)skb_network_header(skb);
 
-		sock_hold(sk);
+	read_lock_bh(&l2tp_ip_lock);
+	sk = __l2tp_ip_bind_lookup(net, iph->daddr, iph->saddr, inet_iif(skb),
+				   tunnel_id);
+	if (!sk) {
 		read_unlock_bh(&l2tp_ip_lock);
+		goto discard;
 	}
+	sock_hold(sk);
+	read_unlock_bh(&l2tp_ip_lock);
 
 	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
 		goto discard_put;
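Note: the rewritten receive path always resolves the socket through the bind table under l2tp_ip_lock and takes sock_hold() before the lock is released, so the socket cannot go away between lookup and use. The general shape, sketched with a pthread rwlock and a plain refcount standing in for the kernel primitives (names invented for the example):

/* Sketch of "look up under a read lock, pin with a refcount, then drop the
 * lock", the pattern the l2tp receive path now follows with
 * read_lock_bh()/sock_hold().  Illustrative only.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <string.h>

struct sock_entry {
	const char *name;
	atomic_int refcnt;
};

static pthread_rwlock_t table_lock = PTHREAD_RWLOCK_INITIALIZER;
static struct sock_entry table[] = {
	{ "tunnel-1", 1 },
	{ "tunnel-2", 1 },
};

static struct sock_entry *lookup_and_hold(const char *name)
{
	struct sock_entry *found = NULL;

	pthread_rwlock_rdlock(&table_lock);
	for (size_t i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
		if (strcmp(table[i].name, name) == 0) {
			found = &table[i];
			atomic_fetch_add(&found->refcnt, 1);  /* ~ sock_hold() */
			break;
		}
	}
	pthread_rwlock_unlock(&table_lock);      /* safe: we hold a reference */
	return found;
}

static void put_entry(struct sock_entry *e)
{
	atomic_fetch_sub(&e->refcnt, 1);         /* ~ sock_put() */
}

int main(void)
{
	struct sock_entry *sk = lookup_and_hold("tunnel-2");

	if (sk) {
		printf("using %s with refcnt %d\n", sk->name, atomic_load(&sk->refcnt));
		put_entry(sk);
	}
	return 0;
}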

+ 9 - 15
net/l2tp/l2tp_ip6.c

@@ -136,6 +136,7 @@ static int l2tp_ip6_recv(struct sk_buff *skb)
 	unsigned char *ptr, *optr;
 	struct l2tp_session *session;
 	struct l2tp_tunnel *tunnel = NULL;
+	struct ipv6hdr *iph;
 	int length;
 
 	if (!pskb_may_pull(skb, 4))
@@ -192,24 +193,17 @@ pass_up:
 		goto discard;
 
 	tunnel_id = ntohl(*(__be32 *) &skb->data[4]);
-	tunnel = l2tp_tunnel_find(net, tunnel_id);
-	if (tunnel) {
-		sk = tunnel->sock;
-		sock_hold(sk);
-	} else {
-		struct ipv6hdr *iph = ipv6_hdr(skb);
-
-		read_lock_bh(&l2tp_ip6_lock);
-		sk = __l2tp_ip6_bind_lookup(net, &iph->daddr, &iph->saddr,
-					    inet6_iif(skb), tunnel_id);
-		if (!sk) {
-			read_unlock_bh(&l2tp_ip6_lock);
-			goto discard;
-		}
+	iph = ipv6_hdr(skb);
 
-		sock_hold(sk);
+	read_lock_bh(&l2tp_ip6_lock);
+	sk = __l2tp_ip6_bind_lookup(net, &iph->daddr, &iph->saddr,
+				    inet6_iif(skb), tunnel_id);
+	if (!sk) {
 		read_unlock_bh(&l2tp_ip6_lock);
+		goto discard;
 	}
+	sock_hold(sk);
+	read_unlock_bh(&l2tp_ip6_lock);
 
 	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
 		goto discard_put;

+ 1 - 1
net/qrtr/qrtr.c

@@ -1120,7 +1120,7 @@ static int __init qrtr_proto_init(void)
 
 	return 0;
 }
-module_init(qrtr_proto_init);
+postcore_initcall(qrtr_proto_init);
 
 static void __exit qrtr_proto_fini(void)
 {

+ 0 - 2
net/sched/act_api.c

@@ -80,7 +80,6 @@ static void tcf_idr_remove(struct tcf_idrinfo *idrinfo, struct tc_action *p)
 	spin_lock_bh(&idrinfo->lock);
 	idr_remove_ext(&idrinfo->action_idr, p->tcfa_index);
 	spin_unlock_bh(&idrinfo->lock);
-	put_net(idrinfo->net);
 	gen_kill_estimator(&p->tcfa_rate_est);
 	free_tcf(p);
 }
@@ -339,7 +338,6 @@ err3:
 	p->idrinfo = idrinfo;
 	p->ops = ops;
 	INIT_LIST_HEAD(&p->list);
-	get_net(idrinfo->net);
 	*a = p;
 	return 0;
 }

+ 1 - 1
net/sched/act_bpf.c

@@ -398,7 +398,7 @@ static __net_init int bpf_init_net(struct net *net)
 {
 	struct tc_action_net *tn = net_generic(net, bpf_net_id);
 
-	return tc_action_net_init(tn, &act_bpf_ops, net);
+	return tc_action_net_init(tn, &act_bpf_ops);
 }
 
 static void __net_exit bpf_exit_net(struct net *net)

+ 1 - 1
net/sched/act_connmark.c

@@ -206,7 +206,7 @@ static __net_init int connmark_init_net(struct net *net)
 {
 	struct tc_action_net *tn = net_generic(net, connmark_net_id);
 
-	return tc_action_net_init(tn, &act_connmark_ops, net);
+	return tc_action_net_init(tn, &act_connmark_ops);
 }
 
 static void __net_exit connmark_exit_net(struct net *net)

+ 1 - 1
net/sched/act_csum.c

@@ -626,7 +626,7 @@ static __net_init int csum_init_net(struct net *net)
 {
 	struct tc_action_net *tn = net_generic(net, csum_net_id);
 
-	return tc_action_net_init(tn, &act_csum_ops, net);
+	return tc_action_net_init(tn, &act_csum_ops);
 }
 
 static void __net_exit csum_exit_net(struct net *net)

+ 1 - 1
net/sched/act_gact.c

@@ -232,7 +232,7 @@ static __net_init int gact_init_net(struct net *net)
 {
 	struct tc_action_net *tn = net_generic(net, gact_net_id);
 
-	return tc_action_net_init(tn, &act_gact_ops, net);
+	return tc_action_net_init(tn, &act_gact_ops);
 }
 
 static void __net_exit gact_exit_net(struct net *net)

+ 1 - 1
net/sched/act_ife.c

@@ -855,7 +855,7 @@ static __net_init int ife_init_net(struct net *net)
 {
 	struct tc_action_net *tn = net_generic(net, ife_net_id);
 
-	return tc_action_net_init(tn, &act_ife_ops, net);
+	return tc_action_net_init(tn, &act_ife_ops);
 }
 
 static void __net_exit ife_exit_net(struct net *net)

+ 2 - 2
net/sched/act_ipt.c

@@ -334,7 +334,7 @@ static __net_init int ipt_init_net(struct net *net)
 {
 	struct tc_action_net *tn = net_generic(net, ipt_net_id);
 
-	return tc_action_net_init(tn, &act_ipt_ops, net);
+	return tc_action_net_init(tn, &act_ipt_ops);
 }
 
 static void __net_exit ipt_exit_net(struct net *net)
@@ -384,7 +384,7 @@ static __net_init int xt_init_net(struct net *net)
 {
 	struct tc_action_net *tn = net_generic(net, xt_net_id);
 
-	return tc_action_net_init(tn, &act_xt_ops, net);
+	return tc_action_net_init(tn, &act_xt_ops);
 }
 
 static void __net_exit xt_exit_net(struct net *net)

+ 1 - 1
net/sched/act_mirred.c

@@ -340,7 +340,7 @@ static __net_init int mirred_init_net(struct net *net)
 {
 	struct tc_action_net *tn = net_generic(net, mirred_net_id);
 
-	return tc_action_net_init(tn, &act_mirred_ops, net);
+	return tc_action_net_init(tn, &act_mirred_ops);
 }
 
 static void __net_exit mirred_exit_net(struct net *net)

+ 1 - 1
net/sched/act_nat.c

@@ -307,7 +307,7 @@ static __net_init int nat_init_net(struct net *net)
 {
 	struct tc_action_net *tn = net_generic(net, nat_net_id);
 
-	return tc_action_net_init(tn, &act_nat_ops, net);
+	return tc_action_net_init(tn, &act_nat_ops);
 }
 
 static void __net_exit nat_exit_net(struct net *net)

+ 1 - 1
net/sched/act_pedit.c

@@ -450,7 +450,7 @@ static __net_init int pedit_init_net(struct net *net)
 {
 	struct tc_action_net *tn = net_generic(net, pedit_net_id);
 
-	return tc_action_net_init(tn, &act_pedit_ops, net);
+	return tc_action_net_init(tn, &act_pedit_ops);
 }
 
 static void __net_exit pedit_exit_net(struct net *net)

+ 1 - 1
net/sched/act_police.c

@@ -331,7 +331,7 @@ static __net_init int police_init_net(struct net *net)
 {
 	struct tc_action_net *tn = net_generic(net, police_net_id);
 
-	return tc_action_net_init(tn, &act_police_ops, net);
+	return tc_action_net_init(tn, &act_police_ops);
 }
 
 static void __net_exit police_exit_net(struct net *net)

+ 1 - 1
net/sched/act_sample.c

@@ -240,7 +240,7 @@ static __net_init int sample_init_net(struct net *net)
 {
 	struct tc_action_net *tn = net_generic(net, sample_net_id);
 
-	return tc_action_net_init(tn, &act_sample_ops, net);
+	return tc_action_net_init(tn, &act_sample_ops);
 }
 
 static void __net_exit sample_exit_net(struct net *net)

+ 1 - 1
net/sched/act_simple.c

@@ -201,7 +201,7 @@ static __net_init int simp_init_net(struct net *net)
 {
 	struct tc_action_net *tn = net_generic(net, simp_net_id);
 
-	return tc_action_net_init(tn, &act_simp_ops, net);
+	return tc_action_net_init(tn, &act_simp_ops);
 }
 
 static void __net_exit simp_exit_net(struct net *net)

+ 1 - 1
net/sched/act_skbedit.c

@@ -238,7 +238,7 @@ static __net_init int skbedit_init_net(struct net *net)
 {
 	struct tc_action_net *tn = net_generic(net, skbedit_net_id);
 
-	return tc_action_net_init(tn, &act_skbedit_ops, net);
+	return tc_action_net_init(tn, &act_skbedit_ops);
 }
 
 static void __net_exit skbedit_exit_net(struct net *net)

+ 1 - 1
net/sched/act_skbmod.c

@@ -263,7 +263,7 @@ static __net_init int skbmod_init_net(struct net *net)
 {
 	struct tc_action_net *tn = net_generic(net, skbmod_net_id);
 
-	return tc_action_net_init(tn, &act_skbmod_ops, net);
+	return tc_action_net_init(tn, &act_skbmod_ops);
 }
 
 static void __net_exit skbmod_exit_net(struct net *net)

+ 1 - 1
net/sched/act_tunnel_key.c

@@ -322,7 +322,7 @@ static __net_init int tunnel_key_init_net(struct net *net)
 {
 	struct tc_action_net *tn = net_generic(net, tunnel_key_net_id);
 
-	return tc_action_net_init(tn, &act_tunnel_key_ops, net);
+	return tc_action_net_init(tn, &act_tunnel_key_ops);
 }
 
 static void __net_exit tunnel_key_exit_net(struct net *net)

+ 1 - 1
net/sched/act_vlan.c

@@ -269,7 +269,7 @@ static __net_init int vlan_init_net(struct net *net)
 {
 	struct tc_action_net *tn = net_generic(net, vlan_net_id);
 
-	return tc_action_net_init(tn, &act_vlan_ops, net);
+	return tc_action_net_init(tn, &act_vlan_ops);
 }
 
 static void __net_exit vlan_exit_net(struct net *net)

+ 1 - 0
net/sched/cls_api.c

@@ -1110,6 +1110,7 @@ int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
 				exts->actions[i++] = act;
 			exts->nr_actions = i;
 		}
+		exts->net = net;
 	}
 #else
 	if ((exts->action && tb[exts->action]) ||

+ 15 - 5
net/sched/cls_basic.c

@@ -87,16 +87,21 @@ static int basic_init(struct tcf_proto *tp)
 	return 0;
 }
 
+static void __basic_delete_filter(struct basic_filter *f)
+{
+	tcf_exts_destroy(&f->exts);
+	tcf_em_tree_destroy(&f->ematches);
+	tcf_exts_put_net(&f->exts);
+	kfree(f);
+}
+
 static void basic_delete_filter_work(struct work_struct *work)
 {
 	struct basic_filter *f = container_of(work, struct basic_filter, work);
 
 	rtnl_lock();
-	tcf_exts_destroy(&f->exts);
-	tcf_em_tree_destroy(&f->ematches);
+	__basic_delete_filter(f);
 	rtnl_unlock();
-
-	kfree(f);
 }
 
 static void basic_delete_filter(struct rcu_head *head)
@@ -116,7 +121,10 @@ static void basic_destroy(struct tcf_proto *tp)
 		list_del_rcu(&f->link);
 		tcf_unbind_filter(tp, &f->res);
 		idr_remove_ext(&head->handle_idr, f->handle);
-		call_rcu(&f->rcu, basic_delete_filter);
+		if (tcf_exts_get_net(&f->exts))
+			call_rcu(&f->rcu, basic_delete_filter);
+		else
+			__basic_delete_filter(f);
 	}
 	idr_destroy(&head->handle_idr);
 	kfree_rcu(head, rcu);
@@ -130,6 +138,7 @@ static int basic_delete(struct tcf_proto *tp, void *arg, bool *last)
 	list_del_rcu(&f->link);
 	tcf_unbind_filter(tp, &f->res);
 	idr_remove_ext(&head->handle_idr, f->handle);
+	tcf_exts_get_net(&f->exts);
 	call_rcu(&f->rcu, basic_delete_filter);
 	*last = list_empty(&head->flist);
 	return 0;
@@ -225,6 +234,7 @@ static int basic_change(struct net *net, struct sk_buff *in_skb,
 		idr_replace_ext(&head->handle_idr, fnew, fnew->handle);
 		list_replace_rcu(&fold->link, &fnew->link);
 		tcf_unbind_filter(tp, &fold->res);
+		tcf_exts_get_net(&fold->exts);
 		call_rcu(&fold->rcu, basic_delete_filter);
 	} else {
 		list_add_rcu(&fnew->link, &head->flist);

+ 6 - 1
net/sched/cls_bpf.c

@@ -261,6 +261,7 @@ static int cls_bpf_init(struct tcf_proto *tp)
 static void __cls_bpf_delete_prog(struct cls_bpf_prog *prog)
 {
 	tcf_exts_destroy(&prog->exts);
+	tcf_exts_put_net(&prog->exts);
 
 	if (cls_bpf_is_ebpf(prog))
 		bpf_prog_put(prog->filter);
@@ -297,7 +298,10 @@ static void __cls_bpf_delete(struct tcf_proto *tp, struct cls_bpf_prog *prog)
 	cls_bpf_stop_offload(tp, prog);
 	list_del_rcu(&prog->link);
 	tcf_unbind_filter(tp, &prog->res);
-	call_rcu(&prog->rcu, cls_bpf_delete_prog_rcu);
+	if (tcf_exts_get_net(&prog->exts))
+		call_rcu(&prog->rcu, cls_bpf_delete_prog_rcu);
+	else
+		__cls_bpf_delete_prog(prog);
 }
 
 static int cls_bpf_delete(struct tcf_proto *tp, void *arg, bool *last)
@@ -526,6 +530,7 @@ static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
 		idr_replace_ext(&head->handle_idr, prog, handle);
 		list_replace_rcu(&oldprog->link, &prog->link);
 		tcf_unbind_filter(tp, &oldprog->res);
+		tcf_exts_get_net(&oldprog->exts);
 		call_rcu(&oldprog->rcu, cls_bpf_delete_prog_rcu);
 	} else {
 		list_add_rcu(&prog->link, &head->plist);

+ 18 - 6
net/sched/cls_cgroup.c

@@ -60,15 +60,21 @@ static const struct nla_policy cgroup_policy[TCA_CGROUP_MAX + 1] = {
 	[TCA_CGROUP_EMATCHES]	= { .type = NLA_NESTED },
 };
 
+static void __cls_cgroup_destroy(struct cls_cgroup_head *head)
+{
+	tcf_exts_destroy(&head->exts);
+	tcf_em_tree_destroy(&head->ematches);
+	tcf_exts_put_net(&head->exts);
+	kfree(head);
+}
+
 static void cls_cgroup_destroy_work(struct work_struct *work)
 {
 	struct cls_cgroup_head *head = container_of(work,
 						    struct cls_cgroup_head,
 						    work);
 	rtnl_lock();
-	tcf_exts_destroy(&head->exts);
-	tcf_em_tree_destroy(&head->ematches);
-	kfree(head);
+	__cls_cgroup_destroy(head);
 	rtnl_unlock();
 }
 
@@ -124,8 +130,10 @@ static int cls_cgroup_change(struct net *net, struct sk_buff *in_skb,
 		goto errout;
 
 	rcu_assign_pointer(tp->root, new);
-	if (head)
+	if (head) {
+		tcf_exts_get_net(&head->exts);
 		call_rcu(&head->rcu, cls_cgroup_destroy_rcu);
+	}
 	return 0;
 errout:
 	tcf_exts_destroy(&new->exts);
@@ -138,8 +146,12 @@ static void cls_cgroup_destroy(struct tcf_proto *tp)
 	struct cls_cgroup_head *head = rtnl_dereference(tp->root);
 
 	/* Head can still be NULL due to cls_cgroup_init(). */
-	if (head)
-		call_rcu(&head->rcu, cls_cgroup_destroy_rcu);
+	if (head) {
+		if (tcf_exts_get_net(&head->exts))
+			call_rcu(&head->rcu, cls_cgroup_destroy_rcu);
+		else
+			__cls_cgroup_destroy(head);
+	}
 }
 
 static int cls_cgroup_delete(struct tcf_proto *tp, void *arg, bool *last)

+ 18 - 6
net/sched/cls_flow.c

@@ -372,15 +372,21 @@ static const struct nla_policy flow_policy[TCA_FLOW_MAX + 1] = {
 	[TCA_FLOW_PERTURB]	= { .type = NLA_U32 },
 };
 
-static void flow_destroy_filter_work(struct work_struct *work)
+static void __flow_destroy_filter(struct flow_filter *f)
 {
-	struct flow_filter *f = container_of(work, struct flow_filter, work);
-
-	rtnl_lock();
 	del_timer_sync(&f->perturb_timer);
 	tcf_exts_destroy(&f->exts);
 	tcf_em_tree_destroy(&f->ematches);
+	tcf_exts_put_net(&f->exts);
 	kfree(f);
+}
+
+static void flow_destroy_filter_work(struct work_struct *work)
+{
+	struct flow_filter *f = container_of(work, struct flow_filter, work);
+
+	rtnl_lock();
+	__flow_destroy_filter(f);
 	rtnl_unlock();
 }
 
@@ -554,8 +560,10 @@ static int flow_change(struct net *net, struct sk_buff *in_skb,
 
 	*arg = fnew;
 
-	if (fold)
+	if (fold) {
+		tcf_exts_get_net(&fold->exts);
 		call_rcu(&fold->rcu, flow_destroy_filter);
+	}
 	return 0;
 
 err2:
@@ -572,6 +580,7 @@ static int flow_delete(struct tcf_proto *tp, void *arg, bool *last)
 	struct flow_filter *f = arg;
 
 	list_del_rcu(&f->list);
+	tcf_exts_get_net(&f->exts);
 	call_rcu(&f->rcu, flow_destroy_filter);
 	*last = list_empty(&head->filters);
 	return 0;
@@ -596,7 +605,10 @@ static void flow_destroy(struct tcf_proto *tp)
 
 	list_for_each_entry_safe(f, next, &head->filters, list) {
 		list_del_rcu(&f->list);
-		call_rcu(&f->rcu, flow_destroy_filter);
+		if (tcf_exts_get_net(&f->exts))
+			call_rcu(&f->rcu, flow_destroy_filter);
+		else
+			__flow_destroy_filter(f);
 	}
 	kfree_rcu(head, rcu);
 }

Some files were not shown because too many files changed in this diff.