
Merge branch 'drm-armada-fixes' of git://ftp.arm.linux.org.uk/~rmk/linux-cubox into drm-next

Just a one-liner correcting a select statement for DRM_KMS_FB_HELPER
that looks like it was missed in the initial merge.  Based on 3.13.

* 'drm-armada-fixes' of git://ftp.arm.linux.org.uk/~rmk/linux-cubox: (55 commits)
  DRM: armada: fix missing DRM_KMS_FB_HELPER select
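
For context, the fix just adds one more select line next to the existing
DRM_KMS_HELPER one (the full hunk appears under drivers/gpu/drm/armada/Kconfig
below); a minimal sketch of the resulting Kconfig entry, with the tristate
prompt line assumed rather than taken from this page:

	config DRM_ARMADA
		tristate "DRM support for Marvell Armada SoCs"	# prompt text assumed
		select DRM_KMS_HELPER
		select DRM_KMS_FB_HELPER	# the one-line fix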
Dave Airlie, 11 years ago
parent
commit a5bd4f8ab0
63 files changed, 384 insertions(+), 234 deletions(-)
  1. MAINTAINERS (+3 -0)
  2. Makefile (+1 -1)
  3. arch/arm/kernel/devtree.c (+1 -1)
  4. arch/arm/kernel/perf_event_cpu.c (+1 -1)
  5. arch/arm/kernel/traps.c (+3 -2)
  6. arch/arm/mach-highbank/highbank.c (+1 -0)
  7. arch/arm/mach-omap2/omap4-common.c (+1 -0)
  8. arch/arm/mm/init.c (+1 -1)
  9. arch/arm/net/bpf_jit_32.c (+3 -3)
  10. arch/arm64/include/asm/io.h (+1 -1)
  11. arch/mips/include/asm/cacheops.h (+1 -1)
  12. arch/mips/include/asm/r4kcache.h (+26 -25)
  13. arch/mips/mm/c-r4k.c (+9 -2)
  14. arch/parisc/include/uapi/asm/socket.h (+1 -1)
  15. arch/powerpc/net/bpf_jit_comp.c (+4 -3)
  16. arch/s390/net/bpf_jit_comp.c (+18 -11)
  17. arch/sparc/net/bpf_jit_comp.c (+14 -3)
  18. arch/x86/kernel/cpu/perf_event_amd_ibs.c (+45 -8)
  19. arch/x86/kvm/lapic.c (+1 -1)
  20. arch/x86/mm/fault.c (+18 -0)
  21. arch/x86/net/bpf_jit_comp.c (+10 -4)
  22. arch/x86/vdso/vclock_gettime.c (+4 -4)
  23. drivers/acpi/acpi_lpss.c (+0 -1)
  24. drivers/clocksource/cadence_ttc_timer.c (+13 -8)
  25. drivers/gpu/drm/armada/Kconfig (+1 -0)
  26. drivers/gpu/drm/nouveau/core/subdev/mxm/nv50.c (+2 -2)
  27. drivers/hwmon/coretemp.c (+1 -1)
  28. drivers/md/md.c (+15 -3)
  29. drivers/md/md.h (+3 -0)
  30. drivers/md/raid1.c (+1 -2)
  31. drivers/md/raid10.c (+6 -6)
  32. drivers/md/raid5.c (+4 -3)
  33. drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c (+15 -14)
  34. drivers/net/ethernet/chelsio/cxgb4/l2t.c (+1 -1)
  35. drivers/net/ethernet/emulex/benet/be_main.c (+9 -2)
  36. drivers/net/ethernet/intel/e1000e/netdev.c (+2 -6)
  37. drivers/net/ethernet/qlogic/qlge/qlge_main.c (+2 -0)
  38. drivers/net/ethernet/via/via-rhine.c (+1 -0)
  39. drivers/net/usb/dm9601.c (+12 -0)
  40. drivers/net/usb/usbnet.c (+1 -1)
  41. drivers/pinctrl/pinctrl-baytrail.c (+0 -1)
  42. fs/dcache.c (+6 -1)
  43. fs/fs-writeback.c (+9 -6)
  44. fs/namespace.c (+1 -1)
  45. fs/nilfs2/segment.c (+6 -4)
  46. include/linux/crash_dump.h (+2 -0)
  47. include/linux/i2c.h (+1 -1)
  48. include/linux/seqlock.h (+19 -8)
  49. include/net/if_inet6.h (+0 -1)
  50. kernel/fork.c (+1 -1)
  51. kernel/sched/fair.c (+1 -1)
  52. kernel/time/sched_clock.c (+3 -3)
  53. lib/percpu_counter.c (+2 -2)
  54. mm/util.c (+4 -1)
  55. net/batman-adv/main.c (+1 -1)
  56. net/core/filter.c (+2 -28)
  57. net/ieee802154/nl-phy.c (+4 -2)
  58. net/ipv4/inet_diag.c (+4 -1)
  59. net/ipv4/ipmr.c (+5 -2)
  60. net/ipv4/tcp_metrics.c (+32 -19)
  61. net/ipv6/addrconf.c (+17 -21)
  62. net/ipv6/ip6mr.c (+5 -2)
  63. net/rds/ib_recv.c (+3 -4)

+ 3 - 0
MAINTAINERS

@@ -9231,6 +9231,7 @@ F:	include/media/videobuf2-*
 
 VIRTIO CONSOLE DRIVER
 M:	Amit Shah <amit.shah@redhat.com>
+L:	virtio-dev@lists.oasis-open.org
 L:	virtualization@lists.linux-foundation.org
 S:	Maintained
 F:	drivers/char/virtio_console.c
@@ -9240,6 +9241,7 @@ F:	include/uapi/linux/virtio_console.h
 VIRTIO CORE, NET AND BLOCK DRIVERS
 M:	Rusty Russell <rusty@rustcorp.com.au>
 M:	"Michael S. Tsirkin" <mst@redhat.com>
+L:	virtio-dev@lists.oasis-open.org
 L:	virtualization@lists.linux-foundation.org
 S:	Maintained
 F:	drivers/virtio/
@@ -9252,6 +9254,7 @@ F:	include/uapi/linux/virtio_*.h
 VIRTIO HOST (VHOST)
 M:	"Michael S. Tsirkin" <mst@redhat.com>
 L:	kvm@vger.kernel.org
+L:	virtio-dev@lists.oasis-open.org
 L:	virtualization@lists.linux-foundation.org
 L:	netdev@vger.kernel.org
 S:	Maintained

+ 1 - 1
Makefile

@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 13
 SUBLEVEL = 0
-EXTRAVERSION = -rc8
+EXTRAVERSION =
 NAME = One Giant Leap for Frogkind
 
 # *DOCUMENTATION*

+ 1 - 1
arch/arm/kernel/devtree.c

@@ -171,7 +171,7 @@ void __init arm_dt_init_cpu_maps(void)
 
 bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
 {
-	return (phys_id & MPIDR_HWID_BITMASK) == cpu_logical_map(cpu);
+	return phys_id == cpu_logical_map(cpu);
 }
 
 static const void * __init arch_get_next_mach(const char *const **match)

+ 1 - 1
arch/arm/kernel/perf_event_cpu.c

@@ -254,7 +254,7 @@ static int probe_current_pmu(struct arm_pmu *pmu)
 static int cpu_pmu_device_probe(struct platform_device *pdev)
 {
 	const struct of_device_id *of_id;
-	int (*init_fn)(struct arm_pmu *);
+	const int (*init_fn)(struct arm_pmu *);
 	struct device_node *node = pdev->dev.of_node;
 	struct arm_pmu *pmu;
 	int ret = -ENODEV;

+ 3 - 2
arch/arm/kernel/traps.c

@@ -431,9 +431,10 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
 			instr2 = __mem_to_opcode_thumb16(instr2);
 			instr = __opcode_thumb32_compose(instr, instr2);
 		}
-	} else if (get_user(instr, (u32 __user *)pc)) {
+	} else {
+		if (get_user(instr, (u32 __user *)pc))
+			goto die_sig;
 		instr = __mem_to_opcode_arm(instr);
-		goto die_sig;
 	}
 
 	if (call_undef_hook(regs, instr) == 0)

+ 1 - 0
arch/arm/mach-highbank/highbank.c

@@ -53,6 +53,7 @@ static void __init highbank_scu_map_io(void)
 
 static void highbank_l2x0_disable(void)
 {
+	outer_flush_all();
 	/* Disable PL310 L2 Cache controller */
 	highbank_smc1(0x102, 0x0);
 }

+ 1 - 0
arch/arm/mach-omap2/omap4-common.c

@@ -162,6 +162,7 @@ void __iomem *omap4_get_l2cache_base(void)
 
 static void omap4_l2x0_disable(void)
 {
+	outer_flush_all();
 	/* Disable PL310 L2 Cache controller */
 	omap_smc1(0x102, 0x0);
 }

+ 1 - 1
arch/arm/mm/init.c

@@ -229,7 +229,7 @@ void __init setup_dma_zone(const struct machine_desc *mdesc)
 #ifdef CONFIG_ZONE_DMA
 	if (mdesc->dma_zone_size) {
 		arm_dma_zone_size = mdesc->dma_zone_size;
-		arm_dma_limit = __pv_phys_offset + arm_dma_zone_size - 1;
+		arm_dma_limit = PHYS_OFFSET + arm_dma_zone_size - 1;
 	} else
 		arm_dma_limit = 0xffffffff;
 	arm_dma_pfn_limit = arm_dma_limit >> PAGE_SHIFT;

+ 3 - 3
arch/arm/net/bpf_jit_32.c

@@ -641,10 +641,10 @@ load_ind:
 			emit(ARM_MUL(r_A, r_A, r_X), ctx);
 			break;
 		case BPF_S_ALU_DIV_K:
-			/* current k == reciprocal_value(userspace k) */
+			if (k == 1)
+				break;
 			emit_mov_i(r_scratch, k, ctx);
-			/* A = top 32 bits of the product */
-			emit(ARM_UMULL(r_scratch, r_A, r_A, r_scratch), ctx);
+			emit_udiv(r_A, r_A, r_scratch, ctx);
 			break;
 		case BPF_S_ALU_DIV_X:
 			update_on_xread(ctx);

+ 1 - 1
arch/arm64/include/asm/io.h

@@ -229,7 +229,7 @@ extern void __iomem *__ioremap(phys_addr_t phys_addr, size_t size, pgprot_t prot
 extern void __iounmap(volatile void __iomem *addr);
 extern void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size);
 
-#define PROT_DEFAULT		(pgprot_default | PTE_DIRTY)
+#define PROT_DEFAULT		(PTE_TYPE_PAGE | PTE_AF | PTE_DIRTY)
 #define PROT_DEVICE_nGnRE	(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_DEVICE_nGnRE))
 #define PROT_NORMAL_NC		(PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL_NC))
 #define PROT_NORMAL		(PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL))

+ 1 - 1
arch/mips/include/asm/cacheops.h

@@ -83,6 +83,6 @@
 /*
  * Loongson2-specific cacheops
  */
-#define Hit_Invalidate_I_Loongson23	0x00
+#define Hit_Invalidate_I_Loongson2	0x00
 
 #endif	/* __ASM_CACHEOPS_H */

+ 26 - 25
arch/mips/include/asm/r4kcache.h

@@ -165,7 +165,7 @@ static inline void flush_icache_line(unsigned long addr)
 	__iflush_prologue
 	switch (boot_cpu_type()) {
 	case CPU_LOONGSON2:
-		cache_op(Hit_Invalidate_I_Loongson23, addr);
+		cache_op(Hit_Invalidate_I_Loongson2, addr);
 		break;
 
 	default:
@@ -219,7 +219,7 @@ static inline void protected_flush_icache_line(unsigned long addr)
 {
 	switch (boot_cpu_type()) {
 	case CPU_LOONGSON2:
-		protected_cache_op(Hit_Invalidate_I_Loongson23, addr);
+		protected_cache_op(Hit_Invalidate_I_Loongson2, addr);
 		break;
 
 	default:
@@ -357,8 +357,8 @@ static inline void invalidate_tcache_page(unsigned long addr)
 		  "i" (op));
 
 /* build blast_xxx, blast_xxx_page, blast_xxx_page_indexed */
-#define __BUILD_BLAST_CACHE(pfx, desc, indexop, hitop, lsize) \
-static inline void blast_##pfx##cache##lsize(void)			\
+#define __BUILD_BLAST_CACHE(pfx, desc, indexop, hitop, lsize, extra)	\
+static inline void extra##blast_##pfx##cache##lsize(void)		\
 {									\
 	unsigned long start = INDEX_BASE;				\
 	unsigned long end = start + current_cpu_data.desc.waysize;	\
@@ -376,7 +376,7 @@ static inline void blast_##pfx##cache##lsize(void)			\
 	__##pfx##flush_epilogue						\
 }									\
 									\
-static inline void blast_##pfx##cache##lsize##_page(unsigned long page) \
+static inline void extra##blast_##pfx##cache##lsize##_page(unsigned long page) \
 {									\
 	unsigned long start = page;					\
 	unsigned long end = page + PAGE_SIZE;				\
@@ -391,7 +391,7 @@ static inline void blast_##pfx##cache##lsize##_page(unsigned long page) \
 	__##pfx##flush_epilogue						\
 }									\
 									\
-static inline void blast_##pfx##cache##lsize##_page_indexed(unsigned long page) \
+static inline void extra##blast_##pfx##cache##lsize##_page_indexed(unsigned long page) \
 {									\
 	unsigned long indexmask = current_cpu_data.desc.waysize - 1;	\
 	unsigned long start = INDEX_BASE + (page & indexmask);		\
@@ -410,23 +410,24 @@ static inline void blast_##pfx##cache##lsize##_page_indexed(unsigned long page)
 	__##pfx##flush_epilogue						\
 }
 
-__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 16)
-__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 16)
-__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 16)
-__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 32)
-__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 32)
-__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 32)
-__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 64)
-__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64)
-__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 64)
-__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 128)
-
-__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 16)
-__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 32)
-__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 16)
-__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 32)
-__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 64)
-__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 128)
+__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 16, )
+__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 16, )
+__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 16, )
+__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 32, )
+__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 32, )
+__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I_Loongson2, 32, loongson2_)
+__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 32, )
+__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 64, )
+__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64, )
+__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 64, )
+__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 128, )
+
+__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 16, )
+__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 32, )
+__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 16, )
+__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 32, )
+__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 64, )
+__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 128, )
 
 /* build blast_xxx_range, protected_blast_xxx_range */
 #define __BUILD_BLAST_CACHE_RANGE(pfx, desc, hitop, prot, extra)	\
@@ -452,8 +453,8 @@ static inline void prot##extra##blast_##pfx##cache##_range(unsigned long start,
 __BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, protected_, )
 __BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, protected_, )
 __BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, protected_, )
-__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I_Loongson23, \
-	protected_, loongson23_)
+__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I_Loongson2, \
+	protected_, loongson2_)
 __BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, , )
 __BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, , )
 /* blast_inv_dcache_range */

+ 9 - 2
arch/mips/mm/c-r4k.c

@@ -237,6 +237,8 @@ static void r4k_blast_icache_page_setup(void)
 		r4k_blast_icache_page = (void *)cache_noop;
 	else if (ic_lsize == 16)
 		r4k_blast_icache_page = blast_icache16_page;
+	else if (ic_lsize == 32 && current_cpu_type() == CPU_LOONGSON2)
+		r4k_blast_icache_page = loongson2_blast_icache32_page;
 	else if (ic_lsize == 32)
 		r4k_blast_icache_page = blast_icache32_page;
 	else if (ic_lsize == 64)
@@ -261,6 +263,9 @@ static void r4k_blast_icache_page_indexed_setup(void)
 		else if (TX49XX_ICACHE_INDEX_INV_WAR)
 			r4k_blast_icache_page_indexed =
 				tx49_blast_icache32_page_indexed;
+		else if (current_cpu_type() == CPU_LOONGSON2)
+			r4k_blast_icache_page_indexed =
+				loongson2_blast_icache32_page_indexed;
 		else
 			r4k_blast_icache_page_indexed =
 				blast_icache32_page_indexed;
@@ -284,6 +289,8 @@ static void r4k_blast_icache_setup(void)
 			r4k_blast_icache = blast_r4600_v1_icache32;
 		else if (TX49XX_ICACHE_INDEX_INV_WAR)
 			r4k_blast_icache = tx49_blast_icache32;
+		else if (current_cpu_type() == CPU_LOONGSON2)
+			r4k_blast_icache = loongson2_blast_icache32;
 		else
 			r4k_blast_icache = blast_icache32;
 	} else if (ic_lsize == 64)
@@ -580,11 +587,11 @@ static inline void local_r4k_flush_icache_range(unsigned long start, unsigned lo
 	else {
 		switch (boot_cpu_type()) {
 		case CPU_LOONGSON2:
-			protected_blast_icache_range(start, end);
+			protected_loongson2_blast_icache_range(start, end);
 			break;
 
 		default:
-			protected_loongson23_blast_icache_range(start, end);
+			protected_blast_icache_range(start, end);
 			break;
 		}
 	}

+ 1 - 1
arch/parisc/include/uapi/asm/socket.h

@@ -75,6 +75,6 @@
 
 #define SO_BUSY_POLL		0x4027
 
-#define SO_MAX_PACING_RATE	0x4048
+#define SO_MAX_PACING_RATE	0x4028
 
 #endif /* _UAPI_ASM_SOCKET_H */

+ 4 - 3
arch/powerpc/net/bpf_jit_comp.c

@@ -223,10 +223,11 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
 			}
 			PPC_DIVWU(r_A, r_A, r_X);
 			break;
-		case BPF_S_ALU_DIV_K: /* A = reciprocal_divide(A, K); */
+		case BPF_S_ALU_DIV_K: /* A /= K */
+			if (K == 1)
+				break;
 			PPC_LI32(r_scratch1, K);
-			/* Top 32 bits of 64bit result -> A */
-			PPC_MULHWU(r_A, r_A, r_scratch1);
+			PPC_DIVWU(r_A, r_A, r_scratch1);
 			break;
 		case BPF_S_ALU_AND_X:
 			ctx->seen |= SEEN_XREG;

+ 18 - 11
arch/s390/net/bpf_jit_comp.c

@@ -368,14 +368,16 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct sock_filter *filter,
 		EMIT4_PCREL(0xa7840000, (jit->ret0_ip - jit->prg));
 		/* lhi %r4,0 */
 		EMIT4(0xa7480000);
-		/* dr %r4,%r12 */
-		EMIT2(0x1d4c);
+		/* dlr %r4,%r12 */
+		EMIT4(0xb997004c);
 		break;
-	case BPF_S_ALU_DIV_K: /* A = reciprocal_divide(A, K) */
-		/* m %r4,<d(K)>(%r13) */
-		EMIT4_DISP(0x5c40d000, EMIT_CONST(K));
-		/* lr %r5,%r4 */
-		EMIT2(0x1854);
+	case BPF_S_ALU_DIV_K: /* A /= K */
+		if (K == 1)
+			break;
+		/* lhi %r4,0 */
+		EMIT4(0xa7480000);
+		/* dl %r4,<d(K)>(%r13) */
+		EMIT6_DISP(0xe340d000, 0x0097, EMIT_CONST(K));
 		break;
 	case BPF_S_ALU_MOD_X: /* A %= X */
 		jit->seen |= SEEN_XREG | SEEN_RET0;
@@ -385,16 +387,21 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct sock_filter *filter,
 		EMIT4_PCREL(0xa7840000, (jit->ret0_ip - jit->prg));
 		/* lhi %r4,0 */
 		EMIT4(0xa7480000);
-		/* dr %r4,%r12 */
-		EMIT2(0x1d4c);
+		/* dlr %r4,%r12 */
+		EMIT4(0xb997004c);
 		/* lr %r5,%r4 */
 		EMIT2(0x1854);
 		break;
 	case BPF_S_ALU_MOD_K: /* A %= K */
+		if (K == 1) {
+			/* lhi %r5,0 */
+			EMIT4(0xa7580000);
+			break;
+		}
 		/* lhi %r4,0 */
 		EMIT4(0xa7480000);
-		/* d %r4,<d(K)>(%r13) */
-		EMIT4_DISP(0x5d40d000, EMIT_CONST(K));
+		/* dl %r4,<d(K)>(%r13) */
+		EMIT6_DISP(0xe340d000, 0x0097, EMIT_CONST(K));
 		/* lr %r5,%r4 */
 		EMIT2(0x1854);
 		break;

+ 14 - 3
arch/sparc/net/bpf_jit_comp.c

@@ -497,9 +497,20 @@ void bpf_jit_compile(struct sk_filter *fp)
 			case BPF_S_ALU_MUL_K:	/* A *= K */
 				emit_alu_K(MUL, K);
 				break;
-			case BPF_S_ALU_DIV_K:	/* A /= K */
-				emit_alu_K(MUL, K);
-				emit_read_y(r_A);
+			case BPF_S_ALU_DIV_K:	/* A /= K with K != 0*/
+				if (K == 1)
+					break;
+				emit_write_y(G0);
+#ifdef CONFIG_SPARC32
+				/* The Sparc v8 architecture requires
+				 * three instructions between a %y
+				 * register write and the first use.
+				 */
+				emit_nop();
+				emit_nop();
+				emit_nop();
+#endif
+				emit_alu_K(DIV, K);
 				break;
 			case BPF_S_ALU_DIV_X:	/* A /= X; */
 				emit_cmpi(r_X, 0);

+ 45 - 8
arch/x86/kernel/cpu/perf_event_amd_ibs.c

@@ -10,6 +10,7 @@
 #include <linux/module.h>
 #include <linux/pci.h>
 #include <linux/ptrace.h>
+#include <linux/syscore_ops.h>
 
 #include <asm/apic.h>
 
@@ -816,6 +817,18 @@ out:
 	return ret;
 }
 
+static void ibs_eilvt_setup(void)
+{
+	/*
+	 * Force LVT offset assignment for family 10h: The offsets are
+	 * not assigned by the BIOS for this family, so the OS is
+	 * responsible for doing it. If the OS assignment fails, fall
+	 * back to BIOS settings and try to setup this.
+	 */
+	if (boot_cpu_data.x86 == 0x10)
+		force_ibs_eilvt_setup();
+}
+
 static inline int get_ibs_lvt_offset(void)
 {
 	u64 val;
@@ -851,6 +864,36 @@ static void clear_APIC_ibs(void *dummy)
 		setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_FIX, 1);
 }
 
+#ifdef CONFIG_PM
+
+static int perf_ibs_suspend(void)
+{
+	clear_APIC_ibs(NULL);
+	return 0;
+}
+
+static void perf_ibs_resume(void)
+{
+	ibs_eilvt_setup();
+	setup_APIC_ibs(NULL);
+}
+
+static struct syscore_ops perf_ibs_syscore_ops = {
+	.resume		= perf_ibs_resume,
+	.suspend	= perf_ibs_suspend,
+};
+
+static void perf_ibs_pm_init(void)
+{
+	register_syscore_ops(&perf_ibs_syscore_ops);
+}
+
+#else
+
+static inline void perf_ibs_pm_init(void) { }
+
+#endif
+
 static int
 perf_ibs_cpu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
 {
@@ -877,18 +920,12 @@ static __init int amd_ibs_init(void)
 	if (!caps)
 		return -ENODEV;	/* ibs not supported by the cpu */
 
-	/*
-	 * Force LVT offset assignment for family 10h: The offsets are
-	 * not assigned by the BIOS for this family, so the OS is
-	 * responsible for doing it. If the OS assignment fails, fall
-	 * back to BIOS settings and try to setup this.
-	 */
-	if (boot_cpu_data.x86 == 0x10)
-		force_ibs_eilvt_setup();
+	ibs_eilvt_setup();
 
 	if (!ibs_eilvt_valid())
 		goto out;
 
+	perf_ibs_pm_init();
 	get_online_cpus();
 	ibs_caps = caps;
 	/* make ibs_caps visible to other cpus: */

+ 1 - 1
arch/x86/kvm/lapic.c

@@ -1355,7 +1355,7 @@ void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
 	vcpu->arch.apic_base = value;
 
 	/* update jump label if enable bit changes */
-	if ((vcpu->arch.apic_base ^ value) & MSR_IA32_APICBASE_ENABLE) {
+	if ((old_value ^ value) & MSR_IA32_APICBASE_ENABLE) {
 		if (value & MSR_IA32_APICBASE_ENABLE)
 			static_key_slow_dec_deferred(&apic_hw_disabled);
 		else

+ 18 - 0
arch/x86/mm/fault.c

@@ -641,6 +641,20 @@ no_context(struct pt_regs *regs, unsigned long error_code,
 
 	/* Are we prepared to handle this kernel fault? */
 	if (fixup_exception(regs)) {
+		/*
+		 * Any interrupt that takes a fault gets the fixup. This makes
+		 * the below recursive fault logic only apply to a faults from
+		 * task context.
+		 */
+		if (in_interrupt())
+			return;
+
+		/*
+		 * Per the above we're !in_interrupt(), aka. task context.
+		 *
+		 * In this case we need to make sure we're not recursively
+		 * faulting through the emulate_vsyscall() logic.
+		 */
 		if (current_thread_info()->sig_on_uaccess_error && signal) {
 			tsk->thread.trap_nr = X86_TRAP_PF;
 			tsk->thread.error_code = error_code | PF_USER;
@@ -649,6 +663,10 @@ no_context(struct pt_regs *regs, unsigned long error_code,
 			/* XXX: hwpoison faults will set the wrong code. */
 			force_sig_info_fault(signal, si_code, address, tsk, 0);
 		}
+
+		/*
+		 * Barring that, we can do the fixup and be happy.
+		 */
 		return;
 	}
 

+ 10 - 4
arch/x86/net/bpf_jit_comp.c

@@ -359,15 +359,21 @@ void bpf_jit_compile(struct sk_filter *fp)
 				EMIT2(0x89, 0xd0);	/* mov %edx,%eax */
 				break;
 			case BPF_S_ALU_MOD_K: /* A %= K; */
+				if (K == 1) {
+					CLEAR_A();
+					break;
+				}
 				EMIT2(0x31, 0xd2);	/* xor %edx,%edx */
 				EMIT1(0xb9);EMIT(K, 4);	/* mov imm32,%ecx */
 				EMIT2(0xf7, 0xf1);	/* div %ecx */
 				EMIT2(0x89, 0xd0);	/* mov %edx,%eax */
 				break;
-			case BPF_S_ALU_DIV_K: /* A = reciprocal_divide(A, K); */
-				EMIT3(0x48, 0x69, 0xc0); /* imul imm32,%rax,%rax */
-				EMIT(K, 4);
-				EMIT4(0x48, 0xc1, 0xe8, 0x20); /* shr $0x20,%rax */
+			case BPF_S_ALU_DIV_K: /* A /= K */
+				if (K == 1)
+					break;
+				EMIT2(0x31, 0xd2);	/* xor %edx,%edx */
+				EMIT1(0xb9);EMIT(K, 4);	/* mov imm32,%ecx */
+				EMIT2(0xf7, 0xf1);	/* div %ecx */
 				break;
 			case BPF_S_ALU_AND_X:
 				seen |= SEEN_XREG;

+ 4 - 4
arch/x86/vdso/vclock_gettime.c

@@ -178,7 +178,7 @@ notrace static int __always_inline do_realtime(struct timespec *ts)
 
 	ts->tv_nsec = 0;
 	do {
-		seq = read_seqcount_begin_no_lockdep(&gtod->seq);
+		seq = raw_read_seqcount_begin(&gtod->seq);
 		mode = gtod->clock.vclock_mode;
 		ts->tv_sec = gtod->wall_time_sec;
 		ns = gtod->wall_time_snsec;
@@ -198,7 +198,7 @@ notrace static int do_monotonic(struct timespec *ts)
 
 	ts->tv_nsec = 0;
 	do {
-		seq = read_seqcount_begin_no_lockdep(&gtod->seq);
+		seq = raw_read_seqcount_begin(&gtod->seq);
 		mode = gtod->clock.vclock_mode;
 		ts->tv_sec = gtod->monotonic_time_sec;
 		ns = gtod->monotonic_time_snsec;
@@ -214,7 +214,7 @@ notrace static int do_realtime_coarse(struct timespec *ts)
 {
 	unsigned long seq;
 	do {
-		seq = read_seqcount_begin_no_lockdep(&gtod->seq);
+		seq = raw_read_seqcount_begin(&gtod->seq);
 		ts->tv_sec = gtod->wall_time_coarse.tv_sec;
 		ts->tv_nsec = gtod->wall_time_coarse.tv_nsec;
 	} while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
@@ -225,7 +225,7 @@ notrace static int do_monotonic_coarse(struct timespec *ts)
 {
 	unsigned long seq;
 	do {
-		seq = read_seqcount_begin_no_lockdep(&gtod->seq);
+		seq = raw_read_seqcount_begin(&gtod->seq);
 		ts->tv_sec = gtod->monotonic_time_coarse.tv_sec;
 		ts->tv_nsec = gtod->monotonic_time_coarse.tv_nsec;
 	} while (unlikely(read_seqcount_retry(&gtod->seq, seq)));

+ 0 - 1
drivers/acpi/acpi_lpss.c

@@ -162,7 +162,6 @@ static const struct acpi_device_id acpi_lpss_device_ids[] = {
 	{ "80860F14", (unsigned long)&byt_sdio_dev_desc },
 	{ "80860F41", (unsigned long)&byt_i2c_dev_desc },
 	{ "INT33B2", },
-	{ "INT33FC", },
 
 	{ "INT3430", (unsigned long)&lpt_dev_desc },
 	{ "INT3431", (unsigned long)&lpt_dev_desc },

+ 13 - 8
drivers/clocksource/cadence_ttc_timer.c

@@ -67,11 +67,13 @@
  * struct ttc_timer - This definition defines local timer structure
  *
  * @base_addr:	Base address of timer
+ * @freq:	Timer input clock frequency
  * @clk:	Associated clock source
  * @clk_rate_change_nb	Notifier block for clock rate changes
  */
 struct ttc_timer {
 	void __iomem *base_addr;
+	unsigned long freq;
 	struct clk *clk;
 	struct notifier_block clk_rate_change_nb;
 };
@@ -196,9 +198,8 @@ static void ttc_set_mode(enum clock_event_mode mode,
 
 	switch (mode) {
 	case CLOCK_EVT_MODE_PERIODIC:
-		ttc_set_interval(timer,
-				DIV_ROUND_CLOSEST(clk_get_rate(ttce->ttc.clk),
-					PRESCALE * HZ));
+		ttc_set_interval(timer, DIV_ROUND_CLOSEST(ttce->ttc.freq,
+						PRESCALE * HZ));
 		break;
 	case CLOCK_EVT_MODE_ONESHOT:
 	case CLOCK_EVT_MODE_UNUSED:
@@ -273,6 +274,8 @@ static void __init ttc_setup_clocksource(struct clk *clk, void __iomem *base)
 		return;
 	}
 
+	ttccs->ttc.freq = clk_get_rate(ttccs->ttc.clk);
+
 	ttccs->ttc.clk_rate_change_nb.notifier_call =
 		ttc_rate_change_clocksource_cb;
 	ttccs->ttc.clk_rate_change_nb.next = NULL;
@@ -298,16 +301,14 @@ static void __init ttc_setup_clocksource(struct clk *clk, void __iomem *base)
 	__raw_writel(CNT_CNTRL_RESET,
 		     ttccs->ttc.base_addr + TTC_CNT_CNTRL_OFFSET);
 
-	err = clocksource_register_hz(&ttccs->cs,
-			clk_get_rate(ttccs->ttc.clk) / PRESCALE);
+	err = clocksource_register_hz(&ttccs->cs, ttccs->ttc.freq / PRESCALE);
 	if (WARN_ON(err)) {
 		kfree(ttccs);
 		return;
 	}
 
 	ttc_sched_clock_val_reg = base + TTC_COUNT_VAL_OFFSET;
-	setup_sched_clock(ttc_sched_clock_read, 16,
-			clk_get_rate(ttccs->ttc.clk) / PRESCALE);
+	setup_sched_clock(ttc_sched_clock_read, 16, ttccs->ttc.freq / PRESCALE);
 }
 
 static int ttc_rate_change_clockevent_cb(struct notifier_block *nb,
@@ -334,6 +335,9 @@ static int ttc_rate_change_clockevent_cb(struct notifier_block *nb,
 				ndata->new_rate / PRESCALE);
 		local_irq_restore(flags);
 
+		/* update cached frequency */
+		ttc->freq = ndata->new_rate;
+
 		/* fall through */
 	}
 	case PRE_RATE_CHANGE:
@@ -367,6 +371,7 @@ static void __init ttc_setup_clockevent(struct clk *clk,
 	if (clk_notifier_register(ttcce->ttc.clk,
 				&ttcce->ttc.clk_rate_change_nb))
 		pr_warn("Unable to register clock notifier.\n");
+	ttcce->ttc.freq = clk_get_rate(ttcce->ttc.clk);
 
 	ttcce->ttc.base_addr = base;
 	ttcce->ce.name = "ttc_clockevent";
@@ -396,7 +401,7 @@ static void __init ttc_setup_clockevent(struct clk *clk,
 	}
 
 	clockevents_config_and_register(&ttcce->ce,
-			clk_get_rate(ttcce->ttc.clk) / PRESCALE, 1, 0xfffe);
+			ttcce->ttc.freq / PRESCALE, 1, 0xfffe);
 }
 
 /**

+ 1 - 0
drivers/gpu/drm/armada/Kconfig

@@ -5,6 +5,7 @@ config DRM_ARMADA
 	select FB_CFB_COPYAREA
 	select FB_CFB_IMAGEBLIT
 	select DRM_KMS_HELPER
+	select DRM_KMS_FB_HELPER
 	help
 	  Support the "LCD" controllers found on the Marvell Armada 510
 	  devices.  There are two controllers on the device, each controller

+ 2 - 2
drivers/gpu/drm/nouveau/core/subdev/mxm/nv50.c

@@ -100,7 +100,7 @@ mxm_match_dcb(struct nouveau_mxm *mxm, u8 *data, void *info)
 static int
 mxm_dcb_sanitise_entry(struct nouveau_bios *bios, void *data, int idx, u16 pdcb)
 {
-	struct nouveau_mxm *mxm = nouveau_mxm(bios);
+	struct nouveau_mxm *mxm = data;
 	struct context ctx = { .outp = (u32 *)(bios->data + pdcb) };
 	u8 type, i2cidx, link, ver, len;
 	u8 *conn;
@@ -199,7 +199,7 @@ mxm_dcb_sanitise(struct nouveau_mxm *mxm)
 		return;
 	}
 
-	dcb_outp_foreach(bios, NULL, mxm_dcb_sanitise_entry);
+	dcb_outp_foreach(bios, mxm, mxm_dcb_sanitise_entry);
 	mxms_foreach(mxm, 0x01, mxm_show_unmatched, NULL);
 }
 

+ 1 - 1
drivers/hwmon/coretemp.c

@@ -52,7 +52,7 @@ MODULE_PARM_DESC(tjmax, "TjMax value in degrees Celsius");
 
 #define BASE_SYSFS_ATTR_NO	2	/* Sysfs Base attr no for coretemp */
 #define NUM_REAL_CORES		32	/* Number of Real cores per cpu */
-#define CORETEMP_NAME_LENGTH	17	/* String Length of attrs */
+#define CORETEMP_NAME_LENGTH	19	/* String Length of attrs */
 #define MAX_CORE_ATTRS		4	/* Maximum no of basic attrs */
 #define TOTAL_ATTRS		(MAX_CORE_ATTRS + 1)
 #define MAX_CORE_DATA		(NUM_REAL_CORES + BASE_SYSFS_ATTR_NO)

+ 15 - 3
drivers/md/md.c

@@ -1077,6 +1077,7 @@ static int super_90_validate(struct mddev *mddev, struct md_rdev *rdev)
 	rdev->raid_disk = -1;
 	clear_bit(Faulty, &rdev->flags);
 	clear_bit(In_sync, &rdev->flags);
+	clear_bit(Bitmap_sync, &rdev->flags);
 	clear_bit(WriteMostly, &rdev->flags);
 
 	if (mddev->raid_disks == 0) {
@@ -1155,6 +1156,8 @@ static int super_90_validate(struct mddev *mddev, struct md_rdev *rdev)
 		 */
 		if (ev1 < mddev->bitmap->events_cleared)
 			return 0;
+		if (ev1 < mddev->events)
+			set_bit(Bitmap_sync, &rdev->flags);
 	} else {
 		if (ev1 < mddev->events)
 			/* just a hot-add of a new device, leave raid_disk at -1 */
@@ -1563,6 +1566,7 @@ static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev)
 	rdev->raid_disk = -1;
 	clear_bit(Faulty, &rdev->flags);
 	clear_bit(In_sync, &rdev->flags);
+	clear_bit(Bitmap_sync, &rdev->flags);
 	clear_bit(WriteMostly, &rdev->flags);
 
 	if (mddev->raid_disks == 0) {
@@ -1645,6 +1649,8 @@ static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev)
 		 */
 		if (ev1 < mddev->bitmap->events_cleared)
 			return 0;
+		if (ev1 < mddev->events)
+			set_bit(Bitmap_sync, &rdev->flags);
 	} else {
 		if (ev1 < mddev->events)
 			/* just a hot-add of a new device, leave raid_disk at -1 */
@@ -2788,6 +2794,7 @@ slot_store(struct md_rdev *rdev, const char *buf, size_t len)
 		else
 			rdev->saved_raid_disk = -1;
 		clear_bit(In_sync, &rdev->flags);
+		clear_bit(Bitmap_sync, &rdev->flags);
 		err = rdev->mddev->pers->
 			hot_add_disk(rdev->mddev, rdev);
 		if (err) {
@@ -5760,6 +5767,7 @@ static int add_new_disk(struct mddev * mddev, mdu_disk_info_t *info)
 			    info->raid_disk < mddev->raid_disks) {
 				rdev->raid_disk = info->raid_disk;
 				set_bit(In_sync, &rdev->flags);
+				clear_bit(Bitmap_sync, &rdev->flags);
 			} else
 				rdev->raid_disk = -1;
 		} else
@@ -7706,7 +7714,8 @@ static int remove_and_add_spares(struct mddev *mddev,
 		if (test_bit(Faulty, &rdev->flags))
 			continue;
 		if (mddev->ro &&
-		    rdev->saved_raid_disk < 0)
+		    ! (rdev->saved_raid_disk >= 0 &&
+		       !test_bit(Bitmap_sync, &rdev->flags)))
 			continue;
 
 		rdev->recovery_offset = 0;
@@ -7787,9 +7796,12 @@ void md_check_recovery(struct mddev *mddev)
 			 * As we only add devices that are already in-sync,
 			 * we can activate the spares immediately.
 			 */
-			clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
 			remove_and_add_spares(mddev, NULL);
-			mddev->pers->spare_active(mddev);
+			/* There is no thread, but we need to call
+			 * ->spare_active and clear saved_raid_disk
+			 */
+			md_reap_sync_thread(mddev);
+			clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
 			goto unlock;
 		}
 

+ 3 - 0
drivers/md/md.h

@@ -129,6 +129,9 @@ struct md_rdev {
 enum flag_bits {
 	Faulty,			/* device is known to have a fault */
 	In_sync,		/* device is in_sync with rest of array */
+	Bitmap_sync,		/* ..actually, not quite In_sync.  Need a
+				 * bitmap-based recovery to get fully in sync
+				 */
 	Unmerged,		/* device is being added to array and should
 				 * be considerred for bvec_merge_fn but not
 				 * yet for actual IO

+ 1 - 2
drivers/md/raid1.c

@@ -924,9 +924,8 @@ static sector_t wait_barrier(struct r1conf *conf, struct bio *bio)
 				conf->next_window_requests++;
 			else
 				conf->current_window_requests++;
-		}
-		if (bio->bi_sector >= conf->start_next_window)
 			sector = conf->start_next_window;
+		}
 	}
 
 	conf->nr_pending++;

+ 6 - 6
drivers/md/raid10.c

@@ -1319,7 +1319,7 @@ read_again:
 			/* Could not read all from this device, so we will
 			 * need another r10_bio.
 			 */
-			sectors_handled = (r10_bio->sectors + max_sectors
+			sectors_handled = (r10_bio->sector + max_sectors
 					   - bio->bi_sector);
 			r10_bio->sectors = max_sectors;
 			spin_lock_irq(&conf->device_lock);
@@ -1327,7 +1327,7 @@ read_again:
 				bio->bi_phys_segments = 2;
 			else
 				bio->bi_phys_segments++;
-			spin_unlock(&conf->device_lock);
+			spin_unlock_irq(&conf->device_lock);
 			/* Cannot call generic_make_request directly
 			 * as that will be queued in __generic_make_request
 			 * and subsequent mempool_alloc might block
@@ -3218,10 +3218,6 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
 			if (j == conf->copies) {
 				/* Cannot recover, so abort the recovery or
 				 * record a bad block */
-				put_buf(r10_bio);
-				if (rb2)
-					atomic_dec(&rb2->remaining);
-				r10_bio = rb2;
 				if (any_working) {
 					/* problem is that there are bad blocks
 					 * on other device(s)
@@ -3253,6 +3249,10 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
 					mirror->recovery_disabled
 						= mddev->recovery_disabled;
 				}
+				put_buf(r10_bio);
+				if (rb2)
+					atomic_dec(&rb2->remaining);
+				r10_bio = rb2;
 				break;
 			}
 		}

+ 4 - 3
drivers/md/raid5.c

@@ -687,7 +687,8 @@ get_active_stripe(struct r5conf *conf, sector_t sector,
 			} else {
 				if (!test_bit(STRIPE_HANDLE, &sh->state))
 					atomic_inc(&conf->active_stripes);
-				BUG_ON(list_empty(&sh->lru));
+				BUG_ON(list_empty(&sh->lru) &&
+				       !test_bit(STRIPE_EXPANDING, &sh->state));
 				list_del_init(&sh->lru);
 				if (sh->group) {
 					sh->group->stripes_cnt--;
@@ -3608,7 +3609,7 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
 			 */
 			set_bit(R5_Insync, &dev->flags);
 
-		if (rdev && test_bit(R5_WriteError, &dev->flags)) {
+		if (test_bit(R5_WriteError, &dev->flags)) {
 			/* This flag does not apply to '.replacement'
 			 * only to .rdev, so make sure to check that*/
 			struct md_rdev *rdev2 = rcu_dereference(
@@ -3621,7 +3622,7 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
 			} else
 				clear_bit(R5_WriteError, &dev->flags);
 		}
-		if (rdev && test_bit(R5_MadeGood, &dev->flags)) {
+		if (test_bit(R5_MadeGood, &dev->flags)) {
 			/* This flag does not apply to '.replacement'
 			 * only to .rdev, so make sure to check that*/
 			struct md_rdev *rdev2 = rcu_dereference(

+ 15 - 14
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c

@@ -12942,25 +12942,26 @@ static void __bnx2x_remove(struct pci_dev *pdev,
 		pci_set_power_state(pdev, PCI_D3hot);
 	}
 
-	if (bp->regview)
-		iounmap(bp->regview);
+	if (remove_netdev) {
+		if (bp->regview)
+			iounmap(bp->regview);
 
-	/* for vf doorbells are part of the regview and were unmapped along with
-	 * it. FW is only loaded by PF.
-	 */
-	if (IS_PF(bp)) {
-		if (bp->doorbells)
-			iounmap(bp->doorbells);
+		/* For vfs, doorbells are part of the regview and were unmapped
+		 * along with it. FW is only loaded by PF.
+		 */
+		if (IS_PF(bp)) {
+			if (bp->doorbells)
+				iounmap(bp->doorbells);
 
-		bnx2x_release_firmware(bp);
-	}
-	bnx2x_free_mem_bp(bp);
+			bnx2x_release_firmware(bp);
+		}
+		bnx2x_free_mem_bp(bp);
 
-	if (remove_netdev)
 		free_netdev(dev);
 
-	if (atomic_read(&pdev->enable_cnt) == 1)
-		pci_release_regions(pdev);
+		if (atomic_read(&pdev->enable_cnt) == 1)
+			pci_release_regions(pdev);
+	}
 
 	pci_disable_device(pdev);
 }

+ 1 - 1
drivers/net/ethernet/chelsio/cxgb4/l2t.c

@@ -423,7 +423,7 @@ u64 cxgb4_select_ntuple(struct net_device *dev,
 	 * in the Compressed Filter Tuple.
 	 */
 	if (tp->vlan_shift >= 0 && l2t->vlan != VLAN_NONE)
-		ntuple |= (F_FT_VLAN_VLD | l2t->vlan) << tp->vlan_shift;
+		ntuple |= (u64)(F_FT_VLAN_VLD | l2t->vlan) << tp->vlan_shift;
 
 	if (tp->port_shift >= 0)
 		ntuple |= (u64)l2t->lport << tp->port_shift;

+ 9 - 2
drivers/net/ethernet/emulex/benet/be_main.c

@@ -1776,6 +1776,7 @@ static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
 	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
 	struct be_queue_info *rxq = &rxo->q;
 	struct page *pagep = NULL;
+	struct device *dev = &adapter->pdev->dev;
 	struct be_eth_rx_d *rxd;
 	u64 page_dmaaddr = 0, frag_dmaaddr;
 	u32 posted, page_offset = 0;
@@ -1788,9 +1789,15 @@ static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
 				rx_stats(rxo)->rx_post_fail++;
 				break;
 			}
-			page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
-						    0, adapter->big_page_size,
+			page_dmaaddr = dma_map_page(dev, pagep, 0,
+						    adapter->big_page_size,
 						    DMA_FROM_DEVICE);
+			if (dma_mapping_error(dev, page_dmaaddr)) {
+				put_page(pagep);
+				pagep = NULL;
+				rx_stats(rxo)->rx_post_fail++;
+				break;
+			}
 			page_info->page_offset = 0;
 		} else {
 			get_page(pagep);

+ 2 - 6
drivers/net/ethernet/intel/e1000e/netdev.c

@@ -6174,7 +6174,7 @@ static int __e1000_resume(struct pci_dev *pdev)
 	return 0;
 }
 
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
 static int e1000_suspend(struct device *dev)
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
@@ -6193,7 +6193,7 @@ static int e1000_resume(struct device *dev)
 
 	return __e1000_resume(pdev);
 }
-#endif /* CONFIG_PM */
+#endif /* CONFIG_PM_SLEEP */
 
 #ifdef CONFIG_PM_RUNTIME
 static int e1000_runtime_suspend(struct device *dev)
@@ -7015,13 +7015,11 @@ static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
 };
 MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
 
-#ifdef CONFIG_PM
 static const struct dev_pm_ops e1000_pm_ops = {
 	SET_SYSTEM_SLEEP_PM_OPS(e1000_suspend, e1000_resume)
 	SET_RUNTIME_PM_OPS(e1000_runtime_suspend, e1000_runtime_resume,
 			   e1000_idle)
 };
-#endif
 
 /* PCI Device API Driver */
 static struct pci_driver e1000_driver = {
@@ -7029,11 +7027,9 @@ static struct pci_driver e1000_driver = {
 	.id_table = e1000_pci_tbl,
 	.probe    = e1000_probe,
 	.remove   = e1000_remove,
-#ifdef CONFIG_PM
 	.driver   = {
 		.pm = &e1000_pm_ops,
 	},
-#endif
 	.shutdown = e1000_shutdown,
 	.err_handler = &e1000_err_handler
 };

+ 2 - 0
drivers/net/ethernet/qlogic/qlge/qlge_main.c

@@ -4765,6 +4765,8 @@ static int qlge_probe(struct pci_dev *pdev,
 			    NETIF_F_RXCSUM;
 	ndev->features = ndev->hw_features;
 	ndev->vlan_features = ndev->hw_features;
+	/* vlan gets same features (except vlan filter) */
+	ndev->vlan_features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
 
 	if (test_bit(QL_DMA64, &qdev->flags))
 		ndev->features |= NETIF_F_HIGHDMA;

+ 1 - 0
drivers/net/ethernet/via/via-rhine.c

@@ -1618,6 +1618,7 @@ static void rhine_reset_task(struct work_struct *work)
 		goto out_unlock;
 
 	napi_disable(&rp->napi);
+	netif_tx_disable(dev);
 	spin_lock_bh(&rp->lock);
 
 	/* clear all descriptors */

+ 12 - 0
drivers/net/usb/dm9601.c

@@ -614,6 +614,18 @@ static const struct usb_device_id products[] = {
 	 USB_DEVICE(0x0a46, 0x9621),	/* DM9621A USB to Fast Ethernet Adapter */
 	 .driver_info = (unsigned long)&dm9601_info,
 	},
+	{
+	 USB_DEVICE(0x0a46, 0x9622),	/* DM9622 USB to Fast Ethernet Adapter */
+	 .driver_info = (unsigned long)&dm9601_info,
+	},
+	{
+	 USB_DEVICE(0x0a46, 0x0269),	/* DM962OA USB to Fast Ethernet Adapter */
+	 .driver_info = (unsigned long)&dm9601_info,
+	},
+	{
+	 USB_DEVICE(0x0a46, 0x1269),	/* DM9621A USB to Fast Ethernet Adapter */
+	 .driver_info = (unsigned long)&dm9601_info,
+	},
 	{},			// END
 };
 

+ 1 - 1
drivers/net/usb/usbnet.c

@@ -1245,7 +1245,7 @@ static int build_dma_sg(const struct sk_buff *skb, struct urb *urb)
 		return -ENOMEM;
 
 	urb->num_sgs = num_sgs;
-	sg_init_table(urb->sg, urb->num_sgs);
+	sg_init_table(urb->sg, urb->num_sgs + 1);
 
 	sg_set_buf(&urb->sg[s++], skb->data, skb_headlen(skb));
 	total_len += skb_headlen(skb);

+ 0 - 1
drivers/pinctrl/pinctrl-baytrail.c

@@ -512,7 +512,6 @@ static const struct dev_pm_ops byt_gpio_pm_ops = {
 
 static const struct acpi_device_id byt_gpio_acpi_match[] = {
 	{ "INT33B2", 0 },
-	{ "INT33FC", 0 },
 	{ }
 };
 MODULE_DEVICE_TABLE(acpi, byt_gpio_acpi_match);

+ 6 - 1
fs/dcache.c

@@ -3061,8 +3061,13 @@ char *d_path(const struct path *path, char *buf, int buflen)
 	 * thus don't need to be hashed.  They also don't need a name until a
 	 * user wants to identify the object in /proc/pid/fd/.  The little hack
 	 * below allows us to generate a name for these objects on demand:
+	 *
+	 * Some pseudo inodes are mountable.  When they are mounted
+	 * path->dentry == path->mnt->mnt_root.  In that case don't call d_dname
+	 * and instead have d_path return the mounted path.
 	 */
-	if (path->dentry->d_op && path->dentry->d_op->d_dname)
+	if (path->dentry->d_op && path->dentry->d_op->d_dname &&
+	    (!IS_ROOT(path->dentry) || path->dentry != path->mnt->mnt_root))
 		return path->dentry->d_op->d_dname(path->dentry, buf, buflen);
 
 	rcu_read_lock();

+ 9 - 6
fs/fs-writeback.c

@@ -516,13 +516,16 @@ writeback_single_inode(struct inode *inode, struct bdi_writeback *wb,
 	}
 	WARN_ON(inode->i_state & I_SYNC);
 	/*
-	 * Skip inode if it is clean. We don't want to mess with writeback
-	 * lists in this function since flusher thread may be doing for example
-	 * sync in parallel and if we move the inode, it could get skipped. So
-	 * here we make sure inode is on some writeback list and leave it there
-	 * unless we have completely cleaned the inode.
+	 * Skip inode if it is clean and we have no outstanding writeback in
+	 * WB_SYNC_ALL mode. We don't want to mess with writeback lists in this
+	 * function since flusher thread may be doing for example sync in
+	 * parallel and if we move the inode, it could get skipped. So here we
+	 * make sure inode is on some writeback list and leave it there unless
+	 * we have completely cleaned the inode.
 	 */
-	if (!(inode->i_state & I_DIRTY))
+	if (!(inode->i_state & I_DIRTY) &&
+	    (wbc->sync_mode != WB_SYNC_ALL ||
+	     !mapping_tagged(inode->i_mapping, PAGECACHE_TAG_WRITEBACK)))
 		goto out;
 	inode->i_state |= I_SYNC;
 	spin_unlock(&inode->i_lock);

+ 1 - 1
fs/namespace.c

@@ -2886,7 +2886,7 @@ bool fs_fully_visible(struct file_system_type *type)
 			struct inode *inode = child->mnt_mountpoint->d_inode;
 			if (!S_ISDIR(inode->i_mode))
 				goto next;
-			if (inode->i_nlink != 2)
+			if (inode->i_nlink > 2)
 				goto next;
 		}
 		visible = true;

+ 6 - 4
fs/nilfs2/segment.c

@@ -1440,17 +1440,19 @@ static int nilfs_segctor_collect(struct nilfs_sc_info *sci,
 
 		nilfs_clear_logs(&sci->sc_segbufs);
 
-		err = nilfs_segctor_extend_segments(sci, nilfs, nadd);
-		if (unlikely(err))
-			return err;
-
 		if (sci->sc_stage.flags & NILFS_CF_SUFREED) {
 			err = nilfs_sufile_cancel_freev(nilfs->ns_sufile,
 							sci->sc_freesegs,
 							sci->sc_nfreesegs,
 							NULL);
 			WARN_ON(err); /* do not happen */
+			sci->sc_stage.flags &= ~NILFS_CF_SUFREED;
 		}
+
+		err = nilfs_segctor_extend_segments(sci, nilfs, nadd);
+		if (unlikely(err))
+			return err;
+
 		nadd = min_t(int, nadd << 1, SC_MAX_SEGDELTA);
 		sci->sc_stage = prev_stage;
 	}

+ 2 - 0
include/linux/crash_dump.h

@@ -6,6 +6,8 @@
 #include <linux/proc_fs.h>
 #include <linux/elf.h>
 
+#include <asm/pgtable.h> /* for pgprot_t */
+
 #define ELFCORE_ADDR_MAX	(-1ULL)
 #define ELFCORE_ADDR_ERR	(-2ULL)
 

+ 1 - 1
include/linux/i2c.h

@@ -445,7 +445,7 @@ static inline void i2c_set_adapdata(struct i2c_adapter *dev, void *data)
 static inline struct i2c_adapter *
 i2c_parent_is_i2c_adapter(const struct i2c_adapter *adapter)
 {
-#if IS_ENABLED(I2C_MUX)
+#if IS_ENABLED(CONFIG_I2C_MUX)
 	struct device *parent = adapter->dev.parent;
 
 	if (parent != NULL && parent->type == &i2c_adapter_type)
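
Why the missing CONFIG_ prefix compiled but misbehaved: IS_ENABLED() keys off macros that Kconfig defines to 1, so IS_ENABLED(I2C_MUX) quietly evaluates to 0 even when mux support is built in, compiling the parent check out. A minimal userspace model of the trick (placeholder macros simplified from the kernel's kconfig.h; treat as a sketch):

#include <stdio.h>

#define CONFIG_I2C_MUX 1	/* what Kconfig would emit for =y */

#define __ARG_PLACEHOLDER_1 0,
#define __take_second_arg(__ignored, val, ...) val
#define __is_defined(x) ___is_defined(x)
#define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val)
#define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)
#define IS_ENABLED(option) __is_defined(option)

int main(void)
{
	printf("%d\n", IS_ENABLED(CONFIG_I2C_MUX));	/* 1 */
	printf("%d\n", IS_ENABLED(I2C_MUX));	/* 0: the bare name is never defined */
	return 0;
}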

+ 19 - 8
include/linux/seqlock.h

@@ -117,15 +117,15 @@ repeat:
 }
 
 /**
- * read_seqcount_begin_no_lockdep - start seq-read critical section w/o lockdep
+ * raw_read_seqcount_begin - start seq-read critical section w/o lockdep
  * @s: pointer to seqcount_t
  * Returns: count to be passed to read_seqcount_retry
  *
- * read_seqcount_begin_no_lockdep opens a read critical section of the given
+ * raw_read_seqcount_begin opens a read critical section of the given
  * seqcount, but without any lockdep checking. Validity of the critical
  * section is tested by checking read_seqcount_retry function.
  */
-static inline unsigned read_seqcount_begin_no_lockdep(const seqcount_t *s)
+static inline unsigned raw_read_seqcount_begin(const seqcount_t *s)
 {
 	unsigned ret = __read_seqcount_begin(s);
 	smp_rmb();
@@ -144,7 +144,7 @@ static inline unsigned read_seqcount_begin_no_lockdep(const seqcount_t *s)
 static inline unsigned read_seqcount_begin(const seqcount_t *s)
 {
 	seqcount_lockdep_reader_access(s);
-	return read_seqcount_begin_no_lockdep(s);
+	return raw_read_seqcount_begin(s);
 }
 
 /**
@@ -206,14 +206,26 @@ static inline int read_seqcount_retry(const seqcount_t *s, unsigned start)
 }
 
 
+
+static inline void raw_write_seqcount_begin(seqcount_t *s)
+{
+	s->sequence++;
+	smp_wmb();
+}
+
+static inline void raw_write_seqcount_end(seqcount_t *s)
+{
+	smp_wmb();
+	s->sequence++;
+}
+
 /*
  * Sequence counter only version assumes that callers are using their
  * own mutexing.
  */
 static inline void write_seqcount_begin_nested(seqcount_t *s, int subclass)
 {
-	s->sequence++;
-	smp_wmb();
+	raw_write_seqcount_begin(s);
 	seqcount_acquire(&s->dep_map, subclass, 0, _RET_IP_);
 }
 
@@ -225,8 +237,7 @@ static inline void write_seqcount_begin(seqcount_t *s)
 static inline void write_seqcount_end(seqcount_t *s)
 {
 	seqcount_release(&s->dep_map, 1, _RET_IP_);
-	smp_wmb();
-	s->sequence++;
+	raw_write_seqcount_end(s);
 }
 
 /**
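
A hedged sketch of how the new raw pair is meant to be used (variable names invented; the sched_clock.c hunk below is the in-tree user): the writer brackets its update with raw_write_seqcount_begin/end so the count is odd while the data is inconsistent, and a lockdep-free reader retries until it observes a stable snapshot.

static seqcount_t seq;	/* seqcount_init(&seq) at bring-up */
static u64 a, b;	/* data the counter protects */

static void writer_update(u64 na, u64 nb)	/* caller supplies mutexing */
{
	raw_write_seqcount_begin(&seq);	/* count becomes odd: readers retry */
	a = na;
	b = nb;
	raw_write_seqcount_end(&seq);	/* count even again: snapshot stable */
}

static u64 reader_sum(void)
{
	unsigned start;
	u64 sum;

	do {
		start = raw_read_seqcount_begin(&seq);	/* no lockdep hook */
		sum = a + b;
	} while (read_seqcount_retry(&seq, start));
	return sum;
}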

+ 0 - 1
include/net/if_inet6.h

@@ -165,7 +165,6 @@ struct inet6_dev {
 	struct net_device	*dev;
 
 	struct list_head	addr_list;
-	int			valid_ll_addr_cnt;
 
 	struct ifmcaddr6	*mc_list;
 	struct ifmcaddr6	*mc_tomb;

+ 1 - 1
kernel/fork.c

@@ -1172,7 +1172,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	 * do not allow it to share a thread group or signal handlers or
 	 * parent with the forking task.
 	 */
-	if (clone_flags & (CLONE_SIGHAND | CLONE_PARENT)) {
+	if (clone_flags & CLONE_SIGHAND) {
 		if ((clone_flags & (CLONE_NEWUSER | CLONE_NEWPID)) ||
 		    (task_active_pid_ns(current) !=
 				current->nsproxy->pid_ns_for_children))

+ 1 - 1
kernel/sched/fair.c

@@ -3923,7 +3923,7 @@ static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
 {
 	struct sched_entity *se = tg->se[cpu];
 
-	if (!tg->parent || !wl)	/* the trivial, non-cgroup case */
+	if (!tg->parent)	/* the trivial, non-cgroup case */
 		return wl;
 
 	for_each_sched_entity(se) {

+ 3 - 3
kernel/time/sched_clock.c

@@ -74,7 +74,7 @@ unsigned long long notrace sched_clock(void)
 		return cd.epoch_ns;
 
 	do {
-		seq = read_seqcount_begin(&cd.seq);
+		seq = raw_read_seqcount_begin(&cd.seq);
 		epoch_cyc = cd.epoch_cyc;
 		epoch_ns = cd.epoch_ns;
 	} while (read_seqcount_retry(&cd.seq, seq));
@@ -99,10 +99,10 @@ static void notrace update_sched_clock(void)
 			  cd.mult, cd.shift);
 
 	raw_local_irq_save(flags);
-	write_seqcount_begin(&cd.seq);
+	raw_write_seqcount_begin(&cd.seq);
 	cd.epoch_ns = ns;
 	cd.epoch_cyc = cyc;
-	write_seqcount_end(&cd.seq);
+	raw_write_seqcount_end(&cd.seq);
 	raw_local_irq_restore(flags);
 }
 

+ 2 - 2
lib/percpu_counter.c

@@ -82,10 +82,10 @@ void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch)
 		unsigned long flags;
 		raw_spin_lock_irqsave(&fbc->lock, flags);
 		fbc->count += count;
+		__this_cpu_sub(*fbc->counters, count - amount);
 		raw_spin_unlock_irqrestore(&fbc->lock, flags);
-		__this_cpu_write(*fbc->counters, 0);
 	} else {
-		__this_cpu_write(*fbc->counters, count);
+		this_cpu_add(*fbc->counters, amount);
 	}
 	preempt_enable();
 }
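
A hedged arithmetic walkthrough of the race the old code lost (plain integers, not kernel code): count is the pre-existing per-CPU delta plus this call's amount, and after folding it into fbc->count the slot must give back exactly that, not be blindly zeroed, or a concurrent update to the slot vanishes.

#include <stdio.h>

int main(void)
{
	/* The slot held an old delta of 3; this call adds amount = 2, so
	 * count = 5 is folded into the global fbc->count.  Before the
	 * store back, an interrupt on this CPU adds 4 to the slot. */
	int slot = 3;			/* per-CPU delta */
	int amount = 2;
	int count = slot + amount;	/* 5, added to fbc->count under the lock */

	slot += 4;			/* concurrent interrupt's contribution */
	/* old code: slot = 0;  -> the 4 is silently lost */
	slot -= count - amount;		/* new code: remove only what was folded in */
	printf("surviving per-CPU delta: %d\n", slot);	/* 4 */
	return 0;
}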

+ 4 - 1
mm/util.c

@@ -390,7 +390,10 @@ struct address_space *page_mapping(struct page *page)
 {
 	struct address_space *mapping = page->mapping;
 
-	VM_BUG_ON(PageSlab(page));
+	/* This happens if someone calls flush_dcache_page on slab page */
+	if (unlikely(PageSlab(page)))
+		return NULL;
+
 	if (unlikely(PageSwapCache(page))) {
 		swp_entry_t entry;
 
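Callers consequently have to treat a NULL return as "no address_space to operate on"; a hedged caller-side sketch (the helper name is invented):

/* Illustrative only: how a flush-style helper tolerates slab pages now */
static void flush_page_mapping(struct page *page)
{
	struct address_space *mapping = page_mapping(page);

	if (!mapping)
		return;	/* slab (or anonymous) page: nothing to flush */
	/* ... operate on mapping ... */
}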

+ 1 - 1
net/batman-adv/main.c

@@ -277,7 +277,7 @@ int batadv_max_header_len(void)
 			   sizeof(struct batadv_coded_packet));
 #endif
 
-	return header_len;
+	return header_len + ETH_HLEN;
 }
 
 /**

+ 2 - 28
net/core/filter.c

@@ -36,7 +36,6 @@
 #include <asm/uaccess.h>
 #include <asm/unaligned.h>
 #include <linux/filter.h>
-#include <linux/reciprocal_div.h>
 #include <linux/ratelimit.h>
 #include <linux/seccomp.h>
 #include <linux/if_vlan.h>
@@ -166,7 +165,7 @@ unsigned int sk_run_filter(const struct sk_buff *skb,
 			A /= X;
 			continue;
 		case BPF_S_ALU_DIV_K:
-			A = reciprocal_divide(A, K);
+			A /= K;
 			continue;
 		case BPF_S_ALU_MOD_X:
 			if (X == 0)
@@ -553,11 +552,6 @@ int sk_chk_filter(struct sock_filter *filter, unsigned int flen)
 		/* Some instructions need special checks */
 		switch (code) {
 		case BPF_S_ALU_DIV_K:
-			/* check for division by zero */
-			if (ftest->k == 0)
-				return -EINVAL;
-			ftest->k = reciprocal_value(ftest->k);
-			break;
 		case BPF_S_ALU_MOD_K:
 			/* check for division by zero */
 			if (ftest->k == 0)
@@ -853,27 +847,7 @@ void sk_decode_filter(struct sock_filter *filt, struct sock_filter *to)
 	to->code = decodes[code];
 	to->jt = filt->jt;
 	to->jf = filt->jf;
-
-	if (code == BPF_S_ALU_DIV_K) {
-		/*
-		 * When loaded this rule user gave us X, which was
-		 * translated into R = r(X). Now we calculate the
-		 * RR = r(R) and report it back. If next time this
-		 * value is loaded and RRR = r(RR) is calculated
-		 * then the R == RRR will be true.
-		 *
-		 * One exception. X == 1 translates into R == 0 and
-		 * we can't calculate RR out of it with r().
-		 */
-
-		if (filt->k == 0)
-			to->k = 1;
-		else
-			to->k = reciprocal_value(filt->k);
-
-		BUG_ON(reciprocal_value(to->k) != filt->k);
-	} else
-		to->k = filt->k;
+	to->k = filt->k;
 }
 
 int sk_get_filter(struct sock *sk, struct sock_filter __user *ubuf, unsigned int len)
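
For context, the dropped helper approximates division by a constant with a 32-bit fixed-point reciprocal, roughly reciprocal_value(K) = ceil(2^32 / K) and reciprocal_divide(A, R) = (u64)A * R >> 32 (formulas as recalled from the old lib/reciprocal_div.c; treat as a sketch). The approximation is not exact for all inputs, and K == 1 degenerates outright, which is the special case the deleted sk_decode_filter() comment worked around; BPF needs the exact C semantics of A / K. A small userspace reproduction:

#include <stdio.h>
#include <stdint.h>

static uint32_t reciprocal_value(uint32_t k)
{
	return (uint32_t)(((1ULL << 32) + k - 1) / k);	/* ceil(2^32 / k) */
}

static uint32_t reciprocal_divide(uint32_t a, uint32_t r)
{
	return (uint32_t)(((uint64_t)a * r) >> 32);
}

int main(void)
{
	/* k == 1: 2^32 truncates to 0, so every quotient collapses to 0 */
	uint32_t r1 = reciprocal_value(1);
	printf("5 / 1: exact=%u approx=%u\n", 5u, reciprocal_divide(5, r1));

	/* scan large dividends for off-by-one mismatches vs. exact division */
	for (uint32_t k = 2; k < 1000; k++) {
		uint32_t r = reciprocal_value(k);
		for (uint64_t a = 0xffffff00ull; a <= 0xffffffffull; a++) {
			uint32_t approx = reciprocal_divide((uint32_t)a, r);
			uint32_t exact = (uint32_t)a / k;
			if (approx != exact) {
				printf("mismatch: %llu / %u: approx=%u exact=%u\n",
				       (unsigned long long)a, k, approx, exact);
				return 0;
			}
		}
	}
	printf("no mismatch found in scanned range\n");
	return 0;
}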

+ 4 - 2
net/ieee802154/nl-phy.c

@@ -221,8 +221,10 @@ int ieee802154_add_iface(struct sk_buff *skb, struct genl_info *info)
 
 	if (info->attrs[IEEE802154_ATTR_DEV_TYPE]) {
 		type = nla_get_u8(info->attrs[IEEE802154_ATTR_DEV_TYPE]);
-		if (type >= __IEEE802154_DEV_MAX)
-			return -EINVAL;
+		if (type >= __IEEE802154_DEV_MAX) {
+			rc = -EINVAL;
+			goto nla_put_failure;
+		}
 	}
 
 	dev = phy->add_iface(phy, devname, type);
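
The direct return skipped the function's common error unwind, leaking what had already been acquired; routing through the existing nla_put_failure label is the usual kernel shape. A hedged generic sketch of the pattern (helper and label names invented):

#include <stdlib.h>

static int validate_input(void) { return -1; }	/* hypothetical helper */

int do_setup(void)
{
	int rc;
	char *buf = malloc(64);		/* resource acquired up front */

	if (!buf)
		return -1;		/* nothing to unwind yet */

	rc = validate_input();
	if (rc < 0)
		goto out_free;		/* never return directly past here */

	/* ... use buf ... */
	rc = 0;
out_free:
	free(buf);			/* the one place that releases */
	return rc;
}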

+ 4 - 1
net/ipv4/inet_diag.c

@@ -930,12 +930,15 @@ skip_listen_ht:
 		spin_lock_bh(lock);
 		sk_nulls_for_each(sk, node, &head->chain) {
 			int res;
+			int state;
 
 			if (!net_eq(sock_net(sk), net))
 				continue;
 			if (num < s_num)
 				goto next_normal;
-			if (!(r->idiag_states & (1 << sk->sk_state)))
+			state = (sk->sk_state == TCP_TIME_WAIT) ?
+				inet_twsk(sk)->tw_substate : sk->sk_state;
+			if (!(r->idiag_states & (1 << state)))
 				goto next_normal;
 			if (r->sdiag_family != AF_UNSPEC &&
 			    sk->sk_family != r->sdiag_family)

+ 5 - 2
net/ipv4/ipmr.c

@@ -157,9 +157,12 @@ static struct mr_table *ipmr_get_table(struct net *net, u32 id)
 static int ipmr_fib_lookup(struct net *net, struct flowi4 *flp4,
 			   struct mr_table **mrt)
 {
-	struct ipmr_result res;
-	struct fib_lookup_arg arg = { .result = &res, };
 	int err;
+	struct ipmr_result res;
+	struct fib_lookup_arg arg = {
+		.result = &res,
+		.flags = FIB_LOOKUP_NOREF,
+	};
 
 	err = fib_rules_lookup(net->ipv4.mr_rules_ops,
 			       flowi4_to_flowi(flp4), 0, &arg);

+ 32 - 19
net/ipv4/tcp_metrics.c

@@ -22,6 +22,9 @@
 
 int sysctl_tcp_nometrics_save __read_mostly;
 
+static struct tcp_metrics_block *__tcp_get_metrics(const struct inetpeer_addr *addr,
+						   struct net *net, unsigned int hash);
+
 struct tcp_fastopen_metrics {
 	u16	mss;
 	u16	syn_loss:10;		/* Recurring Fast Open SYN losses */
@@ -130,16 +133,41 @@ static void tcpm_suck_dst(struct tcp_metrics_block *tm, struct dst_entry *dst,
 	}
 }
 
+#define TCP_METRICS_TIMEOUT		(60 * 60 * HZ)
+
+static void tcpm_check_stamp(struct tcp_metrics_block *tm, struct dst_entry *dst)
+{
+	if (tm && unlikely(time_after(jiffies, tm->tcpm_stamp + TCP_METRICS_TIMEOUT)))
+		tcpm_suck_dst(tm, dst, false);
+}
+
+#define TCP_METRICS_RECLAIM_DEPTH	5
+#define TCP_METRICS_RECLAIM_PTR		(struct tcp_metrics_block *) 0x1UL
+
 static struct tcp_metrics_block *tcpm_new(struct dst_entry *dst,
 					  struct inetpeer_addr *addr,
-					  unsigned int hash,
-					  bool reclaim)
+					  unsigned int hash)
 {
 	struct tcp_metrics_block *tm;
 	struct net *net;
+	bool reclaim = false;
 
 	spin_lock_bh(&tcp_metrics_lock);
 	net = dev_net(dst->dev);
+
+	/* While waiting for the spin-lock the cache might have been populated
+	 * with this entry and so we have to check again.
+	 */
+	tm = __tcp_get_metrics(addr, net, hash);
+	if (tm == TCP_METRICS_RECLAIM_PTR) {
+		reclaim = true;
+		tm = NULL;
+	}
+	if (tm) {
+		tcpm_check_stamp(tm, dst);
+		goto out_unlock;
+	}
+
 	if (unlikely(reclaim)) {
 		struct tcp_metrics_block *oldest;
 
@@ -169,17 +197,6 @@ out_unlock:
 	return tm;
 }
 
-#define TCP_METRICS_TIMEOUT		(60 * 60 * HZ)
-
-static void tcpm_check_stamp(struct tcp_metrics_block *tm, struct dst_entry *dst)
-{
-	if (tm && unlikely(time_after(jiffies, tm->tcpm_stamp + TCP_METRICS_TIMEOUT)))
-		tcpm_suck_dst(tm, dst, false);
-}
-
-#define TCP_METRICS_RECLAIM_DEPTH	5
-#define TCP_METRICS_RECLAIM_PTR		(struct tcp_metrics_block *) 0x1UL
-
 static struct tcp_metrics_block *tcp_get_encode(struct tcp_metrics_block *tm, int depth)
 {
 	if (tm)
@@ -282,7 +299,6 @@ static struct tcp_metrics_block *tcp_get_metrics(struct sock *sk,
 	struct inetpeer_addr addr;
 	unsigned int hash;
 	struct net *net;
-	bool reclaim;
 
 	addr.family = sk->sk_family;
 	switch (addr.family) {
@@ -304,13 +320,10 @@ static struct tcp_metrics_block *tcp_get_metrics(struct sock *sk,
 	hash = hash_32(hash, net->ipv4.tcp_metrics_hash_log);
 
 	tm = __tcp_get_metrics(&addr, net, hash);
-	reclaim = false;
-	if (tm == TCP_METRICS_RECLAIM_PTR) {
-		reclaim = true;
+	if (tm == TCP_METRICS_RECLAIM_PTR)
 		tm = NULL;
-	}
 	if (!tm && create)
-		tm = tcpm_new(dst, &addr, hash, reclaim);
+		tm = tcpm_new(dst, &addr, hash);
 	else
 		tcpm_check_stamp(tm, dst);
 
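The tcpm_new() rework is the classic lookup/lock/re-lookup shape: the lockless first lookup can race with another CPU inserting the same key while this one waits on tcp_metrics_lock, so the lookup must be repeated under the lock before creating. A hedged generic sketch of that shape (all names invented):

/* Illustrative only: re-check under the lock before inserting */
struct entry *get_or_create(struct table *t, struct key *k)
{
	struct entry *e = lookup(t, k);		/* lockless fast path */

	if (e)
		return e;

	spin_lock(&t->lock);
	e = lookup(t, k);		/* may have appeared while we waited */
	if (!e)
		e = insert_new(t, k);	/* safe now: lock held */
	spin_unlock(&t->lock);
	return e;
}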

+ 17 - 21
net/ipv6/addrconf.c

@@ -3189,6 +3189,22 @@ out:
 	in6_ifa_put(ifp);
 }
 
+/* ifp->idev must be at least read locked */
+static bool ipv6_lonely_lladdr(struct inet6_ifaddr *ifp)
+{
+	struct inet6_ifaddr *ifpiter;
+	struct inet6_dev *idev = ifp->idev;
+
+	list_for_each_entry(ifpiter, &idev->addr_list, if_list) {
+		if (ifp != ifpiter && ifpiter->scope == IFA_LINK &&
+		    (ifpiter->flags & (IFA_F_PERMANENT|IFA_F_TENTATIVE|
+				       IFA_F_OPTIMISTIC|IFA_F_DADFAILED)) ==
+		    IFA_F_PERMANENT)
+			return false;
+	}
+	return true;
+}
+
 static void addrconf_dad_completed(struct inet6_ifaddr *ifp)
 {
 	struct net_device *dev = ifp->idev->dev;
@@ -3208,14 +3224,11 @@ static void addrconf_dad_completed(struct inet6_ifaddr *ifp)
 	 */
 
 	read_lock_bh(&ifp->idev->lock);
-	spin_lock(&ifp->lock);
-	send_mld = ipv6_addr_type(&ifp->addr) & IPV6_ADDR_LINKLOCAL &&
-		   ifp->idev->valid_ll_addr_cnt == 1;
+	send_mld = ifp->scope == IFA_LINK && ipv6_lonely_lladdr(ifp);
 	send_rs = send_mld &&
 		  ipv6_accept_ra(ifp->idev) &&
 		  ifp->idev->cnf.rtr_solicits > 0 &&
 		  (dev->flags&IFF_LOOPBACK) == 0;
-	spin_unlock(&ifp->lock);
 	read_unlock_bh(&ifp->idev->lock);
 
 	/* While dad is in progress mld report's source address is in6_addrany.
@@ -4512,19 +4525,6 @@ errout:
 		rtnl_set_sk_err(net, RTNLGRP_IPV6_PREFIX, err);
 }
 
-static void update_valid_ll_addr_cnt(struct inet6_ifaddr *ifp, int count)
-{
-	write_lock_bh(&ifp->idev->lock);
-	spin_lock(&ifp->lock);
-	if (((ifp->flags & (IFA_F_PERMANENT|IFA_F_TENTATIVE|IFA_F_OPTIMISTIC|
-			    IFA_F_DADFAILED)) == IFA_F_PERMANENT) &&
-	    (ipv6_addr_type(&ifp->addr) & IPV6_ADDR_LINKLOCAL))
-		ifp->idev->valid_ll_addr_cnt += count;
-	WARN_ON(ifp->idev->valid_ll_addr_cnt < 0);
-	spin_unlock(&ifp->lock);
-	write_unlock_bh(&ifp->idev->lock);
-}
-
 static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
 {
 	struct net *net = dev_net(ifp->idev->dev);
@@ -4533,8 +4533,6 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
 
 	switch (event) {
 	case RTM_NEWADDR:
-		update_valid_ll_addr_cnt(ifp, 1);
-
 		/*
 		 * If the address was optimistic
 		 * we inserted the route at the start of
@@ -4550,8 +4548,6 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
 					      ifp->idev->dev, 0, 0);
 		break;
 	case RTM_DELADDR:
-		update_valid_ll_addr_cnt(ifp, -1);
-
 		if (ifp->idev->cnf.forwarding)
 			addrconf_leave_anycast(ifp);
 		addrconf_leave_solict(ifp->idev, &ifp->addr);

+ 5 - 2
net/ipv6/ip6mr.c

@@ -141,9 +141,12 @@ static struct mr6_table *ip6mr_get_table(struct net *net, u32 id)
 static int ip6mr_fib_lookup(struct net *net, struct flowi6 *flp6,
 			    struct mr6_table **mrt)
 {
-	struct ip6mr_result res;
-	struct fib_lookup_arg arg = { .result = &res, };
 	int err;
+	struct ip6mr_result res;
+	struct fib_lookup_arg arg = {
+		.result = &res,
+		.flags = FIB_LOOKUP_NOREF,
+	};
 
 	err = fib_rules_lookup(net->ipv6.mr6_rules_ops,
 			       flowi6_to_flowi(flp6), 0, &arg);

+ 3 - 4
net/rds/ib_recv.c

@@ -421,8 +421,7 @@ static void rds_ib_recv_cache_put(struct list_head *new_item,
 				 struct rds_ib_refill_cache *cache)
 {
 	unsigned long flags;
-	struct list_head *old;
-	struct list_head __percpu *chpfirst;
+	struct list_head *old, *chpfirst;
 
 	local_irq_save(flags);
 
@@ -432,7 +431,7 @@ static void rds_ib_recv_cache_put(struct list_head *new_item,
 	else /* put on front */
 		list_add_tail(new_item, chpfirst);
 
-	__this_cpu_write(chpfirst, new_item);
+	__this_cpu_write(cache->percpu->first, new_item);
 	__this_cpu_inc(cache->percpu->count);
 
 	if (__this_cpu_read(cache->percpu->count) < RDS_IB_RECYCLE_BATCH_COUNT)
@@ -452,7 +451,7 @@ static void rds_ib_recv_cache_put(struct list_head *new_item,
 	} while (old);
 
 
-	__this_cpu_write(chpfirst, NULL);
+	__this_cpu_write(cache->percpu->first, NULL);
 	__this_cpu_write(cache->percpu->count, 0);
 end:
 	local_irq_restore(flags);
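
The rule the fix restores: __this_cpu_read()/__this_cpu_write() must be handed the per-CPU expression itself (here cache->percpu->first) so the accessor can resolve it against the executing CPU; the old code read the slot into a local pointer and then passed that local to __this_cpu_write(), which does not address the slot at all. A hedged before/after sketch using the hunk's field names:

/* chpfirst is now just a value read from this CPU's slot */
struct list_head *chpfirst = __this_cpu_read(cache->percpu->first);

/* wrong (old): treats the local variable as if it named the slot */
/* __this_cpu_write(chpfirst, new_item); */

/* right (new): name the per-CPU slot itself */
__this_cpu_write(cache->percpu->first, new_item);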