Browse Source

Merge tag 'v3.14' into drm-intel-next-queued

Linux 3.14

The vt-d w/a merged late in 3.14-rc needs a bit of fine-tuning, hence
backmerge.

Conflicts:
	drivers/gpu/drm/i915/i915_gem_gtt.c
	drivers/gpu/drm/i915/intel_ddi.c
	drivers/gpu/drm/i915/intel_dp.c

All trivial adjacent lines changed type conflicts, so trivial git
doesn't even show them in the merg commit.

Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Daniel Vetter 11 years ago
parent
commit
0654a65f26
100 changed files with 892 additions and 762 deletions
  1. 1 0
      Documentation/devicetree/bindings/net/micrel-ks8851.txt
  2. 2 2
      Documentation/networking/netlink_mmap.txt
  3. 8 6
      MAINTAINERS
  4. 1 1
      Makefile
  5. 1 1
      arch/arm/boot/dts/sama5d36.dtsi
  6. 15 9
      arch/mips/Kconfig
  7. 1 3
      arch/mips/alchemy/board-gpr.c
  8. 1 3
      arch/mips/alchemy/board-mtx1.c
  9. 1 0
      arch/mips/bcm47xx/board.c
  10. 1 1
      arch/mips/bcm47xx/nvram.c
  11. 12 10
      arch/mips/cavium-octeon/octeon-irq.c
  12. 13 2
      arch/mips/include/asm/asmmacro.h
  13. 1 1
      arch/mips/include/asm/fpu.h
  14. 10 10
      arch/mips/include/asm/ftrace.h
  15. 6 4
      arch/mips/include/asm/syscall.h
  16. 2 2
      arch/mips/include/uapi/asm/inst.h
  17. 2 3
      arch/mips/kernel/ftrace.c
  18. 8 8
      arch/mips/kernel/r4k_fpu.S
  19. 3 0
      arch/mips/kernel/rtlx-cmp.c
  20. 3 0
      arch/mips/kernel/rtlx-mt.c
  21. 3 3
      arch/mips/math-emu/cp1emu.c
  22. 1 1
      arch/mips/mti-malta/malta-amon.c
  23. 2 2
      arch/mips/mti-malta/malta-int.c
  24. 1 0
      arch/mips/pci/msi-octeon.c
  25. 0 11
      arch/parisc/include/asm/page.h
  26. 0 4
      arch/parisc/include/asm/spinlock.h
  27. 2 2
      arch/parisc/include/uapi/asm/unistd.h
  28. 0 64
      arch/parisc/kernel/cache.c
  29. 1 0
      arch/parisc/kernel/syscall_table.S
  30. 2 69
      arch/powerpc/kvm/book3s_hv_rmhandlers.S
  31. 3 1
      arch/sparc/kernel/process_64.c
  32. 2 2
      arch/sparc/kernel/syscalls.S
  33. 1 1
      arch/sparc/mm/tsb.c
  34. 2 12
      arch/x86/include/asm/pgtable.h
  35. 2 1
      arch/x86/include/asm/topology.h
  36. 1 19
      arch/x86/kernel/aperture_64.c
  37. 2 2
      arch/x86/xen/mmu.c
  38. 12 13
      block/blk-core.c
  39. 7 4
      block/blk-flush.c
  40. 1 1
      drivers/block/mtip32xx/mtip32xx.c
  41. 0 1
      drivers/block/rbd.c
  42. 1 1
      drivers/clocksource/vf_pit_timer.c
  43. 7 3
      drivers/gpu/drm/exynos/exynos_drm_drv.c
  44. 7 0
      drivers/gpu/drm/i915/i915_gem_stolen.c
  45. 41 30
      drivers/gpu/drm/i915/i915_irq.c
  46. 1 1
      drivers/gpu/drm/i915/intel_dp.c
  47. 10 4
      drivers/gpu/drm/nouveau/nouveau_drm.c
  48. 12 5
      drivers/gpu/drm/radeon/radeon_drv.c
  49. 8 3
      drivers/gpu/drm/udl/udl_gem.c
  50. 2 0
      drivers/hid/hid-lg4ff.c
  51. 12 15
      drivers/hid/hid-sony.c
  52. 2 2
      drivers/hid/hidraw.c
  53. 2 0
      drivers/i2c/busses/i2c-cpm.c
  54. 3 1
      drivers/input/evdev.c
  55. 11 1
      drivers/input/keyboard/adp5588-keys.c
  56. 16 13
      drivers/input/misc/da9052_onkey.c
  57. 0 1
      drivers/input/mouse/cypress_ps2.c
  58. 55 0
      drivers/input/mouse/synaptics.c
  59. 42 31
      drivers/input/mousedev.c
  60. 9 9
      drivers/isdn/capi/Kconfig
  61. 4 10
      drivers/net/ethernet/atheros/alx/main.c
  62. 2 2
      drivers/net/ethernet/atheros/atl1e/atl1e_main.c
  63. 56 55
      drivers/net/ethernet/broadcom/cnic.c
  64. 1 1
      drivers/net/ethernet/broadcom/cnic.h
  65. 1 1
      drivers/net/ethernet/broadcom/cnic_defs.h
  66. 13 3
      drivers/net/ethernet/broadcom/cnic_if.h
  67. 2 3
      drivers/net/ethernet/broadcom/tg3.c
  68. 20 40
      drivers/net/ethernet/marvell/mvneta.c
  69. 5 1
      drivers/net/ethernet/mellanox/mlx4/main.c
  70. 29 1
      drivers/net/ethernet/micrel/ks8851.c
  71. 3 1
      drivers/net/ethernet/qlogic/qlge/qlge_main.c
  72. 0 4
      drivers/net/ethernet/ti/cpsw.c
  73. 2 2
      drivers/net/ethernet/ti/davinci_cpdma.c
  74. 42 11
      drivers/net/ethernet/ti/davinci_emac.c
  75. 5 3
      drivers/net/ethernet/via/via-rhine.c
  76. 2 1
      drivers/net/ifb.c
  77. 1 2
      drivers/net/phy/phy_device.c
  78. 23 25
      drivers/net/usb/cdc_ncm.c
  79. 19 14
      drivers/net/usb/usbnet.c
  80. 4 1
      drivers/net/veth.c
  81. 3 3
      drivers/net/virtio_net.c
  82. 116 14
      drivers/net/vxlan.c
  83. 1 2
      drivers/net/wireless/ath/ath9k/hw.c
  84. 2 2
      drivers/net/wireless/ath/ath9k/xmit.c
  85. 3 1
      drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
  86. 3 3
      drivers/net/wireless/rt2x00/rt2800lib.c
  87. 8 8
      drivers/scsi/bnx2fc/bnx2fc_io.c
  88. 21 17
      drivers/scsi/bnx2fc/bnx2fc_tgt.c
  89. 26 26
      drivers/scsi/bnx2i/bnx2i_hwi.c
  90. 12 11
      drivers/scsi/bnx2i/bnx2i_iscsi.c
  91. 10 12
      drivers/tty/serial/sunhv.c
  92. 5 9
      drivers/tty/serial/sunsab.c
  93. 5 9
      drivers/tty/serial/sunsu.c
  94. 5 9
      drivers/tty/serial/sunzilog.c
  95. 19 1
      drivers/vhost/net.c
  96. 18 6
      drivers/xen/balloon.c
  97. 8 26
      fs/anon_inodes.c
  98. 2 2
      fs/dcache.c
  99. 9 6
      fs/ext4/inode.c
  100. 4 15
      fs/file.c

+ 1 - 0
Documentation/devicetree/bindings/net/micrel-ks8851.txt

@@ -7,3 +7,4 @@ Required properties:
 
 
 Optional properties:
 Optional properties:
 - local-mac-address : Ethernet mac address to use
 - local-mac-address : Ethernet mac address to use
+- vdd-supply:	supply for Ethernet mac

+ 2 - 2
Documentation/networking/netlink_mmap.txt

@@ -226,9 +226,9 @@ Ring setup:
 	void *rx_ring, *tx_ring;
 	void *rx_ring, *tx_ring;
 
 
 	/* Configure ring parameters */
 	/* Configure ring parameters */
-	if (setsockopt(fd, NETLINK_RX_RING, &req, sizeof(req)) < 0)
+	if (setsockopt(fd, SOL_NETLINK, NETLINK_RX_RING, &req, sizeof(req)) < 0)
 		exit(1);
 		exit(1);
-	if (setsockopt(fd, NETLINK_TX_RING, &req, sizeof(req)) < 0)
+	if (setsockopt(fd, SOL_NETLINK, NETLINK_TX_RING, &req, sizeof(req)) < 0)
 		exit(1)
 		exit(1)
 
 
 	/* Calculate size of each individual ring */
 	/* Calculate size of each individual ring */

+ 8 - 6
MAINTAINERS

@@ -911,11 +911,11 @@ F:	arch/arm/include/asm/hardware/dec21285.h
 F:	arch/arm/mach-footbridge/
 F:	arch/arm/mach-footbridge/
 
 
 ARM/FREESCALE IMX / MXC ARM ARCHITECTURE
 ARM/FREESCALE IMX / MXC ARM ARCHITECTURE
-M:	Shawn Guo <shawn.guo@linaro.org>
+M:	Shawn Guo <shawn.guo@freescale.com>
 M:	Sascha Hauer <kernel@pengutronix.de>
 M:	Sascha Hauer <kernel@pengutronix.de>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Maintained
 S:	Maintained
-T:	git git://git.linaro.org/people/shawnguo/linux-2.6.git
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/shawnguo/linux.git
 F:	arch/arm/mach-imx/
 F:	arch/arm/mach-imx/
 F:	arch/arm/boot/dts/imx*
 F:	arch/arm/boot/dts/imx*
 F:	arch/arm/configs/imx*_defconfig
 F:	arch/arm/configs/imx*_defconfig
@@ -1832,8 +1832,8 @@ F:	net/bluetooth/
 F:	include/net/bluetooth/
 F:	include/net/bluetooth/
 
 
 BONDING DRIVER
 BONDING DRIVER
-M:	Jay Vosburgh <fubar@us.ibm.com>
-M:	Veaceslav Falico <vfalico@redhat.com>
+M:	Jay Vosburgh <j.vosburgh@gmail.com>
+M:	Veaceslav Falico <vfalico@gmail.com>
 M:	Andy Gospodarek <andy@greyhouse.net>
 M:	Andy Gospodarek <andy@greyhouse.net>
 L:	netdev@vger.kernel.org
 L:	netdev@vger.kernel.org
 W:	http://sourceforge.net/projects/bonding/
 W:	http://sourceforge.net/projects/bonding/
@@ -2801,9 +2801,9 @@ S:	Supported
 F:	drivers/acpi/dock.c
 F:	drivers/acpi/dock.c
 
 
 DOCUMENTATION
 DOCUMENTATION
-M:	Rob Landley <rob@landley.net>
+M:	Randy Dunlap <rdunlap@infradead.org>
 L:	linux-doc@vger.kernel.org
 L:	linux-doc@vger.kernel.org
-T:	TBD
+T:	quilt http://www.infradead.org/~rdunlap/Doc/patches/
 S:	Maintained
 S:	Maintained
 F:	Documentation/
 F:	Documentation/
 
 
@@ -4545,6 +4545,7 @@ M:	Greg Rose <gregory.v.rose@intel.com>
 M:	Alex Duyck <alexander.h.duyck@intel.com>
 M:	Alex Duyck <alexander.h.duyck@intel.com>
 M:	John Ronciak <john.ronciak@intel.com>
 M:	John Ronciak <john.ronciak@intel.com>
 M:	Mitch Williams <mitch.a.williams@intel.com>
 M:	Mitch Williams <mitch.a.williams@intel.com>
+M:	Linux NICS <linux.nics@intel.com>
 L:	e1000-devel@lists.sourceforge.net
 L:	e1000-devel@lists.sourceforge.net
 W:	http://www.intel.com/support/feedback.htm
 W:	http://www.intel.com/support/feedback.htm
 W:	http://e1000.sourceforge.net/
 W:	http://e1000.sourceforge.net/
@@ -6005,6 +6006,7 @@ F:	include/uapi/linux/net.h
 F:	include/uapi/linux/netdevice.h
 F:	include/uapi/linux/netdevice.h
 F:	tools/net/
 F:	tools/net/
 F:	tools/testing/selftests/net/
 F:	tools/testing/selftests/net/
+F:	lib/random32.c
 
 
 NETWORKING [IPv4/IPv6]
 NETWORKING [IPv4/IPv6]
 M:	"David S. Miller" <davem@davemloft.net>
 M:	"David S. Miller" <davem@davemloft.net>

+ 1 - 1
Makefile

@@ -1,7 +1,7 @@
 VERSION = 3
 VERSION = 3
 PATCHLEVEL = 14
 PATCHLEVEL = 14
 SUBLEVEL = 0
 SUBLEVEL = 0
-EXTRAVERSION = -rc7
+EXTRAVERSION =
 NAME = Shuffling Zombie Juror
 NAME = Shuffling Zombie Juror
 
 
 # *DOCUMENTATION*
 # *DOCUMENTATION*

+ 1 - 1
arch/arm/boot/dts/sama5d36.dtsi

@@ -8,8 +8,8 @@
  */
  */
 #include "sama5d3.dtsi"
 #include "sama5d3.dtsi"
 #include "sama5d3_can.dtsi"
 #include "sama5d3_can.dtsi"
-#include "sama5d3_emac.dtsi"
 #include "sama5d3_gmac.dtsi"
 #include "sama5d3_gmac.dtsi"
+#include "sama5d3_emac.dtsi"
 #include "sama5d3_lcd.dtsi"
 #include "sama5d3_lcd.dtsi"
 #include "sama5d3_mci2.dtsi"
 #include "sama5d3_mci2.dtsi"
 #include "sama5d3_tcb1.dtsi"
 #include "sama5d3_tcb1.dtsi"

+ 15 - 9
arch/mips/Kconfig

@@ -1776,12 +1776,12 @@ endchoice
 
 
 config FORCE_MAX_ZONEORDER
 config FORCE_MAX_ZONEORDER
 	int "Maximum zone order"
 	int "Maximum zone order"
-	range 14 64 if HUGETLB_PAGE && PAGE_SIZE_64KB
-	default "14" if HUGETLB_PAGE && PAGE_SIZE_64KB
-	range 13 64 if HUGETLB_PAGE && PAGE_SIZE_32KB
-	default "13" if HUGETLB_PAGE && PAGE_SIZE_32KB
-	range 12 64 if HUGETLB_PAGE && PAGE_SIZE_16KB
-	default "12" if HUGETLB_PAGE && PAGE_SIZE_16KB
+	range 14 64 if MIPS_HUGE_TLB_SUPPORT && PAGE_SIZE_64KB
+	default "14" if MIPS_HUGE_TLB_SUPPORT && PAGE_SIZE_64KB
+	range 13 64 if MIPS_HUGE_TLB_SUPPORT && PAGE_SIZE_32KB
+	default "13" if MIPS_HUGE_TLB_SUPPORT && PAGE_SIZE_32KB
+	range 12 64 if MIPS_HUGE_TLB_SUPPORT && PAGE_SIZE_16KB
+	default "12" if MIPS_HUGE_TLB_SUPPORT && PAGE_SIZE_16KB
 	range 11 64
 	range 11 64
 	default "11"
 	default "11"
 	help
 	help
@@ -2353,9 +2353,8 @@ config SECCOMP
 	  If unsure, say Y. Only embedded should say N here.
 	  If unsure, say Y. Only embedded should say N here.
 
 
 config MIPS_O32_FP64_SUPPORT
 config MIPS_O32_FP64_SUPPORT
-	bool "Support for O32 binaries using 64-bit FP"
+	bool "Support for O32 binaries using 64-bit FP (EXPERIMENTAL)"
 	depends on 32BIT || MIPS32_O32
 	depends on 32BIT || MIPS32_O32
-	default y
 	help
 	help
 	  When this is enabled, the kernel will support use of 64-bit floating
 	  When this is enabled, the kernel will support use of 64-bit floating
 	  point registers with binaries using the O32 ABI along with the
 	  point registers with binaries using the O32 ABI along with the
@@ -2367,7 +2366,14 @@ config MIPS_O32_FP64_SUPPORT
 	  of your kernel & potentially improve FP emulation performance by
 	  of your kernel & potentially improve FP emulation performance by
 	  saying N here.
 	  saying N here.
 
 
-	  If unsure, say Y.
+	  Although binutils currently supports use of this flag the details
+	  concerning its effect upon the O32 ABI in userland are still being
+	  worked on. In order to avoid userland becoming dependant upon current
+	  behaviour before the details have been finalised, this option should
+	  be considered experimental and only enabled by those working upon
+	  said details.
+
+	  If unsure, say N.
 
 
 config USE_OF
 config USE_OF
 	bool
 	bool

+ 1 - 3
arch/mips/alchemy/board-gpr.c

@@ -53,10 +53,8 @@ void __init prom_init(void)
 	prom_init_cmdline();
 	prom_init_cmdline();
 
 
 	memsize_str = prom_getenv("memsize");
 	memsize_str = prom_getenv("memsize");
-	if (!memsize_str)
+	if (!memsize_str || kstrtoul(memsize_str, 0, &memsize))
 		memsize = 0x04000000;
 		memsize = 0x04000000;
-	else
-		strict_strtoul(memsize_str, 0, &memsize);
 	add_memory_region(0, memsize, BOOT_MEM_RAM);
 	add_memory_region(0, memsize, BOOT_MEM_RAM);
 }
 }
 
 

+ 1 - 3
arch/mips/alchemy/board-mtx1.c

@@ -52,10 +52,8 @@ void __init prom_init(void)
 	prom_init_cmdline();
 	prom_init_cmdline();
 
 
 	memsize_str = prom_getenv("memsize");
 	memsize_str = prom_getenv("memsize");
-	if (!memsize_str)
+	if (!memsize_str || kstrtoul(memsize_str, 0, &memsize))
 		memsize = 0x04000000;
 		memsize = 0x04000000;
-	else
-		strict_strtoul(memsize_str, 0, &memsize);
 	add_memory_region(0, memsize, BOOT_MEM_RAM);
 	add_memory_region(0, memsize, BOOT_MEM_RAM);
 }
 }
 
 

+ 1 - 0
arch/mips/bcm47xx/board.c

@@ -1,3 +1,4 @@
+#include <linux/errno.h>
 #include <linux/export.h>
 #include <linux/export.h>
 #include <linux/string.h>
 #include <linux/string.h>
 #include <bcm47xx_board.h>
 #include <bcm47xx_board.h>

+ 1 - 1
arch/mips/bcm47xx/nvram.c

@@ -196,7 +196,7 @@ int bcm47xx_nvram_gpio_pin(const char *name)
 	char nvram_var[10];
 	char nvram_var[10];
 	char buf[30];
 	char buf[30];
 
 
-	for (i = 0; i < 16; i++) {
+	for (i = 0; i < 32; i++) {
 		err = snprintf(nvram_var, sizeof(nvram_var), "gpio%i", i);
 		err = snprintf(nvram_var, sizeof(nvram_var), "gpio%i", i);
 		if (err <= 0)
 		if (err <= 0)
 			continue;
 			continue;

+ 12 - 10
arch/mips/cavium-octeon/octeon-irq.c

@@ -975,10 +975,6 @@ static int octeon_irq_ciu_xlat(struct irq_domain *d,
 	if (ciu > 1 || bit > 63)
 	if (ciu > 1 || bit > 63)
 		return -EINVAL;
 		return -EINVAL;
 
 
-	/* These are the GPIO lines */
-	if (ciu == 0 && bit >= 16 && bit < 32)
-		return -EINVAL;
-
 	*out_hwirq = (ciu << 6) | bit;
 	*out_hwirq = (ciu << 6) | bit;
 	*out_type = 0;
 	*out_type = 0;
 
 
@@ -1007,6 +1003,10 @@ static int octeon_irq_ciu_map(struct irq_domain *d,
 	if (!octeon_irq_virq_in_range(virq))
 	if (!octeon_irq_virq_in_range(virq))
 		return -EINVAL;
 		return -EINVAL;
 
 
+	/* Don't map irq if it is reserved for GPIO. */
+	if (line == 0 && bit >= 16 && bit <32)
+		return 0;
+
 	if (line > 1 || octeon_irq_ciu_to_irq[line][bit] != 0)
 	if (line > 1 || octeon_irq_ciu_to_irq[line][bit] != 0)
 		return -EINVAL;
 		return -EINVAL;
 
 
@@ -1525,10 +1525,6 @@ static int octeon_irq_ciu2_xlat(struct irq_domain *d,
 	ciu = intspec[0];
 	ciu = intspec[0];
 	bit = intspec[1];
 	bit = intspec[1];
 
 
-	/* Line 7  are the GPIO lines */
-	if (ciu > 6 || bit > 63)
-		return -EINVAL;
-
 	*out_hwirq = (ciu << 6) | bit;
 	*out_hwirq = (ciu << 6) | bit;
 	*out_type = 0;
 	*out_type = 0;
 
 
@@ -1570,8 +1566,14 @@ static int octeon_irq_ciu2_map(struct irq_domain *d,
 	if (!octeon_irq_virq_in_range(virq))
 	if (!octeon_irq_virq_in_range(virq))
 		return -EINVAL;
 		return -EINVAL;
 
 
-	/* Line 7  are the GPIO lines */
-	if (line > 6 || octeon_irq_ciu_to_irq[line][bit] != 0)
+	/*
+	 * Don't map irq if it is reserved for GPIO.
+	 * (Line 7 are the GPIO lines.)
+	 */
+	if (line == 7)
+		return 0;
+
+	if (line > 7 || octeon_irq_ciu_to_irq[line][bit] != 0)
 		return -EINVAL;
 		return -EINVAL;
 
 
 	if (octeon_irq_ciu2_is_edge(line, bit))
 	if (octeon_irq_ciu2_is_edge(line, bit))

+ 13 - 2
arch/mips/include/asm/asmmacro.h

@@ -9,6 +9,7 @@
 #define _ASM_ASMMACRO_H
 #define _ASM_ASMMACRO_H
 
 
 #include <asm/hazards.h>
 #include <asm/hazards.h>
+#include <asm/asm-offsets.h>
 
 
 #ifdef CONFIG_32BIT
 #ifdef CONFIG_32BIT
 #include <asm/asmmacro-32.h>
 #include <asm/asmmacro-32.h>
@@ -54,11 +55,21 @@
 	.endm
 	.endm
 
 
 	.macro	local_irq_disable reg=t0
 	.macro	local_irq_disable reg=t0
+#ifdef CONFIG_PREEMPT
+	lw      \reg, TI_PRE_COUNT($28)
+	addi    \reg, \reg, 1
+	sw      \reg, TI_PRE_COUNT($28)
+#endif
 	mfc0	\reg, CP0_STATUS
 	mfc0	\reg, CP0_STATUS
 	ori	\reg, \reg, 1
 	ori	\reg, \reg, 1
 	xori	\reg, \reg, 1
 	xori	\reg, \reg, 1
 	mtc0	\reg, CP0_STATUS
 	mtc0	\reg, CP0_STATUS
 	irq_disable_hazard
 	irq_disable_hazard
+#ifdef CONFIG_PREEMPT
+	lw      \reg, TI_PRE_COUNT($28)
+	addi    \reg, \reg, -1
+	sw      \reg, TI_PRE_COUNT($28)
+#endif
 	.endm
 	.endm
 #endif /* CONFIG_MIPS_MT_SMTC */
 #endif /* CONFIG_MIPS_MT_SMTC */
 
 
@@ -106,7 +117,7 @@
 	.endm
 	.endm
 
 
 	.macro	fpu_save_double thread status tmp
 	.macro	fpu_save_double thread status tmp
-#if defined(CONFIG_MIPS64) || defined(CONFIG_CPU_MIPS32_R2)
+#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2)
 	sll	\tmp, \status, 5
 	sll	\tmp, \status, 5
 	bgez	\tmp, 10f
 	bgez	\tmp, 10f
 	fpu_save_16odd \thread
 	fpu_save_16odd \thread
@@ -159,7 +170,7 @@
 	.endm
 	.endm
 
 
 	.macro	fpu_restore_double thread status tmp
 	.macro	fpu_restore_double thread status tmp
-#if defined(CONFIG_MIPS64) || defined(CONFIG_CPU_MIPS32_R2)
+#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2)
 	sll	\tmp, \status, 5
 	sll	\tmp, \status, 5
 	bgez	\tmp, 10f				# 16 register mode?
 	bgez	\tmp, 10f				# 16 register mode?
 
 

+ 1 - 1
arch/mips/include/asm/fpu.h

@@ -57,7 +57,7 @@ static inline int __enable_fpu(enum fpu_mode mode)
 		return 0;
 		return 0;
 
 
 	case FPU_64BIT:
 	case FPU_64BIT:
-#if !(defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_MIPS64))
+#if !(defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_64BIT))
 		/* we only have a 32-bit FPU */
 		/* we only have a 32-bit FPU */
 		return SIGFPE;
 		return SIGFPE;
 #endif
 #endif

+ 10 - 10
arch/mips/include/asm/ftrace.h

@@ -22,12 +22,12 @@ extern void _mcount(void);
 #define safe_load(load, src, dst, error)		\
 #define safe_load(load, src, dst, error)		\
 do {							\
 do {							\
 	asm volatile (					\
 	asm volatile (					\
-		"1: " load " %[" STR(dst) "], 0(%[" STR(src) "])\n"\
-		"   li %[" STR(error) "], 0\n"		\
+		"1: " load " %[tmp_dst], 0(%[tmp_src])\n"	\
+		"   li %[tmp_err], 0\n"			\
 		"2:\n"					\
 		"2:\n"					\
 							\
 							\
 		".section .fixup, \"ax\"\n"		\
 		".section .fixup, \"ax\"\n"		\
-		"3: li %[" STR(error) "], 1\n"		\
+		"3: li %[tmp_err], 1\n"			\
 		"   j 2b\n"				\
 		"   j 2b\n"				\
 		".previous\n"				\
 		".previous\n"				\
 							\
 							\
@@ -35,8 +35,8 @@ do {							\
 		STR(PTR) "\t1b, 3b\n\t"			\
 		STR(PTR) "\t1b, 3b\n\t"			\
 		".previous\n"				\
 		".previous\n"				\
 							\
 							\
-		: [dst] "=&r" (dst), [error] "=r" (error)\
-		: [src] "r" (src)			\
+		: [tmp_dst] "=&r" (dst), [tmp_err] "=r" (error)\
+		: [tmp_src] "r" (src)			\
 		: "memory"				\
 		: "memory"				\
 	);						\
 	);						\
 } while (0)
 } while (0)
@@ -44,12 +44,12 @@ do {							\
 #define safe_store(store, src, dst, error)	\
 #define safe_store(store, src, dst, error)	\
 do {						\
 do {						\
 	asm volatile (				\
 	asm volatile (				\
-		"1: " store " %[" STR(src) "], 0(%[" STR(dst) "])\n"\
-		"   li %[" STR(error) "], 0\n"	\
+		"1: " store " %[tmp_src], 0(%[tmp_dst])\n"\
+		"   li %[tmp_err], 0\n"		\
 		"2:\n"				\
 		"2:\n"				\
 						\
 						\
 		".section .fixup, \"ax\"\n"	\
 		".section .fixup, \"ax\"\n"	\
-		"3: li %[" STR(error) "], 1\n"	\
+		"3: li %[tmp_err], 1\n"		\
 		"   j 2b\n"			\
 		"   j 2b\n"			\
 		".previous\n"			\
 		".previous\n"			\
 						\
 						\
@@ -57,8 +57,8 @@ do {						\
 		STR(PTR) "\t1b, 3b\n\t"		\
 		STR(PTR) "\t1b, 3b\n\t"		\
 		".previous\n"			\
 		".previous\n"			\
 						\
 						\
-		: [error] "=r" (error)		\
-		: [dst] "r" (dst), [src] "r" (src)\
+		: [tmp_err] "=r" (error)	\
+		: [tmp_dst] "r" (dst), [tmp_src] "r" (src)\
 		: "memory"			\
 		: "memory"			\
 	);					\
 	);					\
 } while (0)
 } while (0)

+ 6 - 4
arch/mips/include/asm/syscall.h

@@ -13,6 +13,7 @@
 #ifndef __ASM_MIPS_SYSCALL_H
 #ifndef __ASM_MIPS_SYSCALL_H
 #define __ASM_MIPS_SYSCALL_H
 #define __ASM_MIPS_SYSCALL_H
 
 
+#include <linux/compiler.h>
 #include <linux/audit.h>
 #include <linux/audit.h>
 #include <linux/elf-em.h>
 #include <linux/elf-em.h>
 #include <linux/kernel.h>
 #include <linux/kernel.h>
@@ -39,14 +40,14 @@ static inline unsigned long mips_get_syscall_arg(unsigned long *arg,
 
 
 #ifdef CONFIG_32BIT
 #ifdef CONFIG_32BIT
 	case 4: case 5: case 6: case 7:
 	case 4: case 5: case 6: case 7:
-		return get_user(*arg, (int *)usp + 4 * n);
+		return get_user(*arg, (int *)usp + n);
 #endif
 #endif
 
 
 #ifdef CONFIG_64BIT
 #ifdef CONFIG_64BIT
 	case 4: case 5: case 6: case 7:
 	case 4: case 5: case 6: case 7:
 #ifdef CONFIG_MIPS32_O32
 #ifdef CONFIG_MIPS32_O32
 		if (test_thread_flag(TIF_32BIT_REGS))
 		if (test_thread_flag(TIF_32BIT_REGS))
-			return get_user(*arg, (int *)usp + 4 * n);
+			return get_user(*arg, (int *)usp + n);
 		else
 		else
 #endif
 #endif
 			*arg = regs->regs[4 + n];
 			*arg = regs->regs[4 + n];
@@ -57,6 +58,8 @@ static inline unsigned long mips_get_syscall_arg(unsigned long *arg,
 	default:
 	default:
 		BUG();
 		BUG();
 	}
 	}
+
+	unreachable();
 }
 }
 
 
 static inline long syscall_get_return_value(struct task_struct *task,
 static inline long syscall_get_return_value(struct task_struct *task,
@@ -83,11 +86,10 @@ static inline void syscall_get_arguments(struct task_struct *task,
 					 unsigned int i, unsigned int n,
 					 unsigned int i, unsigned int n,
 					 unsigned long *args)
 					 unsigned long *args)
 {
 {
-	unsigned long arg;
 	int ret;
 	int ret;
 
 
 	while (n--)
 	while (n--)
-		ret |= mips_get_syscall_arg(&arg, task, regs, i++);
+		ret |= mips_get_syscall_arg(args++, task, regs, i++);
 
 
 	/*
 	/*
 	 * No way to communicate an error because this is a void function.
 	 * No way to communicate an error because this is a void function.

+ 2 - 2
arch/mips/include/uapi/asm/inst.h

@@ -163,8 +163,8 @@ enum cop1_sdw_func {
  */
  */
 enum cop1x_func {
 enum cop1x_func {
 	lwxc1_op     =	0x00, ldxc1_op	   =  0x01,
 	lwxc1_op     =	0x00, ldxc1_op	   =  0x01,
-	pfetch_op    =	0x07, swxc1_op	   =  0x08,
-	sdxc1_op     =	0x09, madd_s_op	   =  0x20,
+	swxc1_op     =  0x08, sdxc1_op	   =  0x09,
+	pfetch_op    =	0x0f, madd_s_op	   =  0x20,
 	madd_d_op    =	0x21, madd_e_op	   =  0x22,
 	madd_d_op    =	0x21, madd_e_op	   =  0x22,
 	msub_s_op    =	0x28, msub_d_op	   =  0x29,
 	msub_s_op    =	0x28, msub_d_op	   =  0x29,
 	msub_e_op    =	0x2a, nmadd_s_op   =  0x30,
 	msub_e_op    =	0x2a, nmadd_s_op   =  0x30,

+ 2 - 3
arch/mips/kernel/ftrace.c

@@ -111,11 +111,10 @@ static int ftrace_modify_code_2(unsigned long ip, unsigned int new_code1,
 	safe_store_code(new_code1, ip, faulted);
 	safe_store_code(new_code1, ip, faulted);
 	if (unlikely(faulted))
 	if (unlikely(faulted))
 		return -EFAULT;
 		return -EFAULT;
-	ip += 4;
-	safe_store_code(new_code2, ip, faulted);
+	safe_store_code(new_code2, ip + 4, faulted);
 	if (unlikely(faulted))
 	if (unlikely(faulted))
 		return -EFAULT;
 		return -EFAULT;
-	flush_icache_range(ip, ip + 8); /* original ip + 12 */
+	flush_icache_range(ip, ip + 8);
 	return 0;
 	return 0;
 }
 }
 #endif
 #endif

+ 8 - 8
arch/mips/kernel/r4k_fpu.S

@@ -35,9 +35,9 @@
 LEAF(_save_fp_context)
 LEAF(_save_fp_context)
 	cfc1	t1, fcr31
 	cfc1	t1, fcr31
 
 
-#if defined(CONFIG_64BIT) || defined(CONFIG_MIPS32_R2)
+#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2)
 	.set	push
 	.set	push
-#ifdef CONFIG_MIPS32_R2
+#ifdef CONFIG_CPU_MIPS32_R2
 	.set	mips64r2
 	.set	mips64r2
 	mfc0	t0, CP0_STATUS
 	mfc0	t0, CP0_STATUS
 	sll	t0, t0, 5
 	sll	t0, t0, 5
@@ -146,11 +146,11 @@ LEAF(_save_fp_context32)
  *  - cp1 status/control register
  *  - cp1 status/control register
  */
  */
 LEAF(_restore_fp_context)
 LEAF(_restore_fp_context)
-	EX	lw t0, SC_FPC_CSR(a0)
+	EX	lw t1, SC_FPC_CSR(a0)
 
 
-#if defined(CONFIG_64BIT) || defined(CONFIG_MIPS32_R2)
+#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2)
 	.set	push
 	.set	push
-#ifdef CONFIG_MIPS32_R2
+#ifdef CONFIG_CPU_MIPS32_R2
 	.set	mips64r2
 	.set	mips64r2
 	mfc0	t0, CP0_STATUS
 	mfc0	t0, CP0_STATUS
 	sll	t0, t0, 5
 	sll	t0, t0, 5
@@ -191,7 +191,7 @@ LEAF(_restore_fp_context)
 	EX	ldc1 $f26, SC_FPREGS+208(a0)
 	EX	ldc1 $f26, SC_FPREGS+208(a0)
 	EX	ldc1 $f28, SC_FPREGS+224(a0)
 	EX	ldc1 $f28, SC_FPREGS+224(a0)
 	EX	ldc1 $f30, SC_FPREGS+240(a0)
 	EX	ldc1 $f30, SC_FPREGS+240(a0)
-	ctc1	t0, fcr31
+	ctc1	t1, fcr31
 	jr	ra
 	jr	ra
 	 li	v0, 0					# success
 	 li	v0, 0					# success
 	END(_restore_fp_context)
 	END(_restore_fp_context)
@@ -199,7 +199,7 @@ LEAF(_restore_fp_context)
 #ifdef CONFIG_MIPS32_COMPAT
 #ifdef CONFIG_MIPS32_COMPAT
 LEAF(_restore_fp_context32)
 LEAF(_restore_fp_context32)
 	/* Restore an o32 sigcontext.  */
 	/* Restore an o32 sigcontext.  */
-	EX	lw t0, SC32_FPC_CSR(a0)
+	EX	lw t1, SC32_FPC_CSR(a0)
 
 
 	mfc0	t0, CP0_STATUS
 	mfc0	t0, CP0_STATUS
 	sll	t0, t0, 5
 	sll	t0, t0, 5
@@ -239,7 +239,7 @@ LEAF(_restore_fp_context32)
 	EX	ldc1 $f26, SC32_FPREGS+208(a0)
 	EX	ldc1 $f26, SC32_FPREGS+208(a0)
 	EX	ldc1 $f28, SC32_FPREGS+224(a0)
 	EX	ldc1 $f28, SC32_FPREGS+224(a0)
 	EX	ldc1 $f30, SC32_FPREGS+240(a0)
 	EX	ldc1 $f30, SC32_FPREGS+240(a0)
-	ctc1	t0, fcr31
+	ctc1	t1, fcr31
 	jr	ra
 	jr	ra
 	 li	v0, 0					# success
 	 li	v0, 0					# success
 	END(_restore_fp_context32)
 	END(_restore_fp_context32)

+ 3 - 0
arch/mips/kernel/rtlx-cmp.c

@@ -112,5 +112,8 @@ void __exit rtlx_module_exit(void)
 
 
 	for (i = 0; i < RTLX_CHANNELS; i++)
 	for (i = 0; i < RTLX_CHANNELS; i++)
 		device_destroy(mt_class, MKDEV(major, i));
 		device_destroy(mt_class, MKDEV(major, i));
+
 	unregister_chrdev(major, RTLX_MODULE_NAME);
 	unregister_chrdev(major, RTLX_MODULE_NAME);
+
+	aprp_hook = NULL;
 }
 }

+ 3 - 0
arch/mips/kernel/rtlx-mt.c

@@ -144,5 +144,8 @@ void __exit rtlx_module_exit(void)
 
 
 	for (i = 0; i < RTLX_CHANNELS; i++)
 	for (i = 0; i < RTLX_CHANNELS; i++)
 		device_destroy(mt_class, MKDEV(major, i));
 		device_destroy(mt_class, MKDEV(major, i));
+
 	unregister_chrdev(major, RTLX_MODULE_NAME);
 	unregister_chrdev(major, RTLX_MODULE_NAME);
+
+	aprp_hook = NULL;
 }
 }

+ 3 - 3
arch/mips/math-emu/cp1emu.c

@@ -1538,10 +1538,10 @@ static int fpux_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
 		break;
 		break;
 	}
 	}
 
 
-	case 0x7:		/* 7 */
-		if (MIPSInst_FUNC(ir) != pfetch_op) {
+	case 0x3:
+		if (MIPSInst_FUNC(ir) != pfetch_op)
 			return SIGILL;
 			return SIGILL;
-		}
+
 		/* ignore prefx operation */
 		/* ignore prefx operation */
 		break;
 		break;
 
 

+ 1 - 1
arch/mips/mti-malta/malta-amon.c

@@ -72,7 +72,7 @@ int amon_cpu_start(int cpu,
 	return 0;
 	return 0;
 }
 }
 
 
-#ifdef CONFIG_MIPS_VPE_LOADER
+#ifdef CONFIG_MIPS_VPE_LOADER_CMP
 int vpe_run(struct vpe *v)
 int vpe_run(struct vpe *v)
 {
 {
 	struct vpe_notifications *n;
 	struct vpe_notifications *n;

+ 2 - 2
arch/mips/mti-malta/malta-int.c

@@ -119,7 +119,7 @@ static void malta_hw0_irqdispatch(void)
 
 
 	do_IRQ(MALTA_INT_BASE + irq);
 	do_IRQ(MALTA_INT_BASE + irq);
 
 
-#ifdef MIPS_VPE_APSP_API
+#ifdef CONFIG_MIPS_VPE_APSP_API_MT
 	if (aprp_hook)
 	if (aprp_hook)
 		aprp_hook();
 		aprp_hook();
 #endif
 #endif
@@ -310,7 +310,7 @@ static void ipi_call_dispatch(void)
 
 
 static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
 static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
 {
 {
-#ifdef MIPS_VPE_APSP_API
+#ifdef CONFIG_MIPS_VPE_APSP_API_CMP
 	if (aprp_hook)
 	if (aprp_hook)
 		aprp_hook();
 		aprp_hook();
 #endif
 #endif

+ 1 - 0
arch/mips/pci/msi-octeon.c

@@ -150,6 +150,7 @@ msi_irq_allocated:
 		msg.address_lo =
 		msg.address_lo =
 			((128ul << 20) + CVMX_PCI_MSI_RCV) & 0xffffffff;
 			((128ul << 20) + CVMX_PCI_MSI_RCV) & 0xffffffff;
 		msg.address_hi = ((128ul << 20) + CVMX_PCI_MSI_RCV) >> 32;
 		msg.address_hi = ((128ul << 20) + CVMX_PCI_MSI_RCV) >> 32;
+		break;
 	case OCTEON_DMA_BAR_TYPE_BIG:
 	case OCTEON_DMA_BAR_TYPE_BIG:
 		/* When using big bar, Bar 0 is based at 0 */
 		/* When using big bar, Bar 0 is based at 0 */
 		msg.address_lo = (0 + CVMX_PCI_MSI_RCV) & 0xffffffff;
 		msg.address_lo = (0 + CVMX_PCI_MSI_RCV) & 0xffffffff;

+ 0 - 11
arch/parisc/include/asm/page.h

@@ -32,17 +32,6 @@ void copy_page_asm(void *to, void *from);
 void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
 void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
 			struct page *pg);
 			struct page *pg);
 
 
-/* #define CONFIG_PARISC_TMPALIAS */
-
-#ifdef CONFIG_PARISC_TMPALIAS
-void clear_user_highpage(struct page *page, unsigned long vaddr);
-#define clear_user_highpage clear_user_highpage
-struct vm_area_struct;
-void copy_user_highpage(struct page *to, struct page *from,
-	unsigned long vaddr, struct vm_area_struct *vma);
-#define __HAVE_ARCH_COPY_USER_HIGHPAGE
-#endif
-
 /*
 /*
  * These are used to make use of C type-checking..
  * These are used to make use of C type-checking..
  */
  */

+ 0 - 4
arch/parisc/include/asm/spinlock.h

@@ -191,8 +191,4 @@ static __inline__ int arch_write_can_lock(arch_rwlock_t *rw)
 #define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
 #define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
 #define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
 #define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
 
 
-#define arch_spin_relax(lock)	cpu_relax()
-#define arch_read_relax(lock)	cpu_relax()
-#define arch_write_relax(lock)	cpu_relax()
-
 #endif /* __ASM_SPINLOCK_H */
 #endif /* __ASM_SPINLOCK_H */

+ 2 - 2
arch/parisc/include/uapi/asm/unistd.h

@@ -828,13 +828,13 @@
 #define __NR_finit_module	(__NR_Linux + 333)
 #define __NR_finit_module	(__NR_Linux + 333)
 #define __NR_sched_setattr	(__NR_Linux + 334)
 #define __NR_sched_setattr	(__NR_Linux + 334)
 #define __NR_sched_getattr	(__NR_Linux + 335)
 #define __NR_sched_getattr	(__NR_Linux + 335)
+#define __NR_utimes		(__NR_Linux + 336)
 
 
-#define __NR_Linux_syscalls	(__NR_sched_getattr + 1)
+#define __NR_Linux_syscalls	(__NR_utimes + 1)
 
 
 
 
 #define __IGNORE_select		/* newselect */
 #define __IGNORE_select		/* newselect */
 #define __IGNORE_fadvise64	/* fadvise64_64 */
 #define __IGNORE_fadvise64	/* fadvise64_64 */
-#define __IGNORE_utimes		/* utime */
 
 
 
 
 #define HPUX_GATEWAY_ADDR       0xC0000004
 #define HPUX_GATEWAY_ADDR       0xC0000004

+ 0 - 64
arch/parisc/kernel/cache.c

@@ -581,67 +581,3 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long
 		__flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
 		__flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
 	}
 	}
 }
 }
-
-#ifdef CONFIG_PARISC_TMPALIAS
-
-void clear_user_highpage(struct page *page, unsigned long vaddr)
-{
-	void *vto;
-	unsigned long flags;
-
-	/* Clear using TMPALIAS region.  The page doesn't need to
-	   be flushed but the kernel mapping needs to be purged.  */
-
-	vto = kmap_atomic(page);
-
-	/* The PA-RISC 2.0 Architecture book states on page F-6:
-	   "Before a write-capable translation is enabled, *all*
-	   non-equivalently-aliased translations must be removed
-	   from the page table and purged from the TLB.  (Note
-	   that the caches are not required to be flushed at this
-	   time.)  Before any non-equivalent aliased translation
-	   is re-enabled, the virtual address range for the writeable
-	   page (the entire page) must be flushed from the cache,
-	   and the write-capable translation removed from the page
-	   table and purged from the TLB."  */
-
-	purge_kernel_dcache_page_asm((unsigned long)vto);
-	purge_tlb_start(flags);
-	pdtlb_kernel(vto);
-	purge_tlb_end(flags);
-	preempt_disable();
-	clear_user_page_asm(vto, vaddr);
-	preempt_enable();
-
-	pagefault_enable();		/* kunmap_atomic(addr, KM_USER0); */
-}
-
-void copy_user_highpage(struct page *to, struct page *from,
-	unsigned long vaddr, struct vm_area_struct *vma)
-{
-	void *vfrom, *vto;
-	unsigned long flags;
-
-	/* Copy using TMPALIAS region.  This has the advantage
-	   that the `from' page doesn't need to be flushed.  However,
-	   the `to' page must be flushed in copy_user_page_asm since
-	   it can be used to bring in executable code.  */
-
-	vfrom = kmap_atomic(from);
-	vto = kmap_atomic(to);
-
-	purge_kernel_dcache_page_asm((unsigned long)vto);
-	purge_tlb_start(flags);
-	pdtlb_kernel(vto);
-	pdtlb_kernel(vfrom);
-	purge_tlb_end(flags);
-	preempt_disable();
-	copy_user_page_asm(vto, vfrom, vaddr);
-	flush_dcache_page_asm(__pa(vto), vaddr);
-	preempt_enable();
-
-	pagefault_enable();		/* kunmap_atomic(addr, KM_USER1); */
-	pagefault_enable();		/* kunmap_atomic(addr, KM_USER0); */
-}
-
-#endif /* CONFIG_PARISC_TMPALIAS */

+ 1 - 0
arch/parisc/kernel/syscall_table.S

@@ -431,6 +431,7 @@
 	ENTRY_SAME(finit_module)
 	ENTRY_SAME(finit_module)
 	ENTRY_SAME(sched_setattr)
 	ENTRY_SAME(sched_setattr)
 	ENTRY_SAME(sched_getattr)	/* 335 */
 	ENTRY_SAME(sched_getattr)	/* 335 */
+	ENTRY_COMP(utimes)
 
 
 	/* Nothing yet */
 	/* Nothing yet */
 
 

+ 2 - 69
arch/powerpc/kvm/book3s_hv_rmhandlers.S

@@ -1504,73 +1504,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 1:	addi	r8,r8,16
 1:	addi	r8,r8,16
 	.endr
 	.endr
 
 
-	/* Save DEC */
-	mfspr	r5,SPRN_DEC
-	mftb	r6
-	extsw	r5,r5
-	add	r5,r5,r6
-	std	r5,VCPU_DEC_EXPIRES(r9)
-
-BEGIN_FTR_SECTION
-	b	8f
-END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
-	/* Turn on TM so we can access TFHAR/TFIAR/TEXASR */
-	mfmsr	r8
-	li	r0, 1
-	rldimi	r8, r0, MSR_TM_LG, 63-MSR_TM_LG
-	mtmsrd	r8
-
-	/* Save POWER8-specific registers */
-	mfspr	r5, SPRN_IAMR
-	mfspr	r6, SPRN_PSPB
-	mfspr	r7, SPRN_FSCR
-	std	r5, VCPU_IAMR(r9)
-	stw	r6, VCPU_PSPB(r9)
-	std	r7, VCPU_FSCR(r9)
-	mfspr	r5, SPRN_IC
-	mfspr	r6, SPRN_VTB
-	mfspr	r7, SPRN_TAR
-	std	r5, VCPU_IC(r9)
-	std	r6, VCPU_VTB(r9)
-	std	r7, VCPU_TAR(r9)
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
-	mfspr	r5, SPRN_TFHAR
-	mfspr	r6, SPRN_TFIAR
-	mfspr	r7, SPRN_TEXASR
-	std	r5, VCPU_TFHAR(r9)
-	std	r6, VCPU_TFIAR(r9)
-	std	r7, VCPU_TEXASR(r9)
-#endif
-	mfspr	r8, SPRN_EBBHR
-	std	r8, VCPU_EBBHR(r9)
-	mfspr	r5, SPRN_EBBRR
-	mfspr	r6, SPRN_BESCR
-	mfspr	r7, SPRN_CSIGR
-	mfspr	r8, SPRN_TACR
-	std	r5, VCPU_EBBRR(r9)
-	std	r6, VCPU_BESCR(r9)
-	std	r7, VCPU_CSIGR(r9)
-	std	r8, VCPU_TACR(r9)
-	mfspr	r5, SPRN_TCSCR
-	mfspr	r6, SPRN_ACOP
-	mfspr	r7, SPRN_PID
-	mfspr	r8, SPRN_WORT
-	std	r5, VCPU_TCSCR(r9)
-	std	r6, VCPU_ACOP(r9)
-	stw	r7, VCPU_GUEST_PID(r9)
-	std	r8, VCPU_WORT(r9)
-8:
-
-	/* Save and reset AMR and UAMOR before turning on the MMU */
-BEGIN_FTR_SECTION
-	mfspr	r5,SPRN_AMR
-	mfspr	r6,SPRN_UAMOR
-	std	r5,VCPU_AMR(r9)
-	std	r6,VCPU_UAMOR(r9)
-	li	r6,0
-	mtspr	SPRN_AMR,r6
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
-
 	/* Unset guest mode */
 	/* Unset guest mode */
 	li	r0, KVM_GUEST_MODE_NONE
 	li	r0, KVM_GUEST_MODE_NONE
 	stb	r0, HSTATE_IN_GUEST(r13)
 	stb	r0, HSTATE_IN_GUEST(r13)
@@ -2203,7 +2136,7 @@ BEGIN_FTR_SECTION
 END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
 END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
 #endif
 #endif
 	mfspr	r6,SPRN_VRSAVE
 	mfspr	r6,SPRN_VRSAVE
-	stw	r6,VCPU_VRSAVE(r3)
+	stw	r6,VCPU_VRSAVE(r31)
 	mtlr	r30
 	mtlr	r30
 	mtmsrd	r5
 	mtmsrd	r5
 	isync
 	isync
@@ -2240,7 +2173,7 @@ BEGIN_FTR_SECTION
 	bl	.load_vr_state
 	bl	.load_vr_state
 END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
 END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
 #endif
 #endif
-	lwz	r7,VCPU_VRSAVE(r4)
+	lwz	r7,VCPU_VRSAVE(r31)
 	mtspr	SPRN_VRSAVE,r7
 	mtspr	SPRN_VRSAVE,r7
 	mtlr	r30
 	mtlr	r30
 	mr	r4,r31
 	mr	r4,r31

+ 3 - 1
arch/sparc/kernel/process_64.c

@@ -58,9 +58,12 @@ void arch_cpu_idle(void)
 {
 {
 	if (tlb_type != hypervisor) {
 	if (tlb_type != hypervisor) {
 		touch_nmi_watchdog();
 		touch_nmi_watchdog();
+		local_irq_enable();
 	} else {
 	} else {
 		unsigned long pstate;
 		unsigned long pstate;
 
 
+		local_irq_enable();
+
                 /* The sun4v sleeping code requires that we have PSTATE.IE cleared over
                 /* The sun4v sleeping code requires that we have PSTATE.IE cleared over
                  * the cpu sleep hypervisor call.
                  * the cpu sleep hypervisor call.
                  */
                  */
@@ -82,7 +85,6 @@ void arch_cpu_idle(void)
 			: "=&r" (pstate)
 			: "=&r" (pstate)
 			: "i" (PSTATE_IE));
 			: "i" (PSTATE_IE));
 	}
 	}
-	local_irq_enable();
 }
 }
 
 
 #ifdef CONFIG_HOTPLUG_CPU
 #ifdef CONFIG_HOTPLUG_CPU

+ 2 - 2
arch/sparc/kernel/syscalls.S

@@ -189,7 +189,8 @@ linux_sparc_syscall32:
 	 mov	%i0, %l5				! IEU1
 	 mov	%i0, %l5				! IEU1
 5:	call	%l7					! CTI	Group brk forced
 5:	call	%l7					! CTI	Group brk forced
 	 srl	%i5, 0, %o5				! IEU1
 	 srl	%i5, 0, %o5				! IEU1
-	ba,a,pt	%xcc, 3f
+	ba,pt	%xcc, 3f
+	 sra	%o0, 0, %o0
 
 
 	/* Linux native system calls enter here... */
 	/* Linux native system calls enter here... */
 	.align	32
 	.align	32
@@ -217,7 +218,6 @@ linux_sparc_syscall:
 3:	stx	%o0, [%sp + PTREGS_OFF + PT_V9_I0]
 3:	stx	%o0, [%sp + PTREGS_OFF + PT_V9_I0]
 ret_sys_call:
 ret_sys_call:
 	ldx	[%sp + PTREGS_OFF + PT_V9_TSTATE], %g3
 	ldx	[%sp + PTREGS_OFF + PT_V9_TSTATE], %g3
-	sra	%o0, 0, %o0
 	mov	%ulo(TSTATE_XCARRY | TSTATE_ICARRY), %g2
 	mov	%ulo(TSTATE_XCARRY | TSTATE_ICARRY), %g2
 	sllx	%g2, 32, %g2
 	sllx	%g2, 32, %g2
 
 

+ 1 - 1
arch/sparc/mm/tsb.c

@@ -273,7 +273,7 @@ void __init pgtable_cache_init(void)
 		prom_halt();
 		prom_halt();
 	}
 	}
 
 
-	for (i = 0; i < 8; i++) {
+	for (i = 0; i < ARRAY_SIZE(tsb_cache_names); i++) {
 		unsigned long size = 8192 << i;
 		unsigned long size = 8192 << i;
 		const char *name = tsb_cache_names[i];
 		const char *name = tsb_cache_names[i];
 
 

+ 2 - 12
arch/x86/include/asm/pgtable.h

@@ -445,20 +445,10 @@ static inline int pte_same(pte_t a, pte_t b)
 	return a.pte == b.pte;
 	return a.pte == b.pte;
 }
 }
 
 
-static inline int pteval_present(pteval_t pteval)
-{
-	/*
-	 * Yes Linus, _PAGE_PROTNONE == _PAGE_NUMA. Expressing it this
-	 * way clearly states that the intent is that protnone and numa
-	 * hinting ptes are considered present for the purposes of
-	 * pagetable operations like zapping, protection changes, gup etc.
-	 */
-	return pteval & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_NUMA);
-}
-
 static inline int pte_present(pte_t a)
 static inline int pte_present(pte_t a)
 {
 {
-	return pteval_present(pte_flags(a));
+	return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE |
+			       _PAGE_NUMA);
 }
 }
 
 
 #define pte_accessible pte_accessible
 #define pte_accessible pte_accessible

+ 2 - 1
arch/x86/include/asm/topology.h

@@ -119,9 +119,10 @@ static inline void setup_node_to_cpumask_map(void) { }
 
 
 extern const struct cpumask *cpu_coregroup_mask(int cpu);
 extern const struct cpumask *cpu_coregroup_mask(int cpu);
 
 
-#ifdef ENABLE_TOPO_DEFINES
 #define topology_physical_package_id(cpu)	(cpu_data(cpu).phys_proc_id)
 #define topology_physical_package_id(cpu)	(cpu_data(cpu).phys_proc_id)
 #define topology_core_id(cpu)			(cpu_data(cpu).cpu_core_id)
 #define topology_core_id(cpu)			(cpu_data(cpu).cpu_core_id)
+
+#ifdef ENABLE_TOPO_DEFINES
 #define topology_core_cpumask(cpu)		(per_cpu(cpu_core_map, cpu))
 #define topology_core_cpumask(cpu)		(per_cpu(cpu_core_map, cpu))
 #define topology_thread_cpumask(cpu)		(per_cpu(cpu_sibling_map, cpu))
 #define topology_thread_cpumask(cpu)		(per_cpu(cpu_sibling_map, cpu))
 #endif
 #endif

+ 1 - 19
arch/x86/kernel/aperture_64.c

@@ -18,7 +18,6 @@
 #include <linux/pci_ids.h>
 #include <linux/pci_ids.h>
 #include <linux/pci.h>
 #include <linux/pci.h>
 #include <linux/bitops.h>
 #include <linux/bitops.h>
-#include <linux/ioport.h>
 #include <linux/suspend.h>
 #include <linux/suspend.h>
 #include <asm/e820.h>
 #include <asm/e820.h>
 #include <asm/io.h>
 #include <asm/io.h>
@@ -54,18 +53,6 @@ int fallback_aper_force __initdata;
 
 
 int fix_aperture __initdata = 1;
 int fix_aperture __initdata = 1;
 
 
-static struct resource gart_resource = {
-	.name	= "GART",
-	.flags	= IORESOURCE_MEM,
-};
-
-static void __init insert_aperture_resource(u32 aper_base, u32 aper_size)
-{
-	gart_resource.start = aper_base;
-	gart_resource.end = aper_base + aper_size - 1;
-	insert_resource(&iomem_resource, &gart_resource);
-}
-
 /* This code runs before the PCI subsystem is initialized, so just
 /* This code runs before the PCI subsystem is initialized, so just
    access the northbridge directly. */
    access the northbridge directly. */
 
 
@@ -96,7 +83,6 @@ static u32 __init allocate_aperture(void)
 	memblock_reserve(addr, aper_size);
 	memblock_reserve(addr, aper_size);
 	printk(KERN_INFO "Mapping aperture over %d KB of RAM @ %lx\n",
 	printk(KERN_INFO "Mapping aperture over %d KB of RAM @ %lx\n",
 			aper_size >> 10, addr);
 			aper_size >> 10, addr);
-	insert_aperture_resource((u32)addr, aper_size);
 	register_nosave_region(addr >> PAGE_SHIFT,
 	register_nosave_region(addr >> PAGE_SHIFT,
 			       (addr+aper_size) >> PAGE_SHIFT);
 			       (addr+aper_size) >> PAGE_SHIFT);
 
 
@@ -444,12 +430,8 @@ int __init gart_iommu_hole_init(void)
 
 
 out:
 out:
 	if (!fix && !fallback_aper_force) {
 	if (!fix && !fallback_aper_force) {
-		if (last_aper_base) {
-			unsigned long n = (32 * 1024 * 1024) << last_aper_order;
-
-			insert_aperture_resource((u32)last_aper_base, n);
+		if (last_aper_base)
 			return 1;
 			return 1;
-		}
 		return 0;
 		return 0;
 	}
 	}
 
 

+ 2 - 2
arch/x86/xen/mmu.c

@@ -365,7 +365,7 @@ void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
 /* Assume pteval_t is equivalent to all the other *val_t types. */
 /* Assume pteval_t is equivalent to all the other *val_t types. */
 static pteval_t pte_mfn_to_pfn(pteval_t val)
 static pteval_t pte_mfn_to_pfn(pteval_t val)
 {
 {
-	if (pteval_present(val)) {
+	if (val & _PAGE_PRESENT) {
 		unsigned long mfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
 		unsigned long mfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
 		unsigned long pfn = mfn_to_pfn(mfn);
 		unsigned long pfn = mfn_to_pfn(mfn);
 
 
@@ -381,7 +381,7 @@ static pteval_t pte_mfn_to_pfn(pteval_t val)
 
 
 static pteval_t pte_pfn_to_mfn(pteval_t val)
 static pteval_t pte_pfn_to_mfn(pteval_t val)
 {
 {
-	if (pteval_present(val)) {
+	if (val & _PAGE_PRESENT) {
 		unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
 		unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
 		pteval_t flags = val & PTE_FLAGS_MASK;
 		pteval_t flags = val & PTE_FLAGS_MASK;
 		unsigned long mfn;
 		unsigned long mfn;

+ 12 - 13
block/blk-core.c

@@ -693,20 +693,11 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
 	if (!uninit_q)
 	if (!uninit_q)
 		return NULL;
 		return NULL;
 
 
-	uninit_q->flush_rq = kzalloc(sizeof(struct request), GFP_KERNEL);
-	if (!uninit_q->flush_rq)
-		goto out_cleanup_queue;
-
 	q = blk_init_allocated_queue(uninit_q, rfn, lock);
 	q = blk_init_allocated_queue(uninit_q, rfn, lock);
 	if (!q)
 	if (!q)
-		goto out_free_flush_rq;
-	return q;
+		blk_cleanup_queue(uninit_q);
 
 
-out_free_flush_rq:
-	kfree(uninit_q->flush_rq);
-out_cleanup_queue:
-	blk_cleanup_queue(uninit_q);
-	return NULL;
+	return q;
 }
 }
 EXPORT_SYMBOL(blk_init_queue_node);
 EXPORT_SYMBOL(blk_init_queue_node);
 
 
@@ -717,9 +708,13 @@ blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
 	if (!q)
 	if (!q)
 		return NULL;
 		return NULL;
 
 
-	if (blk_init_rl(&q->root_rl, q, GFP_KERNEL))
+	q->flush_rq = kzalloc(sizeof(struct request), GFP_KERNEL);
+	if (!q->flush_rq)
 		return NULL;
 		return NULL;
 
 
+	if (blk_init_rl(&q->root_rl, q, GFP_KERNEL))
+		goto fail;
+
 	q->request_fn		= rfn;
 	q->request_fn		= rfn;
 	q->prep_rq_fn		= NULL;
 	q->prep_rq_fn		= NULL;
 	q->unprep_rq_fn		= NULL;
 	q->unprep_rq_fn		= NULL;
@@ -742,12 +737,16 @@ blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
 	/* init elevator */
 	/* init elevator */
 	if (elevator_init(q, NULL)) {
 	if (elevator_init(q, NULL)) {
 		mutex_unlock(&q->sysfs_lock);
 		mutex_unlock(&q->sysfs_lock);
-		return NULL;
+		goto fail;
 	}
 	}
 
 
 	mutex_unlock(&q->sysfs_lock);
 	mutex_unlock(&q->sysfs_lock);
 
 
 	return q;
 	return q;
+
+fail:
+	kfree(q->flush_rq);
+	return NULL;
 }
 }
 EXPORT_SYMBOL(blk_init_allocated_queue);
 EXPORT_SYMBOL(blk_init_allocated_queue);
 
 

+ 7 - 4
block/blk-flush.c

@@ -140,14 +140,17 @@ static void mq_flush_run(struct work_struct *work)
 	blk_mq_insert_request(rq, false, true, false);
 	blk_mq_insert_request(rq, false, true, false);
 }
 }
 
 
-static bool blk_flush_queue_rq(struct request *rq)
+static bool blk_flush_queue_rq(struct request *rq, bool add_front)
 {
 {
 	if (rq->q->mq_ops) {
 	if (rq->q->mq_ops) {
 		INIT_WORK(&rq->mq_flush_work, mq_flush_run);
 		INIT_WORK(&rq->mq_flush_work, mq_flush_run);
 		kblockd_schedule_work(rq->q, &rq->mq_flush_work);
 		kblockd_schedule_work(rq->q, &rq->mq_flush_work);
 		return false;
 		return false;
 	} else {
 	} else {
-		list_add_tail(&rq->queuelist, &rq->q->queue_head);
+		if (add_front)
+			list_add(&rq->queuelist, &rq->q->queue_head);
+		else
+			list_add_tail(&rq->queuelist, &rq->q->queue_head);
 		return true;
 		return true;
 	}
 	}
 }
 }
@@ -193,7 +196,7 @@ static bool blk_flush_complete_seq(struct request *rq, unsigned int seq,
 
 
 	case REQ_FSEQ_DATA:
 	case REQ_FSEQ_DATA:
 		list_move_tail(&rq->flush.list, &q->flush_data_in_flight);
 		list_move_tail(&rq->flush.list, &q->flush_data_in_flight);
-		queued = blk_flush_queue_rq(rq);
+		queued = blk_flush_queue_rq(rq, true);
 		break;
 		break;
 
 
 	case REQ_FSEQ_DONE:
 	case REQ_FSEQ_DONE:
@@ -326,7 +329,7 @@ static bool blk_kick_flush(struct request_queue *q)
 	q->flush_rq->rq_disk = first_rq->rq_disk;
 	q->flush_rq->rq_disk = first_rq->rq_disk;
 	q->flush_rq->end_io = flush_end_io;
 	q->flush_rq->end_io = flush_end_io;
 
 
-	return blk_flush_queue_rq(q->flush_rq);
+	return blk_flush_queue_rq(q->flush_rq, false);
 }
 }
 
 
 static void flush_data_end_io(struct request *rq, int error)
 static void flush_data_end_io(struct request *rq, int error)

+ 1 - 1
drivers/block/mtip32xx/mtip32xx.c

@@ -4498,7 +4498,7 @@ static int mtip_pci_probe(struct pci_dev *pdev,
 	}
 	}
 	dev_info(&pdev->dev, "NUMA node %d (closest: %d,%d, probe on %d:%d)\n",
 	dev_info(&pdev->dev, "NUMA node %d (closest: %d,%d, probe on %d:%d)\n",
 		my_node, pcibus_to_node(pdev->bus), dev_to_node(&pdev->dev),
 		my_node, pcibus_to_node(pdev->bus), dev_to_node(&pdev->dev),
-		cpu_to_node(smp_processor_id()), smp_processor_id());
+		cpu_to_node(raw_smp_processor_id()), raw_smp_processor_id());
 
 
 	dd = kzalloc_node(sizeof(struct driver_data), GFP_KERNEL, my_node);
 	dd = kzalloc_node(sizeof(struct driver_data), GFP_KERNEL, my_node);
 	if (dd == NULL) {
 	if (dd == NULL) {

+ 0 - 1
drivers/block/rbd.c

@@ -2109,7 +2109,6 @@ static void rbd_img_obj_callback(struct rbd_obj_request *obj_request)
 	rbd_assert(img_request->obj_request_count > 0);
 	rbd_assert(img_request->obj_request_count > 0);
 	rbd_assert(which != BAD_WHICH);
 	rbd_assert(which != BAD_WHICH);
 	rbd_assert(which < img_request->obj_request_count);
 	rbd_assert(which < img_request->obj_request_count);
-	rbd_assert(which >= img_request->next_completion);
 
 
 	spin_lock_irq(&img_request->completion_lock);
 	spin_lock_irq(&img_request->completion_lock);
 	if (which != img_request->next_completion)
 	if (which != img_request->next_completion)

+ 1 - 1
drivers/clocksource/vf_pit_timer.c

@@ -54,7 +54,7 @@ static inline void pit_irq_acknowledge(void)
 
 
 static u64 pit_read_sched_clock(void)
 static u64 pit_read_sched_clock(void)
 {
 {
-	return __raw_readl(clksrc_base + PITCVAL);
+	return ~__raw_readl(clksrc_base + PITCVAL);
 }
 }
 
 
 static int __init pit_clocksource_init(unsigned long rate)
 static int __init pit_clocksource_init(unsigned long rate)

+ 7 - 3
drivers/gpu/drm/exynos/exynos_drm_drv.c

@@ -172,20 +172,24 @@ static int exynos_drm_open(struct drm_device *dev, struct drm_file *file)
 
 
 	ret = exynos_drm_subdrv_open(dev, file);
 	ret = exynos_drm_subdrv_open(dev, file);
 	if (ret)
 	if (ret)
-		goto out;
+		goto err_file_priv_free;
 
 
 	anon_filp = anon_inode_getfile("exynos_gem", &exynos_drm_gem_fops,
 	anon_filp = anon_inode_getfile("exynos_gem", &exynos_drm_gem_fops,
 					NULL, 0);
 					NULL, 0);
 	if (IS_ERR(anon_filp)) {
 	if (IS_ERR(anon_filp)) {
 		ret = PTR_ERR(anon_filp);
 		ret = PTR_ERR(anon_filp);
-		goto out;
+		goto err_subdrv_close;
 	}
 	}
 
 
 	anon_filp->f_mode = FMODE_READ | FMODE_WRITE;
 	anon_filp->f_mode = FMODE_READ | FMODE_WRITE;
 	file_priv->anon_filp = anon_filp;
 	file_priv->anon_filp = anon_filp;
 
 
 	return ret;
 	return ret;
-out:
+
+err_subdrv_close:
+	exynos_drm_subdrv_close(dev, file);
+
+err_file_priv_free:
 	kfree(file_priv);
 	kfree(file_priv);
 	file->driver_priv = NULL;
 	file->driver_priv = NULL;
 	return ret;
 	return ret;

+ 7 - 0
drivers/gpu/drm/i915/i915_gem_stolen.c

@@ -214,6 +214,13 @@ int i915_gem_init_stolen(struct drm_device *dev)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int bios_reserved = 0;
 	int bios_reserved = 0;
 
 
+#ifdef CONFIG_INTEL_IOMMU
+	if (intel_iommu_gfx_mapped) {
+		DRM_INFO("DMAR active, disabling use of stolen memory\n");
+		return 0;
+	}
+#endif
+
 	if (dev_priv->gtt.stolen_size == 0)
 	if (dev_priv->gtt.stolen_size == 0)
 		return 0;
 		return 0;
 
 

+ 41 - 30
drivers/gpu/drm/i915/i915_irq.c

@@ -718,33 +718,25 @@ static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
 
 
 /* raw reads, only for fast reads of display block, no need for forcewake etc. */
 /* raw reads, only for fast reads of display block, no need for forcewake etc. */
 #define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))
 #define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))
-#define __raw_i915_read16(dev_priv__, reg__) readw((dev_priv__)->regs + (reg__))
 
 
 static bool ilk_pipe_in_vblank_locked(struct drm_device *dev, enum pipe pipe)
 static bool ilk_pipe_in_vblank_locked(struct drm_device *dev, enum pipe pipe)
 {
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	uint32_t status;
 	uint32_t status;
-
-	if (INTEL_INFO(dev)->gen < 7) {
-		status = pipe == PIPE_A ?
-			DE_PIPEA_VBLANK :
-			DE_PIPEB_VBLANK;
+	int reg;
+
+	if (INTEL_INFO(dev)->gen >= 8) {
+		status = GEN8_PIPE_VBLANK;
+		reg = GEN8_DE_PIPE_ISR(pipe);
+	} else if (INTEL_INFO(dev)->gen >= 7) {
+		status = DE_PIPE_VBLANK_IVB(pipe);
+		reg = DEISR;
 	} else {
 	} else {
-		switch (pipe) {
-		default:
-		case PIPE_A:
-			status = DE_PIPEA_VBLANK_IVB;
-			break;
-		case PIPE_B:
-			status = DE_PIPEB_VBLANK_IVB;
-			break;
-		case PIPE_C:
-			status = DE_PIPEC_VBLANK_IVB;
-			break;
-		}
+		status = DE_PIPE_VBLANK(pipe);
+		reg = DEISR;
 	}
 	}
 
 
-	return __raw_i915_read32(dev_priv, DEISR) & status;
+	return __raw_i915_read32(dev_priv, reg) & status;
 }
 }
 
 
 static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
 static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
@@ -802,7 +794,28 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
 		else
 		else
 			position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
 			position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
 
 
-		if (HAS_PCH_SPLIT(dev)) {
+		if (HAS_DDI(dev)) {
+			/*
+			 * On HSW HDMI outputs there seems to be a 2 line
+			 * difference, whereas eDP has the normal 1 line
+			 * difference that earlier platforms have. External
+			 * DP is unknown. For now just check for the 2 line
+			 * difference case on all output types on HSW+.
+			 *
+			 * This might misinterpret the scanline counter being
+			 * one line too far along on eDP, but that's less
+			 * dangerous than the alternative since that would lead
+			 * the vblank timestamp code astray when it sees a
+			 * scanline count before vblank_start during a vblank
+			 * interrupt.
+			 */
+			in_vbl = ilk_pipe_in_vblank_locked(dev, pipe);
+			if ((in_vbl && (position == vbl_start - 2 ||
+					position == vbl_start - 1)) ||
+			    (!in_vbl && (position == vbl_end - 2 ||
+					 position == vbl_end - 1)))
+				position = (position + 2) % vtotal;
+		} else if (HAS_PCH_SPLIT(dev)) {
 			/*
 			/*
 			 * The scanline counter increments at the leading edge
 			 * The scanline counter increments at the leading edge
 			 * of hsync, ie. it completely misses the active portion
 			 * of hsync, ie. it completely misses the active portion
@@ -2947,10 +2960,9 @@ static void ibx_irq_postinstall(struct drm_device *dev)
 		return;
 		return;
 
 
 	if (HAS_PCH_IBX(dev)) {
 	if (HAS_PCH_IBX(dev)) {
-		mask = SDE_GMBUS | SDE_AUX_MASK | SDE_TRANSB_FIFO_UNDER |
-		       SDE_TRANSA_FIFO_UNDER | SDE_POISON;
+		mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
 	} else {
 	} else {
-		mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT | SDE_ERROR_CPT;
+		mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
 
 
 		I915_WRITE(SERR_INT, I915_READ(SERR_INT));
 		I915_WRITE(SERR_INT, I915_READ(SERR_INT));
 	}
 	}
@@ -3010,20 +3022,19 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
 		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
 		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
 				DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB |
 				DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB |
 				DE_PLANEB_FLIP_DONE_IVB |
 				DE_PLANEB_FLIP_DONE_IVB |
-				DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB |
-				DE_ERR_INT_IVB);
+				DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB);
 		extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
 		extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
-			      DE_PIPEA_VBLANK_IVB);
+			      DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB);
 
 
 		I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT));
 		I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT));
 	} else {
 	} else {
 		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
 		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
 				DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
 				DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
 				DE_AUX_CHANNEL_A |
 				DE_AUX_CHANNEL_A |
-				DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
 				DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE |
 				DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE |
 				DE_POISON);
 				DE_POISON);
-		extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT;
+		extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
+				DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN;
 	}
 	}
 
 
 	dev_priv->irq_mask = ~display_mask;
 	dev_priv->irq_mask = ~display_mask;
@@ -3208,9 +3219,9 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
 	struct drm_device *dev = dev_priv->dev;
 	struct drm_device *dev = dev_priv->dev;
 	uint32_t de_pipe_masked = GEN8_PIPE_FLIP_DONE |
 	uint32_t de_pipe_masked = GEN8_PIPE_FLIP_DONE |
 		GEN8_PIPE_CDCLK_CRC_DONE |
 		GEN8_PIPE_CDCLK_CRC_DONE |
-		GEN8_PIPE_FIFO_UNDERRUN |
 		GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
 		GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
-	uint32_t de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK;
+	uint32_t de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
+		GEN8_PIPE_FIFO_UNDERRUN;
 	int pipe;
 	int pipe;
 	dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked;
 	dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked;
 	dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
 	dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;

+ 1 - 1
drivers/gpu/drm/i915/intel_dp.c

@@ -1621,7 +1621,7 @@ static void intel_edp_psr_enable_source(struct intel_dp *intel_dp)
 		val |= EDP_PSR_LINK_DISABLE;
 		val |= EDP_PSR_LINK_DISABLE;
 
 
 	I915_WRITE(EDP_PSR_CTL(dev), val |
 	I915_WRITE(EDP_PSR_CTL(dev), val |
-		   IS_BROADWELL(dev) ? 0 : link_entry_time |
+		   (IS_BROADWELL(dev) ? 0 : link_entry_time) |
 		   max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT |
 		   max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT |
 		   idle_frames << EDP_PSR_IDLE_FRAME_SHIFT |
 		   idle_frames << EDP_PSR_IDLE_FRAME_SHIFT |
 		   EDP_PSR_ENABLE);
 		   EDP_PSR_ENABLE);

+ 10 - 4
drivers/gpu/drm/nouveau/nouveau_drm.c

@@ -866,13 +866,16 @@ static int nouveau_pmops_runtime_suspend(struct device *dev)
 	struct drm_device *drm_dev = pci_get_drvdata(pdev);
 	int ret;

-	if (nouveau_runtime_pm == 0)
-		return -EINVAL;
+	if (nouveau_runtime_pm == 0) {
+		pm_runtime_forbid(dev);
+		return -EBUSY;
+	}

 	/* are we optimus enabled? */
 	if (nouveau_runtime_pm == -1 && !nouveau_is_optimus() && !nouveau_is_v1_dsm()) {
 		DRM_DEBUG_DRIVER("failing to power off - not optimus\n");
-		return -EINVAL;
+		pm_runtime_forbid(dev);
+		return -EBUSY;
 	}

 	nv_debug_level(SILENT);
@@ -923,12 +926,15 @@ static int nouveau_pmops_runtime_idle(struct device *dev)
 	struct nouveau_drm *drm = nouveau_drm(drm_dev);
 	struct drm_crtc *crtc;

-	if (nouveau_runtime_pm == 0)
+	if (nouveau_runtime_pm == 0) {
+		pm_runtime_forbid(dev);
 		return -EBUSY;
+	}

 	/* are we optimus enabled? */
 	if (nouveau_runtime_pm == -1 && !nouveau_is_optimus() && !nouveau_is_v1_dsm()) {
 		DRM_DEBUG_DRIVER("failing to power off - not optimus\n");
+		pm_runtime_forbid(dev);
 		return -EBUSY;
 	}


+ 12 - 5
drivers/gpu/drm/radeon/radeon_drv.c

@@ -404,11 +404,15 @@ static int radeon_pmops_runtime_suspend(struct device *dev)
 	struct drm_device *drm_dev = pci_get_drvdata(pdev);
 	int ret;

-	if (radeon_runtime_pm == 0)
-		return -EINVAL;
+	if (radeon_runtime_pm == 0) {
+		pm_runtime_forbid(dev);
+		return -EBUSY;
+	}

-	if (radeon_runtime_pm == -1 && !radeon_is_px())
-		return -EINVAL;
+	if (radeon_runtime_pm == -1 && !radeon_is_px()) {
+		pm_runtime_forbid(dev);
+		return -EBUSY;
+	}

 	drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
 	drm_kms_helper_poll_disable(drm_dev);
@@ -457,12 +461,15 @@ static int radeon_pmops_runtime_idle(struct device *dev)
 	struct drm_device *drm_dev = pci_get_drvdata(pdev);
 	struct drm_crtc *crtc;

-	if (radeon_runtime_pm == 0)
+	if (radeon_runtime_pm == 0) {
+		pm_runtime_forbid(dev);
 		return -EBUSY;
+	}

 	/* are we PX enabled? */
 	if (radeon_runtime_pm == -1 && !radeon_is_px()) {
 		DRM_DEBUG_DRIVER("failing to power off - not px\n");
+		pm_runtime_forbid(dev);
 		return -EBUSY;
 	}


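Note: the nouveau and radeon runtime-PM hunks follow the same pattern: when runtime PM is disabled or unsupported, the callback now calls pm_runtime_forbid() and returns -EBUSY instead of -EINVAL, so the PM core stops retrying. A minimal sketch of that shape, with a hypothetical driver and module parameter:

	static int foo_runtime_idle(struct device *dev)
	{
		if (foo_runtime_pm == 0) {          /* feature switched off */
			pm_runtime_forbid(dev);     /* keep the device active from now on */
			return -EBUSY;
		}
		return 0;                           /* allow the PM core to suspend */
	}
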
+ 8 - 3
drivers/gpu/drm/udl/udl_gem.c

@@ -177,8 +177,10 @@ void udl_gem_free_object(struct drm_gem_object *gem_obj)
 	if (obj->vmapping)
 		udl_gem_vunmap(obj);

-	if (gem_obj->import_attach)
+	if (gem_obj->import_attach) {
 		drm_prime_gem_destroy(gem_obj, obj->sg);
+		put_device(gem_obj->dev->dev);
+	}

 	if (obj->pages)
 		udl_gem_put_pages(obj);
@@ -256,9 +258,12 @@ struct drm_gem_object *udl_gem_prime_import(struct drm_device *dev,
 	int ret;

 	/* need to attach */
+	get_device(dev->dev);
 	attach = dma_buf_attach(dma_buf, dev->dev);
-	if (IS_ERR(attach))
+	if (IS_ERR(attach)) {
+		put_device(dev->dev);
 		return ERR_CAST(attach);
+	}

 	get_dma_buf(dma_buf);

@@ -282,6 +287,6 @@ fail_unmap:
 fail_detach:
 	dma_buf_detach(dma_buf, attach);
 	dma_buf_put(dma_buf);
-
+	put_device(dev->dev);
 	return ERR_PTR(ret);
 }

+ 2 - 0
drivers/hid/hid-lg4ff.c

@@ -43,6 +43,7 @@
 #define G25_REV_MIN 0x22
 #define G27_REV_MAJ 0x12
 #define G27_REV_MIN 0x38
+#define G27_2_REV_MIN 0x39

 #define to_hid_device(pdev) container_of(pdev, struct hid_device, dev)

@@ -130,6 +131,7 @@ static const struct lg4ff_usb_revision lg4ff_revs[] = {
 	{DFP_REV_MAJ,  DFP_REV_MIN,  &native_dfp},	/* Driving Force Pro */
 	{G25_REV_MAJ,  G25_REV_MIN,  &native_g25},	/* G25 */
 	{G27_REV_MAJ,  G27_REV_MIN,  &native_g27},	/* G27 */
+	{G27_REV_MAJ,  G27_2_REV_MIN,  &native_g27},	/* G27 v2 */
 };

 /* Recalculates X axis value accordingly to currently selected range */

+ 12 - 15
drivers/hid/hid-sony.c

@@ -42,6 +42,7 @@
 #define DUALSHOCK4_CONTROLLER_BT  BIT(6)

 #define SONY_LED_SUPPORT (SIXAXIS_CONTROLLER_USB | BUZZ_CONTROLLER | DUALSHOCK4_CONTROLLER_USB)
+#define SONY_FF_SUPPORT (SIXAXIS_CONTROLLER_USB | DUALSHOCK4_CONTROLLER_USB)

 #define MAX_LEDS 4

@@ -499,6 +500,7 @@ struct sony_sc {
 	__u8 right;
 #endif

+	__u8 worker_initialized;
 	__u8 led_state[MAX_LEDS];
 	__u8 led_count;
 };
@@ -993,22 +995,11 @@ static int sony_init_ff(struct hid_device *hdev)
 	return input_ff_create_memless(input_dev, NULL, sony_play_effect);
 }

-static void sony_destroy_ff(struct hid_device *hdev)
-{
-	struct sony_sc *sc = hid_get_drvdata(hdev);
-
-	cancel_work_sync(&sc->state_worker);
-}
-
 #else
 static int sony_init_ff(struct hid_device *hdev)
 {
 	return 0;
 }
-
-static void sony_destroy_ff(struct hid_device *hdev)
-{
-}
 #endif

 static int sony_set_output_report(struct sony_sc *sc, int req_id, int req_size)
@@ -1077,6 +1068,8 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id)
 	if (sc->quirks & SIXAXIS_CONTROLLER_USB) {
 		hdev->hid_output_raw_report = sixaxis_usb_output_raw_report;
 		ret = sixaxis_set_operational_usb(hdev);
+
+		sc->worker_initialized = 1;
 		INIT_WORK(&sc->state_worker, sixaxis_state_worker);
 	}
 	else if (sc->quirks & SIXAXIS_CONTROLLER_BT)
@@ -1087,6 +1080,7 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id)
 		if (ret < 0)
 			goto err_stop;

+		sc->worker_initialized = 1;
 		INIT_WORK(&sc->state_worker, dualshock4_state_worker);
 	} else {
 		ret = 0;
@@ -1101,9 +1095,11 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id)
 			goto err_stop;
 	}

-	ret = sony_init_ff(hdev);
-	if (ret < 0)
-		goto err_stop;
+	if (sc->quirks & SONY_FF_SUPPORT) {
+		ret = sony_init_ff(hdev);
+		if (ret < 0)
+			goto err_stop;
+	}

 	return 0;
 err_stop:
@@ -1120,7 +1116,8 @@ static void sony_remove(struct hid_device *hdev)
 	if (sc->quirks & SONY_LED_SUPPORT)
 		sony_leds_remove(hdev);

-	sony_destroy_ff(hdev);
+	if (sc->worker_initialized)
+		cancel_work_sync(&sc->state_worker);

 	hid_hw_stop(hdev);
 }

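Note: the hid-sony hunk replaces the real/empty sony_destroy_ff() pair with a worker_initialized flag, so cancel_work_sync() is only ever called on a work item that actually went through INIT_WORK(); cancelling a never-initialized work_struct reads uninitialized memory. Rough sketch of the pairing, with hypothetical names:

	if (needs_worker) {
		sc->worker_initialized = 1;
		INIT_WORK(&sc->state_worker, state_worker_fn);   /* probe path */
	}
	/* ... */
	if (sc->worker_initialized)
		cancel_work_sync(&sc->state_worker);             /* remove path */
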
+ 2 - 2
drivers/hid/hidraw.c

@@ -320,13 +320,13 @@ static void drop_ref(struct hidraw *hidraw, int exists_bit)
 			hid_hw_close(hidraw->hid);
 			wake_up_interruptible(&hidraw->wait);
 		}
+		device_destroy(hidraw_class,
+			       MKDEV(hidraw_major, hidraw->minor));
 	} else {
 		--hidraw->open;
 	}
 	if (!hidraw->open) {
 		if (!hidraw->exist) {
-			device_destroy(hidraw_class,
-					MKDEV(hidraw_major, hidraw->minor));
 			hidraw_table[hidraw->minor] = NULL;
 			kfree(hidraw);
 		} else {

+ 2 - 0
drivers/i2c/busses/i2c-cpm.c

@@ -39,7 +39,9 @@
 #include <linux/i2c.h>
 #include <linux/io.h>
 #include <linux/dma-mapping.h>
+#include <linux/of_address.h>
 #include <linux/of_device.h>
+#include <linux/of_irq.h>
 #include <linux/of_platform.h>
 #include <sysdev/fsl_soc.h>
 #include <asm/cpm.h>

+ 3 - 1
drivers/input/evdev.c

@@ -954,11 +954,13 @@ static long evdev_do_ioctl(struct file *file, unsigned int cmd,
 			return -EFAULT;

 		error = input_ff_upload(dev, &effect, file);
+		if (error)
+			return error;

 		if (put_user(effect.id, &(((struct ff_effect __user *)p)->id)))
 			return -EFAULT;

-		return error;
+		return 0;
 	}

 	/* Multi-number variable-length handlers */

+ 11 - 1
drivers/input/keyboard/adp5588-keys.c

@@ -76,8 +76,18 @@ static int adp5588_gpio_get_value(struct gpio_chip *chip, unsigned off)
 	struct adp5588_kpad *kpad = container_of(chip, struct adp5588_kpad, gc);
 	unsigned int bank = ADP5588_BANK(kpad->gpiomap[off]);
 	unsigned int bit = ADP5588_BIT(kpad->gpiomap[off]);
+	int val;

-	return !!(adp5588_read(kpad->client, GPIO_DAT_STAT1 + bank) & bit);
+	mutex_lock(&kpad->gpio_lock);
+
+	if (kpad->dir[bank] & bit)
+		val = kpad->dat_out[bank];
+	else
+		val = adp5588_read(kpad->client, GPIO_DAT_STAT1 + bank);
+
+	mutex_unlock(&kpad->gpio_lock);
+
+	return !!(val & bit);
 }

 static void adp5588_gpio_set_value(struct gpio_chip *chip,

+ 16 - 13
drivers/input/misc/da9052_onkey.c

@@ -27,29 +27,32 @@ struct da9052_onkey {

 static void da9052_onkey_query(struct da9052_onkey *onkey)
 {
-	int key_stat;
+	int ret;

-	key_stat = da9052_reg_read(onkey->da9052, DA9052_EVENT_B_REG);
-	if (key_stat < 0) {
+	ret = da9052_reg_read(onkey->da9052, DA9052_STATUS_A_REG);
+	if (ret < 0) {
 		dev_err(onkey->da9052->dev,
-			"Failed to read onkey event %d\n", key_stat);
+			"Failed to read onkey event err=%d\n", ret);
 	} else {
 		/*
 		 * Since interrupt for deassertion of ONKEY pin is not
 		 * generated, onkey event state determines the onkey
 		 * button state.
 		 */
-		key_stat &= DA9052_EVENTB_ENONKEY;
-		input_report_key(onkey->input, KEY_POWER, key_stat);
+		bool pressed = !(ret & DA9052_STATUSA_NONKEY);
+
+		input_report_key(onkey->input, KEY_POWER, pressed);
 		input_sync(onkey->input);
-	}

-	/*
-	 * Interrupt is generated only when the ONKEY pin is asserted.
-	 * Hence the deassertion of the pin is simulated through work queue.
-	 */
-	if (key_stat)
-		schedule_delayed_work(&onkey->work, msecs_to_jiffies(50));
+		/*
+		 * Interrupt is generated only when the ONKEY pin
+		 * is asserted.  Hence the deassertion of the pin
+		 * is simulated through work queue.
+		 */
+		if (pressed)
+			schedule_delayed_work(&onkey->work,
+						msecs_to_jiffies(50));
+	}
 }

 static void da9052_onkey_work(struct work_struct *work)

+ 0 - 1
drivers/input/mouse/cypress_ps2.c

@@ -409,7 +409,6 @@ static int cypress_set_input_params(struct input_dev *input,
 	__clear_bit(REL_X, input->relbit);
 	__clear_bit(REL_Y, input->relbit);

-	__set_bit(INPUT_PROP_BUTTONPAD, input->propbit);
 	__set_bit(EV_KEY, input->evbit);
 	__set_bit(BTN_LEFT, input->keybit);
 	__set_bit(BTN_RIGHT, input->keybit);

+ 55 - 0
drivers/input/mouse/synaptics.c

@@ -265,11 +265,22 @@ static int synaptics_identify(struct psmouse *psmouse)
  * Read touchpad resolution and maximum reported coordinates
  * Resolution is left zero if touchpad does not support the query
  */
+
+static const int *quirk_min_max;
+
 static int synaptics_resolution(struct psmouse *psmouse)
 {
 	struct synaptics_data *priv = psmouse->private;
 	unsigned char resp[3];

+	if (quirk_min_max) {
+		priv->x_min = quirk_min_max[0];
+		priv->x_max = quirk_min_max[1];
+		priv->y_min = quirk_min_max[2];
+		priv->y_max = quirk_min_max[3];
+		return 0;
+	}
+
 	if (SYN_ID_MAJOR(priv->identity) < 4)
 		return 0;

@@ -1485,10 +1496,54 @@ static const struct dmi_system_id olpc_dmi_table[] __initconst = {
 	{ }
 };

+static const struct dmi_system_id min_max_dmi_table[] __initconst = {
+#if defined(CONFIG_DMI)
+	{
+		/* Lenovo ThinkPad Helix */
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+			DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad Helix"),
+		},
+		.driver_data = (int []){1024, 5052, 2258, 4832},
+	},
+	{
+		/* Lenovo ThinkPad X240 */
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+			DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X240"),
+		},
+		.driver_data = (int []){1232, 5710, 1156, 4696},
+	},
+	{
+		/* Lenovo ThinkPad T440s */
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+			DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T440"),
+		},
+		.driver_data = (int []){1024, 5112, 2024, 4832},
+	},
+	{
+		/* Lenovo ThinkPad T540p */
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+			DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T540"),
+		},
+		.driver_data = (int []){1024, 5056, 2058, 4832},
+	},
+#endif
+	{ }
+};
+
 void __init synaptics_module_init(void)
 {
+	const struct dmi_system_id *min_max_dmi;
+
 	impaired_toshiba_kbc = dmi_check_system(toshiba_dmi_table);
 	broken_olpc_ec = dmi_check_system(olpc_dmi_table);
+
+	min_max_dmi = dmi_first_match(min_max_dmi_table);
+	if (min_max_dmi)
+		quirk_min_max = min_max_dmi->driver_data;
 }

 static int __synaptics_init(struct psmouse *psmouse, bool absolute_mode)

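Note: the synaptics hunk uses the standard DMI quirk idiom: dmi_first_match() returns the first matching table entry, and its driver_data carries the board-specific x/y limits, resolved once at module init. Sketch of how such an entry is consumed (same idea, hypothetical variable names):

	const struct dmi_system_id *m = dmi_first_match(min_max_dmi_table);
	if (m) {
		const int *r = m->driver_data;     /* {x_min, x_max, y_min, y_max} */
		priv->x_min = r[0];  priv->x_max = r[1];
		priv->y_min = r[2];  priv->y_max = r[3];
	}
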
+ 42 - 31
drivers/input/mousedev.c

@@ -67,7 +67,6 @@ struct mousedev {
 	struct device dev;
 	struct cdev cdev;
 	bool exist;
-	bool is_mixdev;

 	struct list_head mixdev_node;
 	bool opened_by_mixdev;
@@ -77,6 +76,9 @@ struct mousedev {
 	int old_x[4], old_y[4];
 	int frac_dx, frac_dy;
 	unsigned long touch;
+
+	int (*open_device)(struct mousedev *mousedev);
+	void (*close_device)(struct mousedev *mousedev);
 };

 enum mousedev_emul {
@@ -116,9 +118,6 @@ static unsigned char mousedev_imex_seq[] = { 0xf3, 200, 0xf3, 200, 0xf3, 80 };
 static struct mousedev *mousedev_mix;
 static LIST_HEAD(mousedev_mix_list);

-static void mixdev_open_devices(void);
-static void mixdev_close_devices(void);
-
 #define fx(i)  (mousedev->old_x[(mousedev->pkt_count - (i)) & 03])
 #define fy(i)  (mousedev->old_y[(mousedev->pkt_count - (i)) & 03])

@@ -428,9 +427,7 @@ static int mousedev_open_device(struct mousedev *mousedev)
 	if (retval)
 		return retval;

-	if (mousedev->is_mixdev)
-		mixdev_open_devices();
-	else if (!mousedev->exist)
+	if (!mousedev->exist)
 		retval = -ENODEV;
 	else if (!mousedev->open++) {
 		retval = input_open_device(&mousedev->handle);
@@ -446,9 +443,7 @@ static void mousedev_close_device(struct mousedev *mousedev)
 {
 	mutex_lock(&mousedev->mutex);

-	if (mousedev->is_mixdev)
-		mixdev_close_devices();
-	else if (mousedev->exist && !--mousedev->open)
+	if (mousedev->exist && !--mousedev->open)
 		input_close_device(&mousedev->handle);

 	mutex_unlock(&mousedev->mutex);
@@ -459,21 +454,29 @@ static void mousedev_close_device(struct mousedev *mousedev)
 * stream. Note that this function is called with mousedev_mix->mutex
 * held.
 */
-static void mixdev_open_devices(void)
+static int mixdev_open_devices(struct mousedev *mixdev)
 {
-	struct mousedev *mousedev;
+	int error;
+
+	error = mutex_lock_interruptible(&mixdev->mutex);
+	if (error)
+		return error;

-	if (mousedev_mix->open++)
-		return;
+	if (!mixdev->open++) {
+		struct mousedev *mousedev;

-	list_for_each_entry(mousedev, &mousedev_mix_list, mixdev_node) {
-		if (!mousedev->opened_by_mixdev) {
-			if (mousedev_open_device(mousedev))
-				continue;
+		list_for_each_entry(mousedev, &mousedev_mix_list, mixdev_node) {
+			if (!mousedev->opened_by_mixdev) {
+				if (mousedev_open_device(mousedev))
+					continue;

-			mousedev->opened_by_mixdev = true;
+				mousedev->opened_by_mixdev = true;
+			}
 		}
 	}
+
+	mutex_unlock(&mixdev->mutex);
+	return 0;
 }

 /*
@@ -481,19 +484,22 @@ static void mixdev_open_devices(void)
 * device. Note that this function is called with mousedev_mix->mutex
 * held.
 */
-static void mixdev_close_devices(void)
+static void mixdev_close_devices(struct mousedev *mixdev)
 {
-	struct mousedev *mousedev;
+	mutex_lock(&mixdev->mutex);

-	if (--mousedev_mix->open)
-		return;
+	if (!--mixdev->open) {
+		struct mousedev *mousedev;

-	list_for_each_entry(mousedev, &mousedev_mix_list, mixdev_node) {
-		if (mousedev->opened_by_mixdev) {
-			mousedev->opened_by_mixdev = false;
-			mousedev_close_device(mousedev);
+		list_for_each_entry(mousedev, &mousedev_mix_list, mixdev_node) {
+			if (mousedev->opened_by_mixdev) {
+				mousedev->opened_by_mixdev = false;
+				mousedev_close_device(mousedev);
+			}
 		}
 	}
+
+	mutex_unlock(&mixdev->mutex);
 }


@@ -522,7 +528,7 @@ static int mousedev_release(struct inode *inode, struct file *file)
 	mousedev_detach_client(mousedev, client);
 	kfree(client);

-	mousedev_close_device(mousedev);
+	mousedev->close_device(mousedev);

 	return 0;
 }
@@ -550,7 +556,7 @@ static int mousedev_open(struct inode *inode, struct file *file)
 	client->mousedev = mousedev;
 	mousedev_attach_client(mousedev, client);

-	error = mousedev_open_device(mousedev);
+	error = mousedev->open_device(mousedev);
 	if (error)
 		goto err_free_client;

@@ -861,16 +867,21 @@ static struct mousedev *mousedev_create(struct input_dev *dev,

 	if (mixdev) {
 		dev_set_name(&mousedev->dev, "mice");
+
+		mousedev->open_device = mixdev_open_devices;
+		mousedev->close_device = mixdev_close_devices;
 	} else {
 		int dev_no = minor;
 		/* Normalize device number if it falls into legacy range */
 		if (dev_no < MOUSEDEV_MINOR_BASE + MOUSEDEV_MINORS)
 			dev_no -= MOUSEDEV_MINOR_BASE;
 		dev_set_name(&mousedev->dev, "mouse%d", dev_no);
+
+		mousedev->open_device = mousedev_open_device;
+		mousedev->close_device = mousedev_close_device;
 	}

 	mousedev->exist = true;
-	mousedev->is_mixdev = mixdev;
 	mousedev->handle.dev = input_get_device(dev);
 	mousedev->handle.name = dev_name(&mousedev->dev);
 	mousedev->handle.handler = handler;
@@ -919,7 +930,7 @@ static void mousedev_destroy(struct mousedev *mousedev)
 	device_del(&mousedev->dev);
 	mousedev_cleanup(mousedev);
 	input_free_minor(MINOR(mousedev->dev.devt));
-	if (!mousedev->is_mixdev)
+	if (mousedev != mousedev_mix)
 		input_unregister_handle(&mousedev->handle);
 	put_device(&mousedev->dev);
 }

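Note: the mousedev rework swaps the is_mixdev flag for per-device open_device/close_device function pointers, so /dev/input/mice and the per-mouse nodes dispatch to different open/close paths without re-checking a flag on every call. The shape of the dispatch, reduced to a sketch:

	struct mousedev {
		int  (*open_device)(struct mousedev *mousedev);
		void (*close_device)(struct mousedev *mousedev);
		/* ... */
	};

	/* at creation time */
	mousedev->open_device  = mixdev ? mixdev_open_devices  : mousedev_open_device;
	mousedev->close_device = mixdev ? mixdev_close_devices : mousedev_close_device;

	/* in file open/release */
	error = mousedev->open_device(mousedev);
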
+ 9 - 9
drivers/isdn/capi/Kconfig

@@ -16,9 +16,17 @@ config CAPI_TRACE
 	  This will increase the size of the kernelcapi module by 20 KB.
 	  If unsure, say Y.

+config ISDN_CAPI_CAPI20
+	tristate "CAPI2.0 /dev/capi support"
+	help
+	  This option will provide the CAPI 2.0 interface to userspace
+	  applications via /dev/capi20. Applications should use the
+	  standardized libcapi20 to access this functionality.  You should say
+	  Y/M here.
+
 config ISDN_CAPI_MIDDLEWARE
 	bool "CAPI2.0 Middleware support"
-	depends on TTY
+	depends on ISDN_CAPI_CAPI20 && TTY
 	help
 	  This option will enhance the capabilities of the /dev/capi20
 	  interface.  It will provide a means of moving a data connection,
@@ -26,14 +34,6 @@ config ISDN_CAPI_MIDDLEWARE
 	  device.  If you want to use pppd with pppdcapiplugin to dial up to
 	  your ISP, say Y here.

-config ISDN_CAPI_CAPI20
-	tristate "CAPI2.0 /dev/capi support"
-	help
-	  This option will provide the CAPI 2.0 interface to userspace
-	  applications via /dev/capi20. Applications should use the
-	  standardized libcapi20 to access this functionality.  You should say
-	  Y/M here.
-
 config ISDN_CAPI_CAPIDRV
 	tristate "CAPI2.0 capidrv interface support"
 	depends on ISDN_I4L

+ 4 - 10
drivers/net/ethernet/atheros/alx/main.c

@@ -1248,19 +1248,13 @@ static int alx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	 * shared register for the high 32 bits, so only a single, aligned,
 	 * 4 GB physical address range can be used for descriptors.
 	 */
-	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
-	    !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
+	if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
 		dev_dbg(&pdev->dev, "DMA to 64-BIT addresses\n");
 	} else {
-		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
 		if (err) {
-			err = dma_set_coherent_mask(&pdev->dev,
-						    DMA_BIT_MASK(32));
-			if (err) {
-				dev_err(&pdev->dev,
-					"No usable DMA config, aborting\n");
-				goto out_pci_disable;
-			}
+			dev_err(&pdev->dev, "No usable DMA config, aborting\n");
+			goto out_pci_disable;
 		}
 	}


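Note: the alx hunk leans on dma_set_mask_and_coherent(), which folds the two separate mask calls into one. Roughly, a sketch of the helper's behaviour (not the driver code):

	int rc = dma_set_mask(dev, mask);
	if (rc == 0)
		dma_set_coherent_mask(dev, mask);   /* same mask for coherent allocations */
	return rc;
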
+ 2 - 2
drivers/net/ethernet/atheros/atl1e/atl1e_main.c

@@ -2436,7 +2436,7 @@ err_reset:
 err_register:
 err_sw_init:
 err_eeprom:
-	iounmap(adapter->hw.hw_addr);
+	pci_iounmap(pdev, adapter->hw.hw_addr);
 err_init_netdev:
 err_ioremap:
 	free_netdev(netdev);
@@ -2474,7 +2474,7 @@ static void atl1e_remove(struct pci_dev *pdev)
 	unregister_netdev(netdev);
 	atl1e_free_ring_resources(adapter);
 	atl1e_force_ps(&adapter->hw);
-	iounmap(adapter->hw.hw_addr);
+	pci_iounmap(pdev, adapter->hw.hw_addr);
 	pci_release_regions(pdev);
 	free_netdev(netdev);
 	pci_disable_device(pdev);

+ 56 - 55
drivers/net/ethernet/broadcom/cnic.c

@@ -1,6 +1,6 @@
 /* cnic.c: Broadcom CNIC core network driver.
  *
- * Copyright (c) 2006-2013 Broadcom Corporation
+ * Copyright (c) 2006-2014 Broadcom Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -342,7 +342,7 @@ static int cnic_send_nlmsg(struct cnic_local *cp, u32 type,
 	while (retry < 3) {
 		rc = 0;
 		rcu_read_lock();
-		ulp_ops = rcu_dereference(cnic_ulp_tbl[CNIC_ULP_ISCSI]);
+		ulp_ops = rcu_dereference(cp->ulp_ops[CNIC_ULP_ISCSI]);
 		if (ulp_ops)
 			rc = ulp_ops->iscsi_nl_send_msg(
 				cp->ulp_handle[CNIC_ULP_ISCSI],
@@ -726,7 +726,7 @@ static void cnic_free_dma(struct cnic_dev *dev, struct cnic_dma *dma)

 	for (i = 0; i < dma->num_pages; i++) {
 		if (dma->pg_arr[i]) {
-			dma_free_coherent(&dev->pcidev->dev, BNX2_PAGE_SIZE,
+			dma_free_coherent(&dev->pcidev->dev, CNIC_PAGE_SIZE,
 					  dma->pg_arr[i], dma->pg_map_arr[i]);
 			dma->pg_arr[i] = NULL;
 		}
@@ -785,7 +785,7 @@ static int cnic_alloc_dma(struct cnic_dev *dev, struct cnic_dma *dma,

 	for (i = 0; i < pages; i++) {
 		dma->pg_arr[i] = dma_alloc_coherent(&dev->pcidev->dev,
-						    BNX2_PAGE_SIZE,
+						    CNIC_PAGE_SIZE,
 						    &dma->pg_map_arr[i],
 						    GFP_ATOMIC);
 		if (dma->pg_arr[i] == NULL)
@@ -794,8 +794,8 @@ static int cnic_alloc_dma(struct cnic_dev *dev, struct cnic_dma *dma,
 	if (!use_pg_tbl)
 		return 0;

-	dma->pgtbl_size = ((pages * 8) + BNX2_PAGE_SIZE - 1) &
-			  ~(BNX2_PAGE_SIZE - 1);
+	dma->pgtbl_size = ((pages * 8) + CNIC_PAGE_SIZE - 1) &
+			  ~(CNIC_PAGE_SIZE - 1);
 	dma->pgtbl = dma_alloc_coherent(&dev->pcidev->dev, dma->pgtbl_size,
 					&dma->pgtbl_map, GFP_ATOMIC);
 	if (dma->pgtbl == NULL)
@@ -900,8 +900,8 @@ static int cnic_alloc_context(struct cnic_dev *dev)
 	if (BNX2_CHIP(cp) == BNX2_CHIP_5709) {
 		int i, k, arr_size;

-		cp->ctx_blk_size = BNX2_PAGE_SIZE;
-		cp->cids_per_blk = BNX2_PAGE_SIZE / 128;
+		cp->ctx_blk_size = CNIC_PAGE_SIZE;
+		cp->cids_per_blk = CNIC_PAGE_SIZE / 128;
 		arr_size = BNX2_MAX_CID / cp->cids_per_blk *
 			   sizeof(struct cnic_ctx);
 		cp->ctx_arr = kzalloc(arr_size, GFP_KERNEL);
@@ -933,7 +933,7 @@ static int cnic_alloc_context(struct cnic_dev *dev)
 		for (i = 0; i < cp->ctx_blks; i++) {
 			cp->ctx_arr[i].ctx =
 				dma_alloc_coherent(&dev->pcidev->dev,
-						   BNX2_PAGE_SIZE,
+						   CNIC_PAGE_SIZE,
 						   &cp->ctx_arr[i].mapping,
 						   GFP_KERNEL);
 			if (cp->ctx_arr[i].ctx == NULL)
@@ -1013,7 +1013,7 @@ static int __cnic_alloc_uio_rings(struct cnic_uio_dev *udev, int pages)
 	if (udev->l2_ring)
 		return 0;

-	udev->l2_ring_size = pages * BNX2_PAGE_SIZE;
+	udev->l2_ring_size = pages * CNIC_PAGE_SIZE;
 	udev->l2_ring = dma_alloc_coherent(&udev->pdev->dev, udev->l2_ring_size,
 					   &udev->l2_ring_map,
 					   GFP_KERNEL | __GFP_COMP);
@@ -1021,7 +1021,7 @@ static int __cnic_alloc_uio_rings(struct cnic_uio_dev *udev, int pages)
 		return -ENOMEM;

 	udev->l2_buf_size = (cp->l2_rx_ring_size + 1) * cp->l2_single_buf_size;
-	udev->l2_buf_size = PAGE_ALIGN(udev->l2_buf_size);
+	udev->l2_buf_size = CNIC_PAGE_ALIGN(udev->l2_buf_size);
 	udev->l2_buf = dma_alloc_coherent(&udev->pdev->dev, udev->l2_buf_size,
 					  &udev->l2_buf_map,
 					  GFP_KERNEL | __GFP_COMP);
@@ -1102,7 +1102,7 @@ static int cnic_init_uio(struct cnic_dev *dev)
 		uinfo->mem[0].size = MB_GET_CID_ADDR(TX_TSS_CID +
 						     TX_MAX_TSS_RINGS + 1);
 		uinfo->mem[1].addr = (unsigned long) cp->status_blk.gen &
-					PAGE_MASK;
+					CNIC_PAGE_MASK;
 		if (cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
 			uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE * 9;
 		else
@@ -1113,7 +1113,7 @@ static int cnic_init_uio(struct cnic_dev *dev)
 		uinfo->mem[0].size = pci_resource_len(dev->pcidev, 0);

 		uinfo->mem[1].addr = (unsigned long) cp->bnx2x_def_status_blk &
-			PAGE_MASK;
+			CNIC_PAGE_MASK;
 		uinfo->mem[1].size = sizeof(*cp->bnx2x_def_status_blk);

 		uinfo->name = "bnx2x_cnic";
@@ -1267,14 +1267,14 @@ static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
 	for (i = MAX_ISCSI_TBL_SZ; i < cp->max_cid_space; i++)
 		cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_FCOE;

-	pages = PAGE_ALIGN(cp->max_cid_space * CNIC_KWQ16_DATA_SIZE) /
-		PAGE_SIZE;
+	pages = CNIC_PAGE_ALIGN(cp->max_cid_space * CNIC_KWQ16_DATA_SIZE) /
+		CNIC_PAGE_SIZE;

 	ret = cnic_alloc_dma(dev, kwq_16_dma, pages, 0);
 	if (ret)
 		return -ENOMEM;

-	n = PAGE_SIZE / CNIC_KWQ16_DATA_SIZE;
+	n = CNIC_PAGE_SIZE / CNIC_KWQ16_DATA_SIZE;
 	for (i = 0, j = 0; i < cp->max_cid_space; i++) {
 		long off = CNIC_KWQ16_DATA_SIZE * (i % n);

@@ -1296,7 +1296,7 @@ static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
 			goto error;
 	}

-	pages = PAGE_ALIGN(BNX2X_ISCSI_GLB_BUF_SIZE) / PAGE_SIZE;
+	pages = CNIC_PAGE_ALIGN(BNX2X_ISCSI_GLB_BUF_SIZE) / CNIC_PAGE_SIZE;
 	ret = cnic_alloc_dma(dev, &cp->gbl_buf_info, pages, 0);
 	if (ret)
 		goto error;
@@ -1466,8 +1466,8 @@ static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe)
 	cp->r2tq_size = cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS *
 			BNX2X_ISCSI_R2TQE_SIZE;
 	cp->hq_size = cp->num_ccells * BNX2X_ISCSI_HQ_BD_SIZE;
-	pages = PAGE_ALIGN(cp->hq_size) / PAGE_SIZE;
-	hq_bds = pages * (PAGE_SIZE / BNX2X_ISCSI_HQ_BD_SIZE);
+	pages = CNIC_PAGE_ALIGN(cp->hq_size) / CNIC_PAGE_SIZE;
+	hq_bds = pages * (CNIC_PAGE_SIZE / BNX2X_ISCSI_HQ_BD_SIZE);
 	cp->num_cqs = req1->num_cqs;

 	if (!dev->max_iscsi_conn)
@@ -1477,9 +1477,9 @@ static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe)
 	CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_RQ_SIZE_OFFSET(pfid),
 		  req1->rq_num_wqes);
 	CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
-		  PAGE_SIZE);
+		  CNIC_PAGE_SIZE);
 	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
-		 TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
+		 TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), CNIC_PAGE_BITS);
 	CNIC_WR16(dev, BAR_TSTRORM_INTMEM +
 		  TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
 		  req1->num_tasks_per_conn);
@@ -1489,9 +1489,9 @@ static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe)
 		  USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfid),
 		  req1->rq_buffer_size);
 	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
-		  PAGE_SIZE);
+		  CNIC_PAGE_SIZE);
 	CNIC_WR8(dev, BAR_USTRORM_INTMEM +
-		 USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
+		 USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), CNIC_PAGE_BITS);
 	CNIC_WR16(dev, BAR_USTRORM_INTMEM +
 		  USTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
 		  req1->num_tasks_per_conn);
@@ -1504,9 +1504,9 @@ static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe)

 	/* init Xstorm RAM */
 	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
-		  PAGE_SIZE);
+		  CNIC_PAGE_SIZE);
 	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
-		 XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
+		 XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), CNIC_PAGE_BITS);
 	CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
 		  XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
 		  req1->num_tasks_per_conn);
@@ -1519,9 +1519,9 @@ static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe)

 	/* init Cstorm RAM */
 	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
-		  PAGE_SIZE);
+		  CNIC_PAGE_SIZE);
 	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
-		 CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
+		 CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), CNIC_PAGE_BITS);
 	CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
 		  CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
 		  req1->num_tasks_per_conn);
@@ -1623,18 +1623,18 @@ static int cnic_alloc_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid)
 	}

 	ctx->cid = cid;
-	pages = PAGE_ALIGN(cp->task_array_size) / PAGE_SIZE;
+	pages = CNIC_PAGE_ALIGN(cp->task_array_size) / CNIC_PAGE_SIZE;

 	ret = cnic_alloc_dma(dev, &iscsi->task_array_info, pages, 1);
 	if (ret)
 		goto error;

-	pages = PAGE_ALIGN(cp->r2tq_size) / PAGE_SIZE;
+	pages = CNIC_PAGE_ALIGN(cp->r2tq_size) / CNIC_PAGE_SIZE;
 	ret = cnic_alloc_dma(dev, &iscsi->r2tq_info, pages, 1);
 	if (ret)
 		goto error;

-	pages = PAGE_ALIGN(cp->hq_size) / PAGE_SIZE;
+	pages = CNIC_PAGE_ALIGN(cp->hq_size) / CNIC_PAGE_SIZE;
 	ret = cnic_alloc_dma(dev, &iscsi->hq_info, pages, 1);
 	if (ret)
 		goto error;
@@ -1760,7 +1760,7 @@ static int cnic_setup_bnx2x_ctx(struct cnic_dev *dev, struct kwqe *wqes[],
 	ictx->tstorm_st_context.iscsi.hdr_bytes_2_fetch = ISCSI_HEADER_SIZE;
 	/* TSTORM requires the base address of RQ DB & not PTE */
 	ictx->tstorm_st_context.iscsi.rq_db_phy_addr.lo =
-		req2->rq_page_table_addr_lo & PAGE_MASK;
+		req2->rq_page_table_addr_lo & CNIC_PAGE_MASK;
 	ictx->tstorm_st_context.iscsi.rq_db_phy_addr.hi =
 		req2->rq_page_table_addr_hi;
 	ictx->tstorm_st_context.iscsi.iscsi_conn_id = req1->iscsi_conn_id;
@@ -1842,7 +1842,7 @@ static int cnic_setup_bnx2x_ctx(struct cnic_dev *dev, struct kwqe *wqes[],
 	/* CSTORM and USTORM initialization is different, CSTORM requires
 	 * CQ DB base & not PTE addr */
 	ictx->cstorm_st_context.cq_db_base.lo =
-		req1->cq_page_table_addr_lo & PAGE_MASK;
+		req1->cq_page_table_addr_lo & CNIC_PAGE_MASK;
 	ictx->cstorm_st_context.cq_db_base.hi = req1->cq_page_table_addr_hi;
 	ictx->cstorm_st_context.iscsi_conn_id = req1->iscsi_conn_id;
 	ictx->cstorm_st_context.cq_proc_en_bit_map = (1 << cp->num_cqs) - 1;
@@ -2911,7 +2911,7 @@ static int cnic_l2_completion(struct cnic_local *cp)
 	u16 hw_cons, sw_cons;
 	struct cnic_uio_dev *udev = cp->udev;
 	union eth_rx_cqe *cqe, *cqe_ring = (union eth_rx_cqe *)
-					(udev->l2_ring + (2 * BNX2_PAGE_SIZE));
+					(udev->l2_ring + (2 * CNIC_PAGE_SIZE));
 	u32 cmd;
 	int comp = 0;

@@ -3244,7 +3244,8 @@ static int cnic_copy_ulp_stats(struct cnic_dev *dev, int ulp_type)
 	int rc;

 	mutex_lock(&cnic_lock);
-	ulp_ops = cnic_ulp_tbl_prot(ulp_type);
+	ulp_ops = rcu_dereference_protected(cp->ulp_ops[ulp_type],
+					    lockdep_is_held(&cnic_lock));
 	if (ulp_ops && ulp_ops->cnic_get_stats)
 		rc = ulp_ops->cnic_get_stats(cp->ulp_handle[ulp_type]);
 	else
@@ -4384,7 +4385,7 @@ static int cnic_setup_5709_context(struct cnic_dev *dev, int valid)
 		u32 idx = cp->ctx_arr[i].cid / cp->cids_per_blk;
 		u32 val;

-		memset(cp->ctx_arr[i].ctx, 0, BNX2_PAGE_SIZE);
+		memset(cp->ctx_arr[i].ctx, 0, CNIC_PAGE_SIZE);

 		CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA0,
 			(cp->ctx_arr[i].mapping & 0xffffffff) | valid_bit);
@@ -4628,7 +4629,7 @@ static void cnic_init_bnx2_rx_ring(struct cnic_dev *dev)
 		val = BNX2_L2CTX_L2_STATUSB_NUM(sb_id);
 	cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_HOST_BDIDX, val);

-	rxbd = udev->l2_ring + BNX2_PAGE_SIZE;
+	rxbd = udev->l2_ring + CNIC_PAGE_SIZE;
 	for (i = 0; i < BNX2_MAX_RX_DESC_CNT; i++, rxbd++) {
 		dma_addr_t buf_map;
 		int n = (i % cp->l2_rx_ring_size) + 1;
@@ -4639,11 +4640,11 @@ static void cnic_init_bnx2_rx_ring(struct cnic_dev *dev)
 		rxbd->rx_bd_haddr_hi = (u64) buf_map >> 32;
 		rxbd->rx_bd_haddr_lo = (u64) buf_map & 0xffffffff;
 	}
-	val = (u64) (ring_map + BNX2_PAGE_SIZE) >> 32;
+	val = (u64) (ring_map + CNIC_PAGE_SIZE) >> 32;
 	cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
 	rxbd->rx_bd_haddr_hi = val;

-	val = (u64) (ring_map + BNX2_PAGE_SIZE) & 0xffffffff;
+	val = (u64) (ring_map + CNIC_PAGE_SIZE) & 0xffffffff;
 	cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
 	rxbd->rx_bd_haddr_lo = val;

@@ -4709,10 +4710,10 @@ static int cnic_start_bnx2_hw(struct cnic_dev *dev)

 	val = CNIC_RD(dev, BNX2_MQ_CONFIG);
 	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
-	if (BNX2_PAGE_BITS > 12)
+	if (CNIC_PAGE_BITS > 12)
 		val |= (12 - 8)  << 4;
 	else
-		val |= (BNX2_PAGE_BITS - 8)  << 4;
+		val |= (CNIC_PAGE_BITS - 8)  << 4;

 	CNIC_WR(dev, BNX2_MQ_CONFIG, val);

@@ -4742,13 +4743,13 @@ static int cnic_start_bnx2_hw(struct cnic_dev *dev)

 	/* Initialize the kernel work queue context. */
 	val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
-	      (BNX2_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
+	      (CNIC_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
 	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_TYPE, val);

-	val = (BNX2_PAGE_SIZE / sizeof(struct kwqe) - 1) << 16;
+	val = (CNIC_PAGE_SIZE / sizeof(struct kwqe) - 1) << 16;
 	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);

-	val = ((BNX2_PAGE_SIZE / sizeof(struct kwqe)) << 16) | KWQ_PAGE_CNT;
+	val = ((CNIC_PAGE_SIZE / sizeof(struct kwqe)) << 16) | KWQ_PAGE_CNT;
 	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);

 	val = (u32) ((u64) cp->kwq_info.pgtbl_map >> 32);
@@ -4768,13 +4769,13 @@ static int cnic_start_bnx2_hw(struct cnic_dev *dev)

 	/* Initialize the kernel complete queue context. */
 	val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
-	      (BNX2_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
+	      (CNIC_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
 	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_TYPE, val);

-	val = (BNX2_PAGE_SIZE / sizeof(struct kcqe) - 1) << 16;
+	val = (CNIC_PAGE_SIZE / sizeof(struct kcqe) - 1) << 16;
 	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);

-	val = ((BNX2_PAGE_SIZE / sizeof(struct kcqe)) << 16) | KCQ_PAGE_CNT;
+	val = ((CNIC_PAGE_SIZE / sizeof(struct kcqe)) << 16) | KCQ_PAGE_CNT;
 	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);

 	val = (u32) ((u64) cp->kcq1.dma.pgtbl_map >> 32);
@@ -4918,7 +4919,7 @@ static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev,
 	u32 cli = cp->ethdev->iscsi_l2_client_id;
 	u32 val;

-	memset(txbd, 0, BNX2_PAGE_SIZE);
+	memset(txbd, 0, CNIC_PAGE_SIZE);

 	buf_map = udev->l2_buf_map;
 	for (i = 0; i < BNX2_MAX_TX_DESC_CNT; i += 3, txbd += 3) {
@@ -4978,9 +4979,9 @@ static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev,
 	struct bnx2x *bp = netdev_priv(dev->netdev);
 	struct cnic_uio_dev *udev = cp->udev;
 	struct eth_rx_bd *rxbd = (struct eth_rx_bd *) (udev->l2_ring +
-				BNX2_PAGE_SIZE);
+				CNIC_PAGE_SIZE);
 	struct eth_rx_cqe_next_page *rxcqe = (struct eth_rx_cqe_next_page *)
-				(udev->l2_ring + (2 * BNX2_PAGE_SIZE));
+				(udev->l2_ring + (2 * CNIC_PAGE_SIZE));
 	struct host_sp_status_block *sb = cp->bnx2x_def_status_blk;
 	int i;
 	u32 cli = cp->ethdev->iscsi_l2_client_id;
@@ -5004,20 +5005,20 @@ static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev,
 		rxbd->addr_lo = cpu_to_le32(buf_map & 0xffffffff);
 	}

-	val = (u64) (ring_map + BNX2_PAGE_SIZE) >> 32;
+	val = (u64) (ring_map + CNIC_PAGE_SIZE) >> 32;
 	rxbd->addr_hi = cpu_to_le32(val);
 	data->rx.bd_page_base.hi = cpu_to_le32(val);

-	val = (u64) (ring_map + BNX2_PAGE_SIZE) & 0xffffffff;
+	val = (u64) (ring_map + CNIC_PAGE_SIZE) & 0xffffffff;
 	rxbd->addr_lo = cpu_to_le32(val);
 	data->rx.bd_page_base.lo = cpu_to_le32(val);

 	rxcqe += BNX2X_MAX_RCQ_DESC_CNT;
-	val = (u64) (ring_map + (2 * BNX2_PAGE_SIZE)) >> 32;
+	val = (u64) (ring_map + (2 * CNIC_PAGE_SIZE)) >> 32;
 	rxcqe->addr_hi = cpu_to_le32(val);
 	data->rx.cqe_page_base.hi = cpu_to_le32(val);

-	val = (u64) (ring_map + (2 * BNX2_PAGE_SIZE)) & 0xffffffff;
+	val = (u64) (ring_map + (2 * CNIC_PAGE_SIZE)) & 0xffffffff;
 	rxcqe->addr_lo = cpu_to_le32(val);
 	data->rx.cqe_page_base.lo = cpu_to_le32(val);

@@ -5265,8 +5266,8 @@ static void cnic_shutdown_rings(struct cnic_dev *dev)
 		msleep(10);
 	}
 	clear_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);
-	rx_ring = udev->l2_ring + BNX2_PAGE_SIZE;
-	memset(rx_ring, 0, BNX2_PAGE_SIZE);
+	rx_ring = udev->l2_ring + CNIC_PAGE_SIZE;
+	memset(rx_ring, 0, CNIC_PAGE_SIZE);
 }

 static int cnic_register_netdev(struct cnic_dev *dev)

+ 1 - 1
drivers/net/ethernet/broadcom/cnic.h

@@ -1,6 +1,6 @@
 /* cnic.h: Broadcom CNIC core network driver.
  *
- * Copyright (c) 2006-2013 Broadcom Corporation
+ * Copyright (c) 2006-2014 Broadcom Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by

+ 1 - 1
drivers/net/ethernet/broadcom/cnic_defs.h

@@ -1,7 +1,7 @@

 /* cnic.c: Broadcom CNIC core network driver.
  *
- * Copyright (c) 2006-2013 Broadcom Corporation
+ * Copyright (c) 2006-2014 Broadcom Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by

+ 13 - 3
drivers/net/ethernet/broadcom/cnic_if.h

@@ -1,6 +1,6 @@
 /* cnic_if.h: Broadcom CNIC core network driver.
  *
- * Copyright (c) 2006-2013 Broadcom Corporation
+ * Copyright (c) 2006-2014 Broadcom Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -14,8 +14,8 @@

 #include "bnx2x/bnx2x_mfw_req.h"

-#define CNIC_MODULE_VERSION	"2.5.19"
-#define CNIC_MODULE_RELDATE	"December 19, 2013"
+#define CNIC_MODULE_VERSION	"2.5.20"
+#define CNIC_MODULE_RELDATE	"March 14, 2014"

 #define CNIC_ULP_RDMA		0
 #define CNIC_ULP_ISCSI		1
@@ -24,6 +24,16 @@
 #define MAX_CNIC_ULP_TYPE_EXT	3
 #define MAX_CNIC_ULP_TYPE	4

+/* Use CPU native page size up to 16K for cnic ring sizes.  */
+#if (PAGE_SHIFT > 14)
+#define CNIC_PAGE_BITS	14
+#else
+#define CNIC_PAGE_BITS	PAGE_SHIFT
+#endif
+#define CNIC_PAGE_SIZE	(1 << (CNIC_PAGE_BITS))
+#define CNIC_PAGE_ALIGN(addr) ALIGN(addr, CNIC_PAGE_SIZE)
+#define CNIC_PAGE_MASK	(~((CNIC_PAGE_SIZE) - 1))
+
 struct kwqe {
 	u32 kwqe_op_flag;


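Note: the new CNIC_PAGE_* macros cap the cnic ring page size at 16K even when the kernel runs with larger MMU pages, and the cnic.c hunk above switches all ring arithmetic over to them. Illustrative values only (not part of the patch), for a kernel built with 64K pages where PAGE_SHIFT is 16:

	/* CNIC_PAGE_BITS          = 14                                      */
	/* CNIC_PAGE_SIZE          = 1 << 14 = 16384                         */
	/* CNIC_PAGE_ALIGN(20000)  = 32768   (rounded up to a 16K multiple)  */
	/* 0x5fff & CNIC_PAGE_MASK = 0x4000  (low 14 bits cleared)           */
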
+ 2 - 3
drivers/net/ethernet/broadcom/tg3.c

@@ -17649,8 +17649,6 @@ static int tg3_init_one(struct pci_dev *pdev,

 	tg3_init_bufmgr_config(tp);

-	features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
-
 	/* 5700 B0 chips do not support checksumming correctly due
 	 * to hardware bugs.
 	 */
@@ -17682,7 +17680,8 @@ static int tg3_init_one(struct pci_dev *pdev,
 			features |= NETIF_F_TSO_ECN;
 	}

-	dev->features |= features;
+	dev->features |= features | NETIF_F_HW_VLAN_CTAG_TX |
+			 NETIF_F_HW_VLAN_CTAG_RX;
 	dev->vlan_features |= features;

 	/*

+ 20 - 40
drivers/net/ethernet/marvell/mvneta.c

@@ -22,6 +22,7 @@
 #include <linux/interrupt.h>
 #include <linux/interrupt.h>
 #include <net/ip.h>
 #include <net/ip.h>
 #include <net/ipv6.h>
 #include <net/ipv6.h>
+#include <linux/io.h>
 #include <linux/of.h>
 #include <linux/of.h>
 #include <linux/of_irq.h>
 #include <linux/of_irq.h>
 #include <linux/of_mdio.h>
 #include <linux/of_mdio.h>
@@ -88,8 +89,9 @@
 #define      MVNETA_TX_IN_PRGRS                  BIT(1)
 #define      MVNETA_TX_IN_PRGRS                  BIT(1)
 #define      MVNETA_TX_FIFO_EMPTY                BIT(8)
 #define      MVNETA_TX_FIFO_EMPTY                BIT(8)
 #define MVNETA_RX_MIN_FRAME_SIZE                 0x247c
 #define MVNETA_RX_MIN_FRAME_SIZE                 0x247c
-#define MVNETA_SGMII_SERDES_CFG			 0x24A0
+#define MVNETA_SERDES_CFG			 0x24A0
 #define      MVNETA_SGMII_SERDES_PROTO		 0x0cc7
 #define      MVNETA_SGMII_SERDES_PROTO		 0x0cc7
+#define      MVNETA_RGMII_SERDES_PROTO		 0x0667
 #define MVNETA_TYPE_PRIO                         0x24bc
 #define MVNETA_TYPE_PRIO                         0x24bc
 #define      MVNETA_FORCE_UNI                    BIT(21)
 #define      MVNETA_FORCE_UNI                    BIT(21)
 #define MVNETA_TXQ_CMD_1                         0x24e4
 #define MVNETA_TXQ_CMD_1                         0x24e4
@@ -161,7 +163,7 @@
 #define      MVNETA_GMAC_MAX_RX_SIZE_MASK        0x7ffc
 #define      MVNETA_GMAC_MAX_RX_SIZE_MASK        0x7ffc
 #define      MVNETA_GMAC0_PORT_ENABLE            BIT(0)
 #define      MVNETA_GMAC0_PORT_ENABLE            BIT(0)
 #define MVNETA_GMAC_CTRL_2                       0x2c08
 #define MVNETA_GMAC_CTRL_2                       0x2c08
-#define      MVNETA_GMAC2_PSC_ENABLE             BIT(3)
+#define      MVNETA_GMAC2_PCS_ENABLE             BIT(3)
 #define      MVNETA_GMAC2_PORT_RGMII             BIT(4)
 #define      MVNETA_GMAC2_PORT_RGMII             BIT(4)
 #define      MVNETA_GMAC2_PORT_RESET             BIT(6)
 #define      MVNETA_GMAC2_PORT_RESET             BIT(6)
 #define MVNETA_GMAC_STATUS                       0x2c10
 #define MVNETA_GMAC_STATUS                       0x2c10
@@ -710,35 +712,6 @@ static void mvneta_rxq_bm_disable(struct mvneta_port *pp,
 	mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
 	mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
 }
 }
 
 
-
-
-/* Sets the RGMII Enable bit (RGMIIEn) in port MAC control register */
-static void mvneta_gmac_rgmii_set(struct mvneta_port *pp, int enable)
-{
-	u32  val;
-
-	val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
-
-	if (enable)
-		val |= MVNETA_GMAC2_PORT_RGMII;
-	else
-		val &= ~MVNETA_GMAC2_PORT_RGMII;
-
-	mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);
-}
-
-/* Config SGMII port */
-static void mvneta_port_sgmii_config(struct mvneta_port *pp)
-{
-	u32 val;
-
-	val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
-	val |= MVNETA_GMAC2_PSC_ENABLE;
-	mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);
-
-	mvreg_write(pp, MVNETA_SGMII_SERDES_CFG, MVNETA_SGMII_SERDES_PROTO);
-}
-
 /* Start the Ethernet port RX and TX activity */
 /* Start the Ethernet port RX and TX activity */
 static void mvneta_port_up(struct mvneta_port *pp)
 static void mvneta_port_up(struct mvneta_port *pp)
 {
 {
@@ -2756,12 +2729,15 @@ static void mvneta_port_power_up(struct mvneta_port *pp, int phy_mode)
 	mvreg_write(pp, MVNETA_UNIT_INTR_CAUSE, 0);
 	mvreg_write(pp, MVNETA_UNIT_INTR_CAUSE, 0);
 
 
 	if (phy_mode == PHY_INTERFACE_MODE_SGMII)
 	if (phy_mode == PHY_INTERFACE_MODE_SGMII)
-		mvneta_port_sgmii_config(pp);
+		mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_SGMII_SERDES_PROTO);
+	else
+		mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_RGMII_SERDES_PROTO);
+
+	val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
 
 
-	mvneta_gmac_rgmii_set(pp, 1);
+	val |= MVNETA_GMAC2_PCS_ENABLE | MVNETA_GMAC2_PORT_RGMII;
 
 
 	/* Cancel Port Reset */
 	/* Cancel Port Reset */
-	val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
 	val &= ~MVNETA_GMAC2_PORT_RESET;
 	val &= ~MVNETA_GMAC2_PORT_RESET;
 	mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);
 	mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);
 
 
@@ -2774,6 +2750,7 @@ static void mvneta_port_power_up(struct mvneta_port *pp, int phy_mode)
 static int mvneta_probe(struct platform_device *pdev)
 static int mvneta_probe(struct platform_device *pdev)
 {
 {
 	const struct mbus_dram_target_info *dram_target_info;
 	const struct mbus_dram_target_info *dram_target_info;
+	struct resource *res;
 	struct device_node *dn = pdev->dev.of_node;
 	struct device_node *phy_node;
 	u32 phy_addr;
@@ -2838,9 +2815,15 @@ static int mvneta_probe(struct platform_device *pdev)
 
 	clk_prepare_enable(pp->clk);
 
-	pp->base = of_iomap(dn, 0);
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		err = -ENODEV;
+		goto err_clk;
+	}
+
+	pp->base = devm_ioremap_resource(&pdev->dev, res);
 	if (pp->base == NULL) {
-		err = -ENOMEM;
+		err = PTR_ERR(pp->base);
 		goto err_clk;
 	}
 
@@ -2848,7 +2831,7 @@ static int mvneta_probe(struct platform_device *pdev)
 	pp->stats = alloc_percpu(struct mvneta_pcpu_stats);
 	if (!pp->stats) {
 		err = -ENOMEM;
-		goto err_unmap;
+		goto err_clk;
 	}
 
 	for_each_possible_cpu(cpu) {
@@ -2913,8 +2896,6 @@ err_deinit:
 	mvneta_deinit(pp);
 err_free_stats:
 	free_percpu(pp->stats);
-err_unmap:
-	iounmap(pp->base);
 err_clk:
 	clk_disable_unprepare(pp->clk);
 err_free_irq:
@@ -2934,7 +2915,6 @@ static int mvneta_remove(struct platform_device *pdev)
 	mvneta_deinit(pp);
 	clk_disable_unprepare(pp->clk);
 	free_percpu(pp->stats);
-	iounmap(pp->base);
 	irq_dispose_mapping(dev->irq);
 	free_netdev(dev);
 

+ 5 - 1
drivers/net/ethernet/mellanox/mlx4/main.c

@@ -2681,7 +2681,11 @@ static pci_ers_result_t mlx4_pci_err_detected(struct pci_dev *pdev,
 
 static pci_ers_result_t mlx4_pci_slot_reset(struct pci_dev *pdev)
 {
-	int ret = __mlx4_init_one(pdev, 0);
+	const struct pci_device_id *id;
+	int ret;
+
+	id = pci_match_id(mlx4_pci_table, pdev);
+	ret = __mlx4_init_one(pdev, id->driver_data);
 
 	return ret ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
 }

+ 29 - 1
drivers/net/ethernet/micrel/ks8851.c

@@ -23,6 +23,7 @@
 #include <linux/crc32.h>
 #include <linux/mii.h>
 #include <linux/eeprom_93cx6.h>
+#include <linux/regulator/consumer.h>
 
 #include <linux/spi/spi.h>
 
@@ -83,6 +84,7 @@ union ks8851_tx_hdr {
  * @rc_rxqcr: Cached copy of KS_RXQCR.
  * @eeprom_size: Companion eeprom size in Bytes, 0 if no eeprom
  * @eeprom: 93CX6 EEPROM state for accessing on-board EEPROM.
+ * @vdd_reg:	Optional regulator supplying the chip
  *
  * The @lock ensures that the chip is protected when certain operations are
  * in progress. When the read or write packet transfer is in progress, most
@@ -130,6 +132,7 @@ struct ks8851_net {
 	struct spi_transfer	spi_xfer2[2];
 
 	struct eeprom_93cx6	eeprom;
+	struct regulator	*vdd_reg;
 };
 
 static int msg_enable;
@@ -1414,6 +1417,21 @@ static int ks8851_probe(struct spi_device *spi)
 	ks->spidev = spi;
 	ks->tx_space = 6144;
 
+	ks->vdd_reg = regulator_get_optional(&spi->dev, "vdd");
+	if (IS_ERR(ks->vdd_reg)) {
+		ret = PTR_ERR(ks->vdd_reg);
+		if (ret == -EPROBE_DEFER)
+			goto err_reg;
+	} else {
+		ret = regulator_enable(ks->vdd_reg);
+		if (ret) {
+			dev_err(&spi->dev, "regulator enable fail: %d\n",
+				ret);
+			goto err_reg_en;
+		}
+	}
+
+
 	mutex_init(&ks->lock);
 	spin_lock_init(&ks->statelock);
 
@@ -1508,8 +1526,14 @@ static int ks8851_probe(struct spi_device *spi)
 err_netdev:
 	free_irq(ndev->irq, ks);
 
-err_id:
 err_irq:
+err_id:
+	if (!IS_ERR(ks->vdd_reg))
+		regulator_disable(ks->vdd_reg);
+err_reg_en:
+	if (!IS_ERR(ks->vdd_reg))
+		regulator_put(ks->vdd_reg);
+err_reg:
 	free_netdev(ndev);
 	return ret;
 }
@@ -1523,6 +1547,10 @@ static int ks8851_remove(struct spi_device *spi)
 
 	unregister_netdev(priv->netdev);
 	free_irq(spi->irq, priv);
+	if (!IS_ERR(priv->vdd_reg)) {
+		regulator_disable(priv->vdd_reg);
+		regulator_put(priv->vdd_reg);
+	}
 	free_netdev(priv->netdev);
 
 	return 0;

+ 3 - 1
drivers/net/ethernet/qlogic/qlge/qlge_main.c

@@ -4765,7 +4765,9 @@ static int qlge_probe(struct pci_dev *pdev,
 	ndev->features = ndev->hw_features;
 	ndev->vlan_features = ndev->hw_features;
 	/* vlan gets same features (except vlan filter) */
-	ndev->vlan_features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
+	ndev->vlan_features &= ~(NETIF_F_HW_VLAN_CTAG_FILTER |
+				 NETIF_F_HW_VLAN_CTAG_TX |
+				 NETIF_F_HW_VLAN_CTAG_RX);
 
 	if (test_bit(QL_DMA64, &qdev->flags))
 		ndev->features |= NETIF_F_HIGHDMA;

+ 0 - 4
drivers/net/ethernet/ti/cpsw.c

@@ -2229,10 +2229,6 @@ static int cpsw_probe(struct platform_device *pdev)
 		goto clean_ale_ret;
 	}
 
-	if (cpts_register(&pdev->dev, priv->cpts,
-			  data->cpts_clock_mult, data->cpts_clock_shift))
-		dev_err(priv->dev, "error registering cpts device\n");
-
 	cpsw_notice(priv, probe, "initialized device (regs %pa, irq %d)\n",
 		    &ss_res->start, ndev->irq);
 

+ 2 - 2
drivers/net/ethernet/ti/davinci_cpdma.c

@@ -355,7 +355,7 @@ int cpdma_ctlr_stop(struct cpdma_ctlr *ctlr)
 	int i;
 
 	spin_lock_irqsave(&ctlr->lock, flags);
-	if (ctlr->state != CPDMA_STATE_ACTIVE) {
+	if (ctlr->state == CPDMA_STATE_TEARDOWN) {
 		spin_unlock_irqrestore(&ctlr->lock, flags);
 		return -EINVAL;
 	}
@@ -891,7 +891,7 @@ int cpdma_chan_stop(struct cpdma_chan *chan)
 	unsigned		timeout;
 
 	spin_lock_irqsave(&chan->lock, flags);
-	if (chan->state != CPDMA_STATE_ACTIVE) {
+	if (chan->state == CPDMA_STATE_TEARDOWN) {
 		spin_unlock_irqrestore(&chan->lock, flags);
 		return -EINVAL;
 	}

+ 42 - 11
drivers/net/ethernet/ti/davinci_emac.c

@@ -1532,9 +1532,9 @@ static int emac_dev_open(struct net_device *ndev)
 	struct device *emac_dev = &ndev->dev;
 	u32 cnt;
 	struct resource *res;
-	int ret;
+	int q, m, ret;
+	int res_num = 0, irq_num = 0;
 	int i = 0;
-	int k = 0;
 	struct emac_priv *priv = netdev_priv(ndev);
 
 	pm_runtime_get(&priv->pdev->dev);
@@ -1564,15 +1564,24 @@ static int emac_dev_open(struct net_device *ndev)
 	}
 
 	/* Request IRQ */
+	while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ,
+					    res_num))) {
+		for (irq_num = res->start; irq_num <= res->end; irq_num++) {
+			dev_err(emac_dev, "Request IRQ %d\n", irq_num);
+			if (request_irq(irq_num, emac_irq, 0, ndev->name,
+					ndev)) {
+				dev_err(emac_dev,
+					"DaVinci EMAC: request_irq() failed\n");
+				ret = -EBUSY;
 
 
-		for (i = res->start; i <= res->end; i++) {
-			if (devm_request_irq(&priv->pdev->dev, i, emac_irq,
-					     0, ndev->name, ndev))
 				goto rollback;
+			}
 		}
-		k++;
+		res_num++;
 	}
+	/* prepare counters for rollback in case of an error */
+	res_num--;
+	irq_num--;
 
 	/* Start/Enable EMAC hardware */
 	emac_hw_enable(priv);
@@ -1639,11 +1648,23 @@ static int emac_dev_open(struct net_device *ndev)
 
 	return 0;
 
-rollback:
-
-	dev_err(emac_dev, "DaVinci EMAC: devm_request_irq() failed");
-	ret = -EBUSY;
 err:
+	emac_int_disable(priv);
+	napi_disable(&priv->napi);
+
+rollback:
+	for (q = res_num; q >= 0; q--) {
+		res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, q);
+		/* at the first iteration, irq_num is already set to the
+		 * right value
+		 */
+		if (q != res_num)
+			irq_num = res->end;
+
+		for (m = irq_num; m >= res->start; m--)
+			free_irq(m, ndev);
+	}
+	cpdma_ctlr_stop(priv->dma);
 	pm_runtime_put(&priv->pdev->dev);
 	return ret;
 }
@@ -1659,6 +1680,9 @@ err:
  */
 static int emac_dev_stop(struct net_device *ndev)
 {
+	struct resource *res;
+	int i = 0;
+	int irq_num;
 	struct emac_priv *priv = netdev_priv(ndev);
 	struct device *emac_dev = &ndev->dev;
 
@@ -1674,6 +1698,13 @@ static int emac_dev_stop(struct net_device *ndev)
 	if (priv->phydev)
 		phy_disconnect(priv->phydev);
 
+	/* Free IRQ */
+	while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, i))) {
+		for (irq_num = res->start; irq_num <= res->end; irq_num++)
+			free_irq(irq_num, priv->ndev);
+		i++;
+	}
+
 	if (netif_msg_drv(priv))
 		dev_notice(emac_dev, "DaVinci EMAC: %s stopped\n", ndev->name);
 

+ 5 - 3
drivers/net/ethernet/via/via-rhine.c

@@ -923,7 +923,7 @@ static int rhine_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	if (rc) {
 		dev_err(&pdev->dev,
 			"32-bit PCI DMA addresses not supported by the card!?\n");
-		goto err_out;
+		goto err_out_pci_disable;
 	}
 
 	/* sanity check */
@@ -931,7 +931,7 @@ static int rhine_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	    (pci_resource_len(pdev, 1) < io_size)) {
 		rc = -EIO;
 		dev_err(&pdev->dev, "Insufficient PCI resources, aborting\n");
-		goto err_out;
+		goto err_out_pci_disable;
 	}
 
 	pioaddr = pci_resource_start(pdev, 0);
@@ -942,7 +942,7 @@ static int rhine_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	dev = alloc_etherdev(sizeof(struct rhine_private));
 	if (!dev) {
 		rc = -ENOMEM;
-		goto err_out;
+		goto err_out_pci_disable;
 	}
 	SET_NETDEV_DEV(dev, &pdev->dev);
 
@@ -1084,6 +1084,8 @@ err_out_free_res:
 	pci_release_regions(pdev);
 err_out_free_netdev:
 	free_netdev(dev);
+err_out_pci_disable:
+	pci_disable_device(pdev);
 err_out:
 	return rc;
 }

+ 2 - 1
drivers/net/ifb.c

@@ -180,7 +180,8 @@ static void ifb_setup(struct net_device *dev)
 	dev->tx_queue_len = TX_Q_LIMIT;
 
 	dev->features |= IFB_FEATURES;
-	dev->vlan_features |= IFB_FEATURES;
+	dev->vlan_features |= IFB_FEATURES & ~(NETIF_F_HW_VLAN_CTAG_TX |
+					       NETIF_F_HW_VLAN_STAG_TX);
 
 	dev->flags |= IFF_NOARP;
 	dev->flags &= ~IFF_MULTICAST;

+ 1 - 2
drivers/net/phy/phy_device.c

@@ -683,10 +683,9 @@ EXPORT_SYMBOL(phy_detach);
 int phy_suspend(struct phy_device *phydev)
 {
 	struct phy_driver *phydrv = to_phy_driver(phydev->dev.driver);
-	struct ethtool_wolinfo wol;
+	struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
 
 	/* If the device has WOL enabled, we cannot suspend the PHY */
-	wol.cmd = ETHTOOL_GWOL;
 	phy_ethtool_get_wol(phydev, &wol);
 	if (wol.wolopts)
 		return -EBUSY;

+ 23 - 25
drivers/net/usb/cdc_ncm.c

@@ -68,7 +68,6 @@ static struct usb_driver cdc_ncm_driver;
 static int cdc_ncm_setup(struct usbnet *dev)
 {
 	struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
-	struct usb_cdc_ncm_ntb_parameters ncm_parm;
 	u32 val;
 	u8 flags;
 	u8 iface_no;
@@ -82,22 +81,22 @@ static int cdc_ncm_setup(struct usbnet *dev)
 	err = usbnet_read_cmd(dev, USB_CDC_GET_NTB_PARAMETERS,
 			      USB_TYPE_CLASS | USB_DIR_IN
 			      |USB_RECIP_INTERFACE,
-			      0, iface_no, &ncm_parm,
-			      sizeof(ncm_parm));
+			      0, iface_no, &ctx->ncm_parm,
+			      sizeof(ctx->ncm_parm));
 	if (err < 0) {
 		dev_err(&dev->intf->dev, "failed GET_NTB_PARAMETERS\n");
 		return err; /* GET_NTB_PARAMETERS is required */
 	}
 
 	/* read correct set of parameters according to device mode */
-	ctx->rx_max = le32_to_cpu(ncm_parm.dwNtbInMaxSize);
-	ctx->tx_max = le32_to_cpu(ncm_parm.dwNtbOutMaxSize);
-	ctx->tx_remainder = le16_to_cpu(ncm_parm.wNdpOutPayloadRemainder);
-	ctx->tx_modulus = le16_to_cpu(ncm_parm.wNdpOutDivisor);
-	ctx->tx_ndp_modulus = le16_to_cpu(ncm_parm.wNdpOutAlignment);
+	ctx->rx_max = le32_to_cpu(ctx->ncm_parm.dwNtbInMaxSize);
+	ctx->tx_max = le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize);
+	ctx->tx_remainder = le16_to_cpu(ctx->ncm_parm.wNdpOutPayloadRemainder);
+	ctx->tx_modulus = le16_to_cpu(ctx->ncm_parm.wNdpOutDivisor);
+	ctx->tx_ndp_modulus = le16_to_cpu(ctx->ncm_parm.wNdpOutAlignment);
 	/* devices prior to NCM Errata shall set this field to zero */
-	ctx->tx_max_datagrams = le16_to_cpu(ncm_parm.wNtbOutMaxDatagrams);
-	ntb_fmt_supported = le16_to_cpu(ncm_parm.bmNtbFormatsSupported);
+	ctx->tx_max_datagrams = le16_to_cpu(ctx->ncm_parm.wNtbOutMaxDatagrams);
+	ntb_fmt_supported = le16_to_cpu(ctx->ncm_parm.bmNtbFormatsSupported);
 
 	/* there are some minor differences in NCM and MBIM defaults */
 	if (cdc_ncm_comm_intf_is_mbim(ctx->control->cur_altsetting)) {
@@ -146,7 +145,7 @@ static int cdc_ncm_setup(struct usbnet *dev)
 	}
 
 	/* inform device about NTB input size changes */
-	if (ctx->rx_max != le32_to_cpu(ncm_parm.dwNtbInMaxSize)) {
+	if (ctx->rx_max != le32_to_cpu(ctx->ncm_parm.dwNtbInMaxSize)) {
 		__le32 dwNtbInMaxSize = cpu_to_le32(ctx->rx_max);
 
 		err = usbnet_write_cmd(dev, USB_CDC_SET_NTB_INPUT_SIZE,
@@ -162,14 +161,6 @@ static int cdc_ncm_setup(struct usbnet *dev)
 		dev_dbg(&dev->intf->dev, "Using default maximum transmit length=%d\n",
 			CDC_NCM_NTB_MAX_SIZE_TX);
 		ctx->tx_max = CDC_NCM_NTB_MAX_SIZE_TX;
-
-		/* Adding a pad byte here simplifies the handling in
-		 * cdc_ncm_fill_tx_frame, by making tx_max always
-		 * represent the real skb max size.
-		 */
-		if (ctx->tx_max % usb_maxpacket(dev->udev, dev->out, 1) == 0)
-			ctx->tx_max++;
-
 	}
 
 	/*
@@ -439,6 +430,10 @@ advance:
 		goto error2;
 	}
 
+	/* initialize data interface */
+	if (cdc_ncm_setup(dev))
+		goto error2;
+
 	/* configure data interface */
 	temp = usb_set_interface(dev->udev, iface_no, data_altsetting);
 	if (temp) {
@@ -453,12 +448,6 @@ advance:
 		goto error2;
 	}
 
-	/* initialize data interface */
-	if (cdc_ncm_setup(dev))	{
-		dev_dbg(&intf->dev, "cdc_ncm_setup() failed\n");
-		goto error2;
-	}
-
 	usb_set_intfdata(ctx->data, dev);
 	usb_set_intfdata(ctx->control, dev);
 
@@ -475,6 +464,15 @@ advance:
 	dev->hard_mtu = ctx->tx_max;
 	dev->rx_urb_size = ctx->rx_max;
 
+	/* cdc_ncm_setup will override dwNtbOutMaxSize if it is
+	 * outside the sane range. Adding a pad byte here if necessary
+	 * simplifies the handling in cdc_ncm_fill_tx_frame, making
+	 * tx_max always represent the real skb max size.
+	 */
+	if (ctx->tx_max != le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize) &&
+	    ctx->tx_max % usb_maxpacket(dev->udev, dev->out, 1) == 0)
+		ctx->tx_max++;
+
 	return 0;
 
 error2:

+ 19 - 14
drivers/net/usb/usbnet.c

@@ -752,14 +752,12 @@ EXPORT_SYMBOL_GPL(usbnet_unlink_rx_urbs);
 // precondition: never called in_interrupt
 static void usbnet_terminate_urbs(struct usbnet *dev)
 {
-	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
 	DECLARE_WAITQUEUE(wait, current);
 	int temp;
 
 	/* ensure there are no more active urbs */
-	add_wait_queue(&unlink_wakeup, &wait);
+	add_wait_queue(&dev->wait, &wait);
 	set_current_state(TASK_UNINTERRUPTIBLE);
-	dev->wait = &unlink_wakeup;
 	temp = unlink_urbs(dev, &dev->txq) +
 		unlink_urbs(dev, &dev->rxq);
 
@@ -773,15 +771,14 @@ static void usbnet_terminate_urbs(struct usbnet *dev)
 				  "waited for %d urb completions\n", temp);
 	}
 	set_current_state(TASK_RUNNING);
-	dev->wait = NULL;
-	remove_wait_queue(&unlink_wakeup, &wait);
+	remove_wait_queue(&dev->wait, &wait);
 }
 
 int usbnet_stop (struct net_device *net)
 {
 	struct usbnet		*dev = netdev_priv(net);
 	struct driver_info	*info = dev->driver_info;
-	int			retval;
+	int			retval, pm;
 
 	clear_bit(EVENT_DEV_OPEN, &dev->flags);
 	netif_stop_queue (net);
@@ -791,6 +788,8 @@ int usbnet_stop (struct net_device *net)
 		   net->stats.rx_packets, net->stats.tx_packets,
 		   net->stats.rx_errors, net->stats.tx_errors);
 
+	/* to not race resume */
+	pm = usb_autopm_get_interface(dev->intf);
 	/* allow minidriver to stop correctly (wireless devices to turn off
 	 * radio etc) */
 	if (info->stop) {
@@ -817,6 +816,9 @@ int usbnet_stop (struct net_device *net)
 	dev->flags = 0;
 	del_timer_sync (&dev->delay);
 	tasklet_kill (&dev->bh);
+	if (!pm)
+		usb_autopm_put_interface(dev->intf);
+
 	if (info->manage_power &&
 	    !test_and_clear_bit(EVENT_NO_RUNTIME_PM, &dev->flags))
 		info->manage_power(dev, 0);
@@ -1437,11 +1439,12 @@ static void usbnet_bh (unsigned long param)
 	/* restart RX again after disabling due to high error rate */
 	clear_bit(EVENT_RX_KILL, &dev->flags);
 
-	// waiting for all pending urbs to complete?
-	if (dev->wait) {
-		if ((dev->txq.qlen + dev->rxq.qlen + dev->done.qlen) == 0) {
-			wake_up (dev->wait);
-		}
+	/* waiting for all pending urbs to complete?
+	 * only then can we forgo submitting anew
+	 */
+	if (waitqueue_active(&dev->wait)) {
+		if (dev->txq.qlen + dev->rxq.qlen + dev->done.qlen == 0)
+			wake_up_all(&dev->wait);
 
 	// or are we maybe short a few urbs?
 	} else if (netif_running (dev->net) &&
@@ -1580,6 +1583,7 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
 	dev->driver_name = name;
 	dev->msg_enable = netif_msg_init (msg_level, NETIF_MSG_DRV
 				| NETIF_MSG_PROBE | NETIF_MSG_LINK);
+	init_waitqueue_head(&dev->wait);
 	skb_queue_head_init (&dev->rxq);
 	skb_queue_head_init (&dev->txq);
 	skb_queue_head_init (&dev->done);
@@ -1791,9 +1795,10 @@ int usbnet_resume (struct usb_interface *intf)
 		spin_unlock_irq(&dev->txq.lock);
 
 		if (test_bit(EVENT_DEV_OPEN, &dev->flags)) {
-			/* handle remote wakeup ASAP */
-			if (!dev->wait &&
-				netif_device_present(dev->net) &&
+			/* handle remote wakeup ASAP
+			 * we cannot race against stop
+			 */
+			if (netif_device_present(dev->net) &&
 				!timer_pending(&dev->delay) &&
 				!test_bit(EVENT_RX_HALT, &dev->flags))
 					rx_alloc_submit(dev, GFP_NOIO);

+ 4 - 1
drivers/net/veth.c

@@ -286,7 +286,10 @@ static void veth_setup(struct net_device *dev)
 	dev->features |= NETIF_F_LLTX;
 	dev->features |= VETH_FEATURES;
 	dev->vlan_features = dev->features &
-			     ~(NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX);
+			     ~(NETIF_F_HW_VLAN_CTAG_TX |
+			       NETIF_F_HW_VLAN_STAG_TX |
+			       NETIF_F_HW_VLAN_CTAG_RX |
+			       NETIF_F_HW_VLAN_STAG_RX);
 	dev->destructor = veth_dev_free;
 
 	dev->hw_features = VETH_FEATURES;

+ 3 - 3
drivers/net/virtio_net.c

@@ -671,8 +671,7 @@ static bool try_fill_recv(struct receive_queue *rq, gfp_t gfp)
 		if (err)
 			break;
 	} while (rq->vq->num_free);
-	if (unlikely(!virtqueue_kick(rq->vq)))
-		return false;
+	virtqueue_kick(rq->vq);
 	return !oom;
 }
 
@@ -877,7 +876,7 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
 	err = xmit_skb(sq, skb);
 
 	/* This should not happen! */
-	if (unlikely(err) || unlikely(!virtqueue_kick(sq->vq))) {
+	if (unlikely(err)) {
 		dev->stats.tx_fifo_errors++;
 		if (net_ratelimit())
 			dev_warn(&dev->dev,
@@ -886,6 +885,7 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
 		kfree_skb(skb);
 		return NETDEV_TX_OK;
 	}
+	virtqueue_kick(sq->vq);
 
 	/* Don't wait up for transmitted skbs to be freed. */
 	skb_orphan(skb);

+ 116 - 14
drivers/net/vxlan.c

@@ -1318,6 +1318,9 @@ static int arp_reduce(struct net_device *dev, struct sk_buff *skb)
 
 
 		neigh_release(n);
 		neigh_release(n);
 
 
+		if (reply == NULL)
+			goto out;
+
 		skb_reset_mac_header(reply);
 		skb_reset_mac_header(reply);
 		__skb_pull(reply, skb_network_offset(reply));
 		__skb_pull(reply, skb_network_offset(reply));
 		reply->ip_summed = CHECKSUM_UNNECESSARY;
 		reply->ip_summed = CHECKSUM_UNNECESSARY;
@@ -1339,15 +1342,103 @@ out:
 }
 }
 
 
 #if IS_ENABLED(CONFIG_IPV6)
 #if IS_ENABLED(CONFIG_IPV6)
+
+static struct sk_buff *vxlan_na_create(struct sk_buff *request,
+	struct neighbour *n, bool isrouter)
+{
+	struct net_device *dev = request->dev;
+	struct sk_buff *reply;
+	struct nd_msg *ns, *na;
+	struct ipv6hdr *pip6;
+	u8 *daddr;
+	int na_olen = 8; /* opt hdr + ETH_ALEN for target */
+	int ns_olen;
+	int i, len;
+
+	if (dev == NULL)
+		return NULL;
+
+	len = LL_RESERVED_SPACE(dev) + sizeof(struct ipv6hdr) +
+		sizeof(*na) + na_olen + dev->needed_tailroom;
+	reply = alloc_skb(len, GFP_ATOMIC);
+	if (reply == NULL)
+		return NULL;
+
+	reply->protocol = htons(ETH_P_IPV6);
+	reply->dev = dev;
+	skb_reserve(reply, LL_RESERVED_SPACE(request->dev));
+	skb_push(reply, sizeof(struct ethhdr));
+	skb_set_mac_header(reply, 0);
+
+	ns = (struct nd_msg *)skb_transport_header(request);
+
+	daddr = eth_hdr(request)->h_source;
+	ns_olen = request->len - skb_transport_offset(request) - sizeof(*ns);
+	for (i = 0; i < ns_olen-1; i += (ns->opt[i+1]<<3)) {
+		if (ns->opt[i] == ND_OPT_SOURCE_LL_ADDR) {
+			daddr = ns->opt + i + sizeof(struct nd_opt_hdr);
+			break;
+		}
+	}
+
+	/* Ethernet header */
+	ether_addr_copy(eth_hdr(reply)->h_dest, daddr);
+	ether_addr_copy(eth_hdr(reply)->h_source, n->ha);
+	eth_hdr(reply)->h_proto = htons(ETH_P_IPV6);
+	reply->protocol = htons(ETH_P_IPV6);
+
+	skb_pull(reply, sizeof(struct ethhdr));
+	skb_set_network_header(reply, 0);
+	skb_put(reply, sizeof(struct ipv6hdr));
+
+	/* IPv6 header */
+
+	pip6 = ipv6_hdr(reply);
+	memset(pip6, 0, sizeof(struct ipv6hdr));
+	pip6->version = 6;
+	pip6->priority = ipv6_hdr(request)->priority;
+	pip6->nexthdr = IPPROTO_ICMPV6;
+	pip6->hop_limit = 255;
+	pip6->daddr = ipv6_hdr(request)->saddr;
+	pip6->saddr = *(struct in6_addr *)n->primary_key;
+
+	skb_pull(reply, sizeof(struct ipv6hdr));
+	skb_set_transport_header(reply, 0);
+
+	na = (struct nd_msg *)skb_put(reply, sizeof(*na) + na_olen);
+
+	/* Neighbor Advertisement */
+	memset(na, 0, sizeof(*na)+na_olen);
+	na->icmph.icmp6_type = NDISC_NEIGHBOUR_ADVERTISEMENT;
+	na->icmph.icmp6_router = isrouter;
+	na->icmph.icmp6_override = 1;
+	na->icmph.icmp6_solicited = 1;
+	na->target = ns->target;
+	ether_addr_copy(&na->opt[2], n->ha);
+	na->opt[0] = ND_OPT_TARGET_LL_ADDR;
+	na->opt[1] = na_olen >> 3;
+
+	na->icmph.icmp6_cksum = csum_ipv6_magic(&pip6->saddr,
+		&pip6->daddr, sizeof(*na)+na_olen, IPPROTO_ICMPV6,
+		csum_partial(na, sizeof(*na)+na_olen, 0));
+
+	pip6->payload_len = htons(sizeof(*na)+na_olen);
+
+	skb_push(reply, sizeof(struct ipv6hdr));
+
+	reply->ip_summed = CHECKSUM_UNNECESSARY;
+
+	return reply;
+}
+
 static int neigh_reduce(struct net_device *dev, struct sk_buff *skb)
 static int neigh_reduce(struct net_device *dev, struct sk_buff *skb)
 {
 {
 	struct vxlan_dev *vxlan = netdev_priv(dev);
 	struct vxlan_dev *vxlan = netdev_priv(dev);
-	struct neighbour *n;
-	union vxlan_addr ipa;
+	struct nd_msg *msg;
 	const struct ipv6hdr *iphdr;
 	const struct ipv6hdr *iphdr;
 	const struct in6_addr *saddr, *daddr;
 	const struct in6_addr *saddr, *daddr;
-	struct nd_msg *msg;
-	struct inet6_dev *in6_dev = NULL;
+	struct neighbour *n;
+	struct inet6_dev *in6_dev;
 
 
 	in6_dev = __in6_dev_get(dev);
 	in6_dev = __in6_dev_get(dev);
 	if (!in6_dev)
 	if (!in6_dev)
@@ -1360,19 +1451,20 @@ static int neigh_reduce(struct net_device *dev, struct sk_buff *skb)
 	saddr = &iphdr->saddr;
 	saddr = &iphdr->saddr;
 	daddr = &iphdr->daddr;
 	daddr = &iphdr->daddr;
 
 
-	if (ipv6_addr_loopback(daddr) ||
-	    ipv6_addr_is_multicast(daddr))
-		goto out;
-
 	msg = (struct nd_msg *)skb_transport_header(skb);
 	msg = (struct nd_msg *)skb_transport_header(skb);
 	if (msg->icmph.icmp6_code != 0 ||
 	if (msg->icmph.icmp6_code != 0 ||
 	    msg->icmph.icmp6_type != NDISC_NEIGHBOUR_SOLICITATION)
 	    msg->icmph.icmp6_type != NDISC_NEIGHBOUR_SOLICITATION)
 		goto out;
 		goto out;
 
 
-	n = neigh_lookup(ipv6_stub->nd_tbl, daddr, dev);
+	if (ipv6_addr_loopback(daddr) ||
+	    ipv6_addr_is_multicast(&msg->target))
+		goto out;
+
+	n = neigh_lookup(ipv6_stub->nd_tbl, &msg->target, dev);
 
 
 	if (n) {
 	if (n) {
 		struct vxlan_fdb *f;
 		struct vxlan_fdb *f;
+		struct sk_buff *reply;
 
 
 		if (!(n->nud_state & NUD_CONNECTED)) {
 		if (!(n->nud_state & NUD_CONNECTED)) {
 			neigh_release(n);
 			neigh_release(n);
@@ -1386,13 +1478,23 @@ static int neigh_reduce(struct net_device *dev, struct sk_buff *skb)
 			goto out;
 			goto out;
 		}
 		}
 
 
-		ipv6_stub->ndisc_send_na(dev, n, saddr, &msg->target,
-					 !!in6_dev->cnf.forwarding,
-					 true, false, false);
+		reply = vxlan_na_create(skb, n,
+					!!(f ? f->flags & NTF_ROUTER : 0));
+
 		neigh_release(n);
 		neigh_release(n);
+
+		if (reply == NULL)
+			goto out;
+
+		if (netif_rx_ni(reply) == NET_RX_DROP)
+			dev->stats.rx_dropped++;
+
 	} else if (vxlan->flags & VXLAN_F_L3MISS) {
 	} else if (vxlan->flags & VXLAN_F_L3MISS) {
-		ipa.sin6.sin6_addr = *daddr;
-		ipa.sa.sa_family = AF_INET6;
+		union vxlan_addr ipa = {
+			.sin6.sin6_addr = msg->target,
+			.sa.sa_family = AF_INET6,
+		};
+
 		vxlan_ip_miss(dev, &ipa);
 		vxlan_ip_miss(dev, &ipa);
 	}
 	}
 
 

+ 1 - 2
drivers/net/wireless/ath/ath9k/hw.c

@@ -1548,6 +1548,7 @@ bool ath9k_hw_check_alive(struct ath_hw *ah)
 		if (reg != last_val)
 		if (reg != last_val)
 			return true;
 			return true;
 
 
+		udelay(1);
 		last_val = reg;
 		last_val = reg;
 		if ((reg & 0x7E7FFFEF) == 0x00702400)
 		if ((reg & 0x7E7FFFEF) == 0x00702400)
 			continue;
 			continue;
@@ -1560,8 +1561,6 @@ bool ath9k_hw_check_alive(struct ath_hw *ah)
 		default:
 		default:
 			return true;
 			return true;
 		}
 		}
-
-		udelay(1);
 	} while (count-- > 0);
 	} while (count-- > 0);
 
 
 	return false;
 	return false;

+ 2 - 2
drivers/net/wireless/ath/ath9k/xmit.c

@@ -2063,7 +2063,7 @@ static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
 
 
 	ATH_TXBUF_RESET(bf);
 	ATH_TXBUF_RESET(bf);
 
 
-	if (tid) {
+	if (tid && ieee80211_is_data_present(hdr->frame_control)) {
 		fragno = le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG;
 		fragno = le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG;
 		seqno = tid->seq_next;
 		seqno = tid->seq_next;
 		hdr->seq_ctrl = cpu_to_le16(tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT);
 		hdr->seq_ctrl = cpu_to_le16(tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT);
@@ -2186,7 +2186,7 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
 		txq->stopped = true;
 		txq->stopped = true;
 	}
 	}
 
 
-	if (txctl->an)
+	if (txctl->an && ieee80211_is_data_present(hdr->frame_control))
 		tid = ath_get_skb_tid(sc, txctl->an, skb);
 		tid = ath_get_skb_tid(sc, txctl->an, skb);
 
 
 	if (info->flags & IEEE80211_TX_CTL_PS_RESPONSE) {
 	if (info->flags & IEEE80211_TX_CTL_PS_RESPONSE) {

+ 3 - 1
drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c

@@ -1948,8 +1948,10 @@ static int brcmf_sdio_txpkt_prep_sg(struct brcmf_sdio *bus,
 		if (pkt_pad == NULL)
 		if (pkt_pad == NULL)
 			return -ENOMEM;
 			return -ENOMEM;
 		ret = brcmf_sdio_txpkt_hdalign(bus, pkt_pad);
 		ret = brcmf_sdio_txpkt_hdalign(bus, pkt_pad);
-		if (unlikely(ret < 0))
+		if (unlikely(ret < 0)) {
+			kfree_skb(pkt_pad);
 			return ret;
 			return ret;
+		}
 		memcpy(pkt_pad->data,
 		memcpy(pkt_pad->data,
 		       pkt->data + pkt->len - tail_chop,
 		       pkt->data + pkt->len - tail_chop,
 		       tail_chop);
 		       tail_chop);

+ 3 - 3
drivers/net/wireless/rt2x00/rt2800lib.c

@@ -5460,14 +5460,15 @@ static void rt2800_init_bbp_53xx(struct rt2x00_dev *rt2x00dev)
 
 
 	rt2800_bbp_write(rt2x00dev, 68, 0x0b);
 	rt2800_bbp_write(rt2x00dev, 68, 0x0b);
 
 
-	rt2800_bbp_write(rt2x00dev, 69, 0x0d);
-	rt2800_bbp_write(rt2x00dev, 70, 0x06);
+	rt2800_bbp_write(rt2x00dev, 69, 0x12);
 	rt2800_bbp_write(rt2x00dev, 73, 0x13);
 	rt2800_bbp_write(rt2x00dev, 73, 0x13);
 	rt2800_bbp_write(rt2x00dev, 75, 0x46);
 	rt2800_bbp_write(rt2x00dev, 75, 0x46);
 	rt2800_bbp_write(rt2x00dev, 76, 0x28);
 	rt2800_bbp_write(rt2x00dev, 76, 0x28);
 
 
 	rt2800_bbp_write(rt2x00dev, 77, 0x59);
 	rt2800_bbp_write(rt2x00dev, 77, 0x59);
 
 
+	rt2800_bbp_write(rt2x00dev, 70, 0x0a);
+
 	rt2800_bbp_write(rt2x00dev, 79, 0x13);
 	rt2800_bbp_write(rt2x00dev, 79, 0x13);
 	rt2800_bbp_write(rt2x00dev, 80, 0x05);
 	rt2800_bbp_write(rt2x00dev, 80, 0x05);
 	rt2800_bbp_write(rt2x00dev, 81, 0x33);
 	rt2800_bbp_write(rt2x00dev, 81, 0x33);
@@ -5510,7 +5511,6 @@ static void rt2800_init_bbp_53xx(struct rt2x00_dev *rt2x00dev)
 	if (rt2x00_rt(rt2x00dev, RT5392)) {
 	if (rt2x00_rt(rt2x00dev, RT5392)) {
 		rt2800_bbp_write(rt2x00dev, 134, 0xd0);
 		rt2800_bbp_write(rt2x00dev, 134, 0xd0);
 		rt2800_bbp_write(rt2x00dev, 135, 0xf6);
 		rt2800_bbp_write(rt2x00dev, 135, 0xf6);
-		rt2800_bbp_write(rt2x00dev, 148, 0x84);
 	}
 	}
 
 
 	rt2800_disable_unused_dac_adc(rt2x00dev);
 	rt2800_disable_unused_dac_adc(rt2x00dev);

+ 8 - 8
drivers/scsi/bnx2fc/bnx2fc_io.c

@@ -594,13 +594,13 @@ static void bnx2fc_free_mp_resc(struct bnx2fc_cmd *io_req)
 		mp_req->mp_resp_bd = NULL;
 		mp_req->mp_resp_bd = NULL;
 	}
 	}
 	if (mp_req->req_buf) {
 	if (mp_req->req_buf) {
-		dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+		dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
 				     mp_req->req_buf,
 				     mp_req->req_buf,
 				     mp_req->req_buf_dma);
 				     mp_req->req_buf_dma);
 		mp_req->req_buf = NULL;
 		mp_req->req_buf = NULL;
 	}
 	}
 	if (mp_req->resp_buf) {
 	if (mp_req->resp_buf) {
-		dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+		dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
 				     mp_req->resp_buf,
 				     mp_req->resp_buf,
 				     mp_req->resp_buf_dma);
 				     mp_req->resp_buf_dma);
 		mp_req->resp_buf = NULL;
 		mp_req->resp_buf = NULL;
@@ -622,7 +622,7 @@ int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req)
 
 
 	mp_req->req_len = sizeof(struct fcp_cmnd);
 	mp_req->req_len = sizeof(struct fcp_cmnd);
 	io_req->data_xfer_len = mp_req->req_len;
 	io_req->data_xfer_len = mp_req->req_len;
-	mp_req->req_buf = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
+	mp_req->req_buf = dma_alloc_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
 					     &mp_req->req_buf_dma,
 					     &mp_req->req_buf_dma,
 					     GFP_ATOMIC);
 					     GFP_ATOMIC);
 	if (!mp_req->req_buf) {
 	if (!mp_req->req_buf) {
@@ -631,7 +631,7 @@ int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req)
 		return FAILED;
 		return FAILED;
 	}
 	}
 
 
-	mp_req->resp_buf = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
+	mp_req->resp_buf = dma_alloc_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
 					      &mp_req->resp_buf_dma,
 					      &mp_req->resp_buf_dma,
 					      GFP_ATOMIC);
 					      GFP_ATOMIC);
 	if (!mp_req->resp_buf) {
 	if (!mp_req->resp_buf) {
@@ -639,8 +639,8 @@ int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req)
 		bnx2fc_free_mp_resc(io_req);
 		bnx2fc_free_mp_resc(io_req);
 		return FAILED;
 		return FAILED;
 	}
 	}
-	memset(mp_req->req_buf, 0, PAGE_SIZE);
-	memset(mp_req->resp_buf, 0, PAGE_SIZE);
+	memset(mp_req->req_buf, 0, CNIC_PAGE_SIZE);
+	memset(mp_req->resp_buf, 0, CNIC_PAGE_SIZE);
 
 
 	/* Allocate and map mp_req_bd and mp_resp_bd */
 	/* Allocate and map mp_req_bd and mp_resp_bd */
 	sz = sizeof(struct fcoe_bd_ctx);
 	sz = sizeof(struct fcoe_bd_ctx);
@@ -665,7 +665,7 @@ int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req)
 	mp_req_bd = mp_req->mp_req_bd;
 	mp_req_bd = mp_req->mp_req_bd;
 	mp_req_bd->buf_addr_lo = (u32)addr & 0xffffffff;
 	mp_req_bd->buf_addr_lo = (u32)addr & 0xffffffff;
 	mp_req_bd->buf_addr_hi = (u32)((u64)addr >> 32);
 	mp_req_bd->buf_addr_hi = (u32)((u64)addr >> 32);
-	mp_req_bd->buf_len = PAGE_SIZE;
+	mp_req_bd->buf_len = CNIC_PAGE_SIZE;
 	mp_req_bd->flags = 0;
 	mp_req_bd->flags = 0;
 
 
 	/*
 	/*
@@ -677,7 +677,7 @@ int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req)
 	addr = mp_req->resp_buf_dma;
 	addr = mp_req->resp_buf_dma;
 	mp_resp_bd->buf_addr_lo = (u32)addr & 0xffffffff;
 	mp_resp_bd->buf_addr_lo = (u32)addr & 0xffffffff;
 	mp_resp_bd->buf_addr_hi = (u32)((u64)addr >> 32);
 	mp_resp_bd->buf_addr_hi = (u32)((u64)addr >> 32);
-	mp_resp_bd->buf_len = PAGE_SIZE;
+	mp_resp_bd->buf_len = CNIC_PAGE_SIZE;
 	mp_resp_bd->flags = 0;
 	mp_resp_bd->flags = 0;
 
 
 	return SUCCESS;
 	return SUCCESS;

+ 21 - 17
drivers/scsi/bnx2fc/bnx2fc_tgt.c

@@ -673,7 +673,8 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
 
 
 	/* Allocate and map SQ */
 	/* Allocate and map SQ */
 	tgt->sq_mem_size = tgt->max_sqes * BNX2FC_SQ_WQE_SIZE;
 	tgt->sq_mem_size = tgt->max_sqes * BNX2FC_SQ_WQE_SIZE;
-	tgt->sq_mem_size = (tgt->sq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+	tgt->sq_mem_size = (tgt->sq_mem_size + (CNIC_PAGE_SIZE - 1)) &
+			   CNIC_PAGE_MASK;
 
 
 	tgt->sq = dma_alloc_coherent(&hba->pcidev->dev, tgt->sq_mem_size,
 	tgt->sq = dma_alloc_coherent(&hba->pcidev->dev, tgt->sq_mem_size,
 				     &tgt->sq_dma, GFP_KERNEL);
 				     &tgt->sq_dma, GFP_KERNEL);
@@ -686,7 +687,8 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
 
 
 	/* Allocate and map CQ */
 	/* Allocate and map CQ */
 	tgt->cq_mem_size = tgt->max_cqes * BNX2FC_CQ_WQE_SIZE;
 	tgt->cq_mem_size = tgt->max_cqes * BNX2FC_CQ_WQE_SIZE;
-	tgt->cq_mem_size = (tgt->cq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+	tgt->cq_mem_size = (tgt->cq_mem_size + (CNIC_PAGE_SIZE - 1)) &
+			   CNIC_PAGE_MASK;
 
 
 	tgt->cq = dma_alloc_coherent(&hba->pcidev->dev, tgt->cq_mem_size,
 	tgt->cq = dma_alloc_coherent(&hba->pcidev->dev, tgt->cq_mem_size,
 				     &tgt->cq_dma, GFP_KERNEL);
 				     &tgt->cq_dma, GFP_KERNEL);
@@ -699,7 +701,8 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
 
 
 	/* Allocate and map RQ and RQ PBL */
 	/* Allocate and map RQ and RQ PBL */
 	tgt->rq_mem_size = tgt->max_rqes * BNX2FC_RQ_WQE_SIZE;
 	tgt->rq_mem_size = tgt->max_rqes * BNX2FC_RQ_WQE_SIZE;
-	tgt->rq_mem_size = (tgt->rq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+	tgt->rq_mem_size = (tgt->rq_mem_size + (CNIC_PAGE_SIZE - 1)) &
+			   CNIC_PAGE_MASK;
 
 
 	tgt->rq = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_mem_size,
 	tgt->rq = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_mem_size,
 					&tgt->rq_dma, GFP_KERNEL);
 					&tgt->rq_dma, GFP_KERNEL);
@@ -710,8 +713,9 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
 	}
 	}
 	memset(tgt->rq, 0, tgt->rq_mem_size);
 	memset(tgt->rq, 0, tgt->rq_mem_size);
 
 
-	tgt->rq_pbl_size = (tgt->rq_mem_size / PAGE_SIZE) * sizeof(void *);
-	tgt->rq_pbl_size = (tgt->rq_pbl_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+	tgt->rq_pbl_size = (tgt->rq_mem_size / CNIC_PAGE_SIZE) * sizeof(void *);
+	tgt->rq_pbl_size = (tgt->rq_pbl_size + (CNIC_PAGE_SIZE - 1)) &
+			   CNIC_PAGE_MASK;
 
 
 	tgt->rq_pbl = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_pbl_size,
 	tgt->rq_pbl = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_pbl_size,
 					 &tgt->rq_pbl_dma, GFP_KERNEL);
 					 &tgt->rq_pbl_dma, GFP_KERNEL);
@@ -722,7 +726,7 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
 	}
 	}
 
 
 	memset(tgt->rq_pbl, 0, tgt->rq_pbl_size);
 	memset(tgt->rq_pbl, 0, tgt->rq_pbl_size);
-	num_pages = tgt->rq_mem_size / PAGE_SIZE;
+	num_pages = tgt->rq_mem_size / CNIC_PAGE_SIZE;
 	page = tgt->rq_dma;
 	page = tgt->rq_dma;
 	pbl = (u32 *)tgt->rq_pbl;
 	pbl = (u32 *)tgt->rq_pbl;
 
 
@@ -731,13 +735,13 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
 		pbl++;
 		pbl++;
 		*pbl = (u32)((u64)page >> 32);
 		*pbl = (u32)((u64)page >> 32);
 		pbl++;
 		pbl++;
-		page += PAGE_SIZE;
+		page += CNIC_PAGE_SIZE;
 	}
 	}
 
 
 	/* Allocate and map XFERQ */
 	/* Allocate and map XFERQ */
 	tgt->xferq_mem_size = tgt->max_sqes * BNX2FC_XFERQ_WQE_SIZE;
 	tgt->xferq_mem_size = tgt->max_sqes * BNX2FC_XFERQ_WQE_SIZE;
-	tgt->xferq_mem_size = (tgt->xferq_mem_size + (PAGE_SIZE - 1)) &
-			       PAGE_MASK;
+	tgt->xferq_mem_size = (tgt->xferq_mem_size + (CNIC_PAGE_SIZE - 1)) &
+			       CNIC_PAGE_MASK;
 
 
 	tgt->xferq = dma_alloc_coherent(&hba->pcidev->dev, tgt->xferq_mem_size,
 	tgt->xferq = dma_alloc_coherent(&hba->pcidev->dev, tgt->xferq_mem_size,
 					&tgt->xferq_dma, GFP_KERNEL);
 					&tgt->xferq_dma, GFP_KERNEL);
@@ -750,8 +754,8 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
 
 
 	/* Allocate and map CONFQ & CONFQ PBL */
 	/* Allocate and map CONFQ & CONFQ PBL */
 	tgt->confq_mem_size = tgt->max_sqes * BNX2FC_CONFQ_WQE_SIZE;
 	tgt->confq_mem_size = tgt->max_sqes * BNX2FC_CONFQ_WQE_SIZE;
-	tgt->confq_mem_size = (tgt->confq_mem_size + (PAGE_SIZE - 1)) &
-			       PAGE_MASK;
+	tgt->confq_mem_size = (tgt->confq_mem_size + (CNIC_PAGE_SIZE - 1)) &
+			       CNIC_PAGE_MASK;
 
 
 	tgt->confq = dma_alloc_coherent(&hba->pcidev->dev, tgt->confq_mem_size,
 	tgt->confq = dma_alloc_coherent(&hba->pcidev->dev, tgt->confq_mem_size,
 					&tgt->confq_dma, GFP_KERNEL);
 					&tgt->confq_dma, GFP_KERNEL);
@@ -763,9 +767,9 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
 	memset(tgt->confq, 0, tgt->confq_mem_size);
 	memset(tgt->confq, 0, tgt->confq_mem_size);
 
 
 	tgt->confq_pbl_size =
 	tgt->confq_pbl_size =
-		(tgt->confq_mem_size / PAGE_SIZE) * sizeof(void *);
+		(tgt->confq_mem_size / CNIC_PAGE_SIZE) * sizeof(void *);
 	tgt->confq_pbl_size =
 	tgt->confq_pbl_size =
-		(tgt->confq_pbl_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+		(tgt->confq_pbl_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK;
 
 
 	tgt->confq_pbl = dma_alloc_coherent(&hba->pcidev->dev,
 	tgt->confq_pbl = dma_alloc_coherent(&hba->pcidev->dev,
 					    tgt->confq_pbl_size,
 					    tgt->confq_pbl_size,
@@ -777,7 +781,7 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
 	}
 	}
 
 
 	memset(tgt->confq_pbl, 0, tgt->confq_pbl_size);
 	memset(tgt->confq_pbl, 0, tgt->confq_pbl_size);
-	num_pages = tgt->confq_mem_size / PAGE_SIZE;
+	num_pages = tgt->confq_mem_size / CNIC_PAGE_SIZE;
 	page = tgt->confq_dma;
 	page = tgt->confq_dma;
 	pbl = (u32 *)tgt->confq_pbl;
 	pbl = (u32 *)tgt->confq_pbl;
 
 
@@ -786,7 +790,7 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
 		pbl++;
 		pbl++;
 		*pbl = (u32)((u64)page >> 32);
 		*pbl = (u32)((u64)page >> 32);
 		pbl++;
 		pbl++;
-		page += PAGE_SIZE;
+		page += CNIC_PAGE_SIZE;
 	}
 	}
 
 
 	/* Allocate and map ConnDB */
 	/* Allocate and map ConnDB */
@@ -805,8 +809,8 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
 
 
 	/* Allocate and map LCQ */
 	/* Allocate and map LCQ */
 	tgt->lcq_mem_size = (tgt->max_sqes + 8) * BNX2FC_SQ_WQE_SIZE;
 	tgt->lcq_mem_size = (tgt->max_sqes + 8) * BNX2FC_SQ_WQE_SIZE;
-	tgt->lcq_mem_size = (tgt->lcq_mem_size + (PAGE_SIZE - 1)) &
-			     PAGE_MASK;
+	tgt->lcq_mem_size = (tgt->lcq_mem_size + (CNIC_PAGE_SIZE - 1)) &
+			     CNIC_PAGE_MASK;
 
 
 	tgt->lcq = dma_alloc_coherent(&hba->pcidev->dev, tgt->lcq_mem_size,
 	tgt->lcq = dma_alloc_coherent(&hba->pcidev->dev, tgt->lcq_mem_size,
 				      &tgt->lcq_dma, GFP_KERNEL);
 				      &tgt->lcq_dma, GFP_KERNEL);

+ 26 - 26
drivers/scsi/bnx2i/bnx2i_hwi.c

@@ -61,7 +61,7 @@ static void bnx2i_adjust_qp_size(struct bnx2i_hba *hba)
 	 * yield integral num of page buffers
 	 * yield integral num of page buffers
 	 */
 	 */
 	/* adjust SQ */
 	/* adjust SQ */
-	num_elements_per_pg = PAGE_SIZE / BNX2I_SQ_WQE_SIZE;
+	num_elements_per_pg = CNIC_PAGE_SIZE / BNX2I_SQ_WQE_SIZE;
 	if (hba->max_sqes < num_elements_per_pg)
 	if (hba->max_sqes < num_elements_per_pg)
 		hba->max_sqes = num_elements_per_pg;
 		hba->max_sqes = num_elements_per_pg;
 	else if (hba->max_sqes % num_elements_per_pg)
 	else if (hba->max_sqes % num_elements_per_pg)
@@ -69,7 +69,7 @@ static void bnx2i_adjust_qp_size(struct bnx2i_hba *hba)
 				 ~(num_elements_per_pg - 1);
 				 ~(num_elements_per_pg - 1);
 
 
 	/* adjust CQ */
 	/* adjust CQ */
-	num_elements_per_pg = PAGE_SIZE / BNX2I_CQE_SIZE;
+	num_elements_per_pg = CNIC_PAGE_SIZE / BNX2I_CQE_SIZE;
 	if (hba->max_cqes < num_elements_per_pg)
 	if (hba->max_cqes < num_elements_per_pg)
 		hba->max_cqes = num_elements_per_pg;
 		hba->max_cqes = num_elements_per_pg;
 	else if (hba->max_cqes % num_elements_per_pg)
 	else if (hba->max_cqes % num_elements_per_pg)
@@ -77,7 +77,7 @@ static void bnx2i_adjust_qp_size(struct bnx2i_hba *hba)
 				 ~(num_elements_per_pg - 1);
 				 ~(num_elements_per_pg - 1);
 
 
 	/* adjust RQ */
 	/* adjust RQ */
-	num_elements_per_pg = PAGE_SIZE / BNX2I_RQ_WQE_SIZE;
+	num_elements_per_pg = CNIC_PAGE_SIZE / BNX2I_RQ_WQE_SIZE;
 	if (hba->max_rqes < num_elements_per_pg)
 	if (hba->max_rqes < num_elements_per_pg)
 		hba->max_rqes = num_elements_per_pg;
 		hba->max_rqes = num_elements_per_pg;
 	else if (hba->max_rqes % num_elements_per_pg)
 	else if (hba->max_rqes % num_elements_per_pg)
@@ -959,7 +959,7 @@ static void setup_qp_page_tables(struct bnx2i_endpoint *ep)
 
 
 	/* SQ page table */
 	/* SQ page table */
 	memset(ep->qp.sq_pgtbl_virt, 0, ep->qp.sq_pgtbl_size);
 	memset(ep->qp.sq_pgtbl_virt, 0, ep->qp.sq_pgtbl_size);
-	num_pages = ep->qp.sq_mem_size / PAGE_SIZE;
+	num_pages = ep->qp.sq_mem_size / CNIC_PAGE_SIZE;
 	page = ep->qp.sq_phys;
 	page = ep->qp.sq_phys;
 
 
 	if (cnic_dev_10g)
 	if (cnic_dev_10g)
@@ -973,7 +973,7 @@ static void setup_qp_page_tables(struct bnx2i_endpoint *ep)
 			ptbl++;
 			ptbl++;
 			*ptbl = (u32) ((u64) page >> 32);
 			*ptbl = (u32) ((u64) page >> 32);
 			ptbl++;
 			ptbl++;
-			page += PAGE_SIZE;
+			page += CNIC_PAGE_SIZE;
 		} else {
 		} else {
 			/* PTE is written in big endian format for
 			/* PTE is written in big endian format for
 			 * 5706/5708/5709 devices */
 			 * 5706/5708/5709 devices */
@@ -981,13 +981,13 @@ static void setup_qp_page_tables(struct bnx2i_endpoint *ep)
 			ptbl++;
 			ptbl++;
 			*ptbl = (u32) page;
 			*ptbl = (u32) page;
 			ptbl++;
 			ptbl++;
-			page += PAGE_SIZE;
+			page += CNIC_PAGE_SIZE;
 		}
 		}
 	}
 	}
 
 
 	/* RQ page table */
 	/* RQ page table */
 	memset(ep->qp.rq_pgtbl_virt, 0, ep->qp.rq_pgtbl_size);
 	memset(ep->qp.rq_pgtbl_virt, 0, ep->qp.rq_pgtbl_size);
-	num_pages = ep->qp.rq_mem_size / PAGE_SIZE;
+	num_pages = ep->qp.rq_mem_size / CNIC_PAGE_SIZE;
 	page = ep->qp.rq_phys;
 	page = ep->qp.rq_phys;
 
 
 	if (cnic_dev_10g)
 	if (cnic_dev_10g)
@@ -1001,7 +1001,7 @@ static void setup_qp_page_tables(struct bnx2i_endpoint *ep)
 			ptbl++;
 			ptbl++;
 			*ptbl = (u32) ((u64) page >> 32);
 			*ptbl = (u32) ((u64) page >> 32);
 			ptbl++;
 			ptbl++;
-			page += PAGE_SIZE;
+			page += CNIC_PAGE_SIZE;
 		} else {
 		} else {
 			/* PTE is written in big endian format for
 			/* PTE is written in big endian format for
 			 * 5706/5708/5709 devices */
 			 * 5706/5708/5709 devices */
@@ -1009,13 +1009,13 @@ static void setup_qp_page_tables(struct bnx2i_endpoint *ep)
 			ptbl++;
 			ptbl++;
 			*ptbl = (u32) page;
 			*ptbl = (u32) page;
 			ptbl++;
 			ptbl++;
-			page += PAGE_SIZE;
+			page += CNIC_PAGE_SIZE;
 		}
 		}
 	}
 	}
 
 
 	/* CQ page table */
 	/* CQ page table */
 	memset(ep->qp.cq_pgtbl_virt, 0, ep->qp.cq_pgtbl_size);
 	memset(ep->qp.cq_pgtbl_virt, 0, ep->qp.cq_pgtbl_size);
-	num_pages = ep->qp.cq_mem_size / PAGE_SIZE;
+	num_pages = ep->qp.cq_mem_size / CNIC_PAGE_SIZE;
 	page = ep->qp.cq_phys;
 	page = ep->qp.cq_phys;
 
 
 	if (cnic_dev_10g)
 	if (cnic_dev_10g)
@@ -1029,7 +1029,7 @@ static void setup_qp_page_tables(struct bnx2i_endpoint *ep)
 			ptbl++;
 			ptbl++;
 			*ptbl = (u32) ((u64) page >> 32);
 			*ptbl = (u32) ((u64) page >> 32);
 			ptbl++;
 			ptbl++;
-			page += PAGE_SIZE;
+			page += CNIC_PAGE_SIZE;
 		} else {
 		} else {
 			/* PTE is written in big endian format for
 			/* PTE is written in big endian format for
 			 * 5706/5708/5709 devices */
 			 * 5706/5708/5709 devices */
@@ -1037,7 +1037,7 @@ static void setup_qp_page_tables(struct bnx2i_endpoint *ep)
 			ptbl++;
 			ptbl++;
 			*ptbl = (u32) page;
 			*ptbl = (u32) page;
 			ptbl++;
 			ptbl++;
-			page += PAGE_SIZE;
+			page += CNIC_PAGE_SIZE;
 		}
 		}
 	}
 	}
 }
 }
@@ -1064,11 +1064,11 @@ int bnx2i_alloc_qp_resc(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep)
 	/* Allocate page table memory for SQ which is page aligned */
 	/* Allocate page table memory for SQ which is page aligned */
 	ep->qp.sq_mem_size = hba->max_sqes * BNX2I_SQ_WQE_SIZE;
 	ep->qp.sq_mem_size = hba->max_sqes * BNX2I_SQ_WQE_SIZE;
 	ep->qp.sq_mem_size =
 	ep->qp.sq_mem_size =
-		(ep->qp.sq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+		(ep->qp.sq_mem_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK;
 	ep->qp.sq_pgtbl_size =
 	ep->qp.sq_pgtbl_size =
-		(ep->qp.sq_mem_size / PAGE_SIZE) * sizeof(void *);
+		(ep->qp.sq_mem_size / CNIC_PAGE_SIZE) * sizeof(void *);
 	ep->qp.sq_pgtbl_size =
 	ep->qp.sq_pgtbl_size =
-		(ep->qp.sq_pgtbl_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+		(ep->qp.sq_pgtbl_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK;
 
 
 	ep->qp.sq_pgtbl_virt =
 	ep->qp.sq_pgtbl_virt =
 		dma_alloc_coherent(&hba->pcidev->dev, ep->qp.sq_pgtbl_size,
 		dma_alloc_coherent(&hba->pcidev->dev, ep->qp.sq_pgtbl_size,
@@ -1101,11 +1101,11 @@ int bnx2i_alloc_qp_resc(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep)
 	/* Allocate page table memory for CQ which is page aligned */
 	/* Allocate page table memory for CQ which is page aligned */
 	ep->qp.cq_mem_size = hba->max_cqes * BNX2I_CQE_SIZE;
 	ep->qp.cq_mem_size = hba->max_cqes * BNX2I_CQE_SIZE;
 	ep->qp.cq_mem_size =
 	ep->qp.cq_mem_size =
-		(ep->qp.cq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+		(ep->qp.cq_mem_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK;
 	ep->qp.cq_pgtbl_size =
 	ep->qp.cq_pgtbl_size =
-		(ep->qp.cq_mem_size / PAGE_SIZE) * sizeof(void *);
+		(ep->qp.cq_mem_size / CNIC_PAGE_SIZE) * sizeof(void *);
 	ep->qp.cq_pgtbl_size =
 	ep->qp.cq_pgtbl_size =
-		(ep->qp.cq_pgtbl_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+		(ep->qp.cq_pgtbl_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK;
 
 
 	ep->qp.cq_pgtbl_virt =
 	ep->qp.cq_pgtbl_virt =
 		dma_alloc_coherent(&hba->pcidev->dev, ep->qp.cq_pgtbl_size,
 		dma_alloc_coherent(&hba->pcidev->dev, ep->qp.cq_pgtbl_size,
@@ -1144,11 +1144,11 @@ int bnx2i_alloc_qp_resc(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep)
 	/* Allocate page table memory for RQ which is page aligned */
 	/* Allocate page table memory for RQ which is page aligned */
 	ep->qp.rq_mem_size = hba->max_rqes * BNX2I_RQ_WQE_SIZE;
 	ep->qp.rq_mem_size = hba->max_rqes * BNX2I_RQ_WQE_SIZE;
 	ep->qp.rq_mem_size =
 	ep->qp.rq_mem_size =
-		(ep->qp.rq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+		(ep->qp.rq_mem_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK;
 	ep->qp.rq_pgtbl_size =
 	ep->qp.rq_pgtbl_size =
-		(ep->qp.rq_mem_size / PAGE_SIZE) * sizeof(void *);
+		(ep->qp.rq_mem_size / CNIC_PAGE_SIZE) * sizeof(void *);
 	ep->qp.rq_pgtbl_size =
 	ep->qp.rq_pgtbl_size =
-		(ep->qp.rq_pgtbl_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+		(ep->qp.rq_pgtbl_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK;
 
 
 	ep->qp.rq_pgtbl_virt =
 	ep->qp.rq_pgtbl_virt =
 		dma_alloc_coherent(&hba->pcidev->dev, ep->qp.rq_pgtbl_size,
 		dma_alloc_coherent(&hba->pcidev->dev, ep->qp.rq_pgtbl_size,
@@ -1270,7 +1270,7 @@ int bnx2i_send_fw_iscsi_init_msg(struct bnx2i_hba *hba)
 	bnx2i_adjust_qp_size(hba);
 	bnx2i_adjust_qp_size(hba);
 
 
 	iscsi_init.flags =
 	iscsi_init.flags =
-		ISCSI_PAGE_SIZE_4K << ISCSI_KWQE_INIT1_PAGE_SIZE_SHIFT;
+		(CNIC_PAGE_BITS - 8) << ISCSI_KWQE_INIT1_PAGE_SIZE_SHIFT;
 	if (en_tcp_dack)
 	if (en_tcp_dack)
 		iscsi_init.flags |= ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE;
 		iscsi_init.flags |= ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE;
 	iscsi_init.reserved0 = 0;
 	iscsi_init.reserved0 = 0;
@@ -1288,15 +1288,15 @@ int bnx2i_send_fw_iscsi_init_msg(struct bnx2i_hba *hba)
 			((hba->num_ccell & 0xFFFF) | (hba->max_sqes << 16));
 			((hba->num_ccell & 0xFFFF) | (hba->max_sqes << 16));
 	iscsi_init.num_ccells_per_conn = hba->num_ccell;
 	iscsi_init.num_ccells_per_conn = hba->num_ccell;
 	iscsi_init.num_tasks_per_conn = hba->max_sqes;
 	iscsi_init.num_tasks_per_conn = hba->max_sqes;
-	iscsi_init.sq_wqes_per_page = PAGE_SIZE / BNX2I_SQ_WQE_SIZE;
+	iscsi_init.sq_wqes_per_page = CNIC_PAGE_SIZE / BNX2I_SQ_WQE_SIZE;
 	iscsi_init.sq_num_wqes = hba->max_sqes;
 	iscsi_init.sq_num_wqes = hba->max_sqes;
 	iscsi_init.cq_log_wqes_per_page =
 	iscsi_init.cq_log_wqes_per_page =
-		(u8) bnx2i_power_of2(PAGE_SIZE / BNX2I_CQE_SIZE);
+		(u8) bnx2i_power_of2(CNIC_PAGE_SIZE / BNX2I_CQE_SIZE);
 	iscsi_init.cq_num_wqes = hba->max_cqes;
 	iscsi_init.cq_num_wqes = hba->max_cqes;
 	iscsi_init.cq_num_pages = (hba->max_cqes * BNX2I_CQE_SIZE +
 	iscsi_init.cq_num_pages = (hba->max_cqes * BNX2I_CQE_SIZE +
-				   (PAGE_SIZE - 1)) / PAGE_SIZE;
+				   (CNIC_PAGE_SIZE - 1)) / CNIC_PAGE_SIZE;
 	iscsi_init.sq_num_pages = (hba->max_sqes * BNX2I_SQ_WQE_SIZE +
 	iscsi_init.sq_num_pages = (hba->max_sqes * BNX2I_SQ_WQE_SIZE +
-				   (PAGE_SIZE - 1)) / PAGE_SIZE;
+				   (CNIC_PAGE_SIZE - 1)) / CNIC_PAGE_SIZE;
 	iscsi_init.rq_buffer_size = BNX2I_RQ_WQE_SIZE;
 	iscsi_init.rq_buffer_size = BNX2I_RQ_WQE_SIZE;
 	iscsi_init.rq_num_wqes = hba->max_rqes;
 	iscsi_init.rq_num_wqes = hba->max_rqes;
 
 

+ 12 - 11
drivers/scsi/bnx2i/bnx2i_iscsi.c

@@ -525,7 +525,7 @@ static int bnx2i_setup_mp_bdt(struct bnx2i_hba *hba)
 	struct iscsi_bd *mp_bdt;
 	struct iscsi_bd *mp_bdt;
 	u64 addr;
 	u64 addr;
 
 
-	hba->mp_bd_tbl = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
+	hba->mp_bd_tbl = dma_alloc_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
 					    &hba->mp_bd_dma, GFP_KERNEL);
 					    &hba->mp_bd_dma, GFP_KERNEL);
 	if (!hba->mp_bd_tbl) {
 	if (!hba->mp_bd_tbl) {
 		printk(KERN_ERR "unable to allocate Middle Path BDT\n");
 		printk(KERN_ERR "unable to allocate Middle Path BDT\n");
@@ -533,11 +533,12 @@ static int bnx2i_setup_mp_bdt(struct bnx2i_hba *hba)
 		goto out;
 		goto out;
 	}
 	}
 
 
-	hba->dummy_buffer = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
+	hba->dummy_buffer = dma_alloc_coherent(&hba->pcidev->dev,
+					       CNIC_PAGE_SIZE,
 					       &hba->dummy_buf_dma, GFP_KERNEL);
 					       &hba->dummy_buf_dma, GFP_KERNEL);
 	if (!hba->dummy_buffer) {
 	if (!hba->dummy_buffer) {
 		printk(KERN_ERR "unable to alloc Middle Path Dummy Buffer\n");
 		printk(KERN_ERR "unable to alloc Middle Path Dummy Buffer\n");
-		dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+		dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
 				  hba->mp_bd_tbl, hba->mp_bd_dma);
 				  hba->mp_bd_tbl, hba->mp_bd_dma);
 		hba->mp_bd_tbl = NULL;
 		hba->mp_bd_tbl = NULL;
 		rc = -1;
 		rc = -1;
@@ -548,7 +549,7 @@ static int bnx2i_setup_mp_bdt(struct bnx2i_hba *hba)
 	addr = (unsigned long) hba->dummy_buf_dma;
 	addr = (unsigned long) hba->dummy_buf_dma;
 	mp_bdt->buffer_addr_lo = addr & 0xffffffff;
 	mp_bdt->buffer_addr_lo = addr & 0xffffffff;
 	mp_bdt->buffer_addr_hi = addr >> 32;
 	mp_bdt->buffer_addr_hi = addr >> 32;
-	mp_bdt->buffer_length = PAGE_SIZE;
+	mp_bdt->buffer_length = CNIC_PAGE_SIZE;
 	mp_bdt->flags = ISCSI_BD_LAST_IN_BD_CHAIN |
 	mp_bdt->flags = ISCSI_BD_LAST_IN_BD_CHAIN |
 			ISCSI_BD_FIRST_IN_BD_CHAIN;
 			ISCSI_BD_FIRST_IN_BD_CHAIN;
 out:
 out:
@@ -565,12 +566,12 @@ out:
 static void bnx2i_free_mp_bdt(struct bnx2i_hba *hba)
 static void bnx2i_free_mp_bdt(struct bnx2i_hba *hba)
 {
 {
 	if (hba->mp_bd_tbl) {
 	if (hba->mp_bd_tbl) {
-		dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+		dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
 				  hba->mp_bd_tbl, hba->mp_bd_dma);
 				  hba->mp_bd_tbl, hba->mp_bd_dma);
 		hba->mp_bd_tbl = NULL;
 		hba->mp_bd_tbl = NULL;
 	}
 	}
 	if (hba->dummy_buffer) {
 	if (hba->dummy_buffer) {
-		dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+		dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
 				  hba->dummy_buffer, hba->dummy_buf_dma);
 		hba->dummy_buffer = NULL;
 	}
@@ -934,14 +935,14 @@ static void bnx2i_conn_free_login_resources(struct bnx2i_hba *hba,
 					    struct bnx2i_conn *bnx2i_conn)
 {
 	if (bnx2i_conn->gen_pdu.resp_bd_tbl) {
-		dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+		dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
 				  bnx2i_conn->gen_pdu.resp_bd_tbl,
 				  bnx2i_conn->gen_pdu.resp_bd_dma);
 		bnx2i_conn->gen_pdu.resp_bd_tbl = NULL;
 	}
 
 	if (bnx2i_conn->gen_pdu.req_bd_tbl) {
-		dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+		dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
 				  bnx2i_conn->gen_pdu.req_bd_tbl,
 				  bnx2i_conn->gen_pdu.req_bd_dma);
 		bnx2i_conn->gen_pdu.req_bd_tbl = NULL;
@@ -998,13 +999,13 @@ static int bnx2i_conn_alloc_login_resources(struct bnx2i_hba *hba,
 	bnx2i_conn->gen_pdu.resp_wr_ptr = bnx2i_conn->gen_pdu.resp_buf;
 
 	bnx2i_conn->gen_pdu.req_bd_tbl =
-		dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
+		dma_alloc_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
 				   &bnx2i_conn->gen_pdu.req_bd_dma, GFP_KERNEL);
 	if (bnx2i_conn->gen_pdu.req_bd_tbl == NULL)
 		goto login_req_bd_tbl_failure;
 
 	bnx2i_conn->gen_pdu.resp_bd_tbl =
-		dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
+		dma_alloc_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
 				   &bnx2i_conn->gen_pdu.resp_bd_dma,
 				   GFP_KERNEL);
 	if (bnx2i_conn->gen_pdu.resp_bd_tbl == NULL)
@@ -1013,7 +1014,7 @@ static int bnx2i_conn_alloc_login_resources(struct bnx2i_hba *hba,
 	return 0;
 
 login_resp_bd_tbl_failure:
-	dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+	dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
 			  bnx2i_conn->gen_pdu.req_bd_tbl,
 			  bnx2i_conn->gen_pdu.req_bd_dma);
 	bnx2i_conn->gen_pdu.req_bd_tbl = NULL;

+ 10 - 12
drivers/tty/serial/sunhv.c

@@ -433,13 +433,10 @@ static void sunhv_console_write_paged(struct console *con, const char *s, unsign
 	unsigned long flags;
 	int locked = 1;
 
-	local_irq_save(flags);
-	if (port->sysrq) {
-		locked = 0;
-	} else if (oops_in_progress) {
-		locked = spin_trylock(&port->lock);
-	} else
-		spin_lock(&port->lock);
+	if (port->sysrq || oops_in_progress)
+		locked = spin_trylock_irqsave(&port->lock, flags);
+	else
+		spin_lock_irqsave(&port->lock, flags);
 
 	while (n > 0) {
 		unsigned long ra = __pa(con_write_page);
@@ -470,8 +467,7 @@ static void sunhv_console_write_paged(struct console *con, const char *s, unsign
 	}
 
 	if (locked)
-		spin_unlock(&port->lock);
-	local_irq_restore(flags);
+		spin_unlock_irqrestore(&port->lock, flags);
 }
 
 static inline void sunhv_console_putchar(struct uart_port *port, char c)
@@ -492,7 +488,10 @@ static void sunhv_console_write_bychar(struct console *con, const char *s, unsig
 	unsigned long flags;
 	int i, locked = 1;
 
-	local_irq_save(flags);
+	if (port->sysrq || oops_in_progress)
+		locked = spin_trylock_irqsave(&port->lock, flags);
+	else
+		spin_lock_irqsave(&port->lock, flags);
 	if (port->sysrq) {
 		locked = 0;
 	} else if (oops_in_progress) {
@@ -507,8 +506,7 @@ static void sunhv_console_write_bychar(struct console *con, const char *s, unsig
 	}
 
 	if (locked)
-		spin_unlock(&port->lock);
-	local_irq_restore(flags);
+		spin_unlock_irqrestore(&port->lock, flags);
 }
 
 static struct console sunhv_console = {
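
This sunhv conversion, repeated below for sunsab.c, sunsu.c and sunzilog.c, drops the open-coded local_irq_save()/spin_lock() sequence in the console write paths in favour of the combined irqsave variants, so the saved interrupt state travels with the port lock and the unlock path collapses to a single spin_unlock_irqrestore(). A minimal sketch of the resulting pattern; this is a fragment for illustration, with "port" standing in for whichever driver's struct uart_port is involved:

	unsigned long flags;
	int locked = 1;

	/* The sysrq and oops paths may already hold the lock on this CPU,
	 * so only try-lock there; everyone else takes it normally. */
	if (port->sysrq || oops_in_progress)
		locked = spin_trylock_irqsave(&port->lock, flags);
	else
		spin_lock_irqsave(&port->lock, flags);

	/* ... emit the console characters ... */

	if (locked)
		spin_unlock_irqrestore(&port->lock, flags);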

+ 5 - 9
drivers/tty/serial/sunsab.c

@@ -844,20 +844,16 @@ static void sunsab_console_write(struct console *con, const char *s, unsigned n)
 	unsigned long flags;
 	int locked = 1;
 
-	local_irq_save(flags);
-	if (up->port.sysrq) {
-		locked = 0;
-	} else if (oops_in_progress) {
-		locked = spin_trylock(&up->port.lock);
-	} else
-		spin_lock(&up->port.lock);
+	if (up->port.sysrq || oops_in_progress)
+		locked = spin_trylock_irqsave(&up->port.lock, flags);
+	else
+		spin_lock_irqsave(&up->port.lock, flags);
 
 	uart_console_write(&up->port, s, n, sunsab_console_putchar);
 	sunsab_tec_wait(up);
 
 	if (locked)
-		spin_unlock(&up->port.lock);
-	local_irq_restore(flags);
+		spin_unlock_irqrestore(&up->port.lock, flags);
 }
 
 static int sunsab_console_setup(struct console *con, char *options)

+ 5 - 9
drivers/tty/serial/sunsu.c

@@ -1295,13 +1295,10 @@ static void sunsu_console_write(struct console *co, const char *s,
 	unsigned int ier;
 	int locked = 1;
 
-	local_irq_save(flags);
-	if (up->port.sysrq) {
-		locked = 0;
-	} else if (oops_in_progress) {
-		locked = spin_trylock(&up->port.lock);
-	} else
-		spin_lock(&up->port.lock);
+	if (up->port.sysrq || oops_in_progress)
+		locked = spin_trylock_irqsave(&up->port.lock, flags);
+	else
+		spin_lock_irqsave(&up->port.lock, flags);
 
 	/*
 	 *	First save the UER then disable the interrupts
@@ -1319,8 +1316,7 @@ static void sunsu_console_write(struct console *co, const char *s,
 	serial_out(up, UART_IER, ier);
 
 	if (locked)
-		spin_unlock(&up->port.lock);
-	local_irq_restore(flags);
+		spin_unlock_irqrestore(&up->port.lock, flags);
 }
 
 /*

+ 5 - 9
drivers/tty/serial/sunzilog.c

@@ -1195,20 +1195,16 @@ sunzilog_console_write(struct console *con, const char *s, unsigned int count)
 	unsigned long flags;
 	int locked = 1;
 
-	local_irq_save(flags);
-	if (up->port.sysrq) {
-		locked = 0;
-	} else if (oops_in_progress) {
-		locked = spin_trylock(&up->port.lock);
-	} else
-		spin_lock(&up->port.lock);
+	if (up->port.sysrq || oops_in_progress)
+		locked = spin_trylock_irqsave(&up->port.lock, flags);
+	else
+		spin_lock_irqsave(&up->port.lock, flags);
 
 	uart_console_write(&up->port, s, count, sunzilog_putchar);
 	udelay(2);
 
 	if (locked)
-		spin_unlock(&up->port.lock);
-	local_irq_restore(flags);
+		spin_unlock_irqrestore(&up->port.lock, flags);
 }
 
 static int __init sunzilog_console_setup(struct console *con, char *options)

+ 19 - 1
drivers/vhost/net.c

@@ -505,9 +505,13 @@ static int get_rx_bufs(struct vhost_virtqueue *vq,
 			r = -ENOBUFS;
 			goto err;
 		}
-		d = vhost_get_vq_desc(vq->dev, vq, vq->iov + seg,
+		r = vhost_get_vq_desc(vq->dev, vq, vq->iov + seg,
 				      ARRAY_SIZE(vq->iov) - seg, &out,
 				      &in, log, log_num);
+		if (unlikely(r < 0))
+			goto err;
+
+		d = r;
 		if (d == vq->num) {
 			r = 0;
 			goto err;
@@ -532,6 +536,12 @@ static int get_rx_bufs(struct vhost_virtqueue *vq,
 	*iovcount = seg;
 	if (unlikely(log))
 		*log_num = nlogs;
+
+	/* Detect overrun */
+	if (unlikely(datalen > 0)) {
+		r = UIO_MAXIOV + 1;
+		goto err;
+	}
 	return headcount;
 err:
 	vhost_discard_vq_desc(vq, headcount);
@@ -587,6 +597,14 @@ static void handle_rx(struct vhost_net *net)
 		/* On error, stop handling until the next kick. */
 		if (unlikely(headcount < 0))
 			break;
+		/* On overrun, truncate and discard */
+		if (unlikely(headcount > UIO_MAXIOV)) {
+			msg.msg_iovlen = 1;
+			err = sock->ops->recvmsg(NULL, sock, &msg,
+						 1, MSG_DONTWAIT | MSG_TRUNC);
+			pr_debug("Discarded rx packet: len %zd\n", sock_len);
+			continue;
+		}
 		/* OK, now we need to know about added descriptors. */
 		if (!headcount) {
 			if (unlikely(vhost_enable_notify(&net->dev, vq))) {
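
Two separate fixes meet in these vhost hunks. First, the return value of vhost_get_vq_desc() is routed through the signed variable r before being copied into the descriptor index d; previously a negative error code stored straight into the unsigned index could never satisfy a "< 0" check and was silently reused as a huge index. Second, get_rx_bufs() reports "datagram larger than the buffers gathered" with the impossible headcount UIO_MAXIOV + 1, and handle_rx() responds by reading the packet with MSG_TRUNC and discarding it. The signedness pitfall is easy to reproduce in isolation; a standalone userspace demonstration, for illustration only:

#include <stdio.h>

static int might_fail(void)
{
	return -14;			/* e.g. -EFAULT from a lookup helper */
}

int main(void)
{
	unsigned int d = might_fail();	/* old style: error code stored unsigned */
	int r = might_fail();		/* new style: keep it signed first */

	printf("d < 0? %d\n", d < 0);	/* prints 0 -- the error is invisible */
	printf("r < 0? %d\n", r < 0);	/* prints 1 -- the error is caught */
	return 0;
}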

+ 18 - 6
drivers/xen/balloon.c

@@ -399,11 +399,25 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
 			state = BP_EAGAIN;
 			break;
 		}
+		scrub_page(page);
 
-		pfn = page_to_pfn(page);
-		frame_list[i] = pfn_to_mfn(pfn);
+		frame_list[i] = page_to_pfn(page);
+	}
 
-		scrub_page(page);
+	/*
+	 * Ensure that ballooned highmem pages don't have kmaps.
+	 *
+	 * Do this before changing the p2m as kmap_flush_unused()
+	 * reads PTEs to obtain pages (and hence needs the original
+	 * p2m entry).
+	 */
+	kmap_flush_unused();
+
+	/* Update direct mapping, invalidate P2M, and add to balloon. */
+	for (i = 0; i < nr_pages; i++) {
+		pfn = frame_list[i];
+		frame_list[i] = pfn_to_mfn(pfn);
+		page = pfn_to_page(pfn);
 
 #ifdef CONFIG_XEN_HAVE_PVMMU
 		/*
@@ -429,11 +443,9 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
 		}
 #endif
 
-		balloon_append(pfn_to_page(pfn));
+		balloon_append(page);
 	}
 
-	/* Ensure that ballooned highmem pages don't have kmaps. */
-	kmap_flush_unused();
 	flush_tlb_all();
 
 	set_xen_guest_handle(reservation.extent_start, frame_list);

+ 8 - 26
fs/anon_inodes.c

@@ -41,19 +41,8 @@ static const struct dentry_operations anon_inodefs_dentry_operations = {
 static struct dentry *anon_inodefs_mount(struct file_system_type *fs_type,
 				int flags, const char *dev_name, void *data)
 {
-	struct dentry *root;
-	root = mount_pseudo(fs_type, "anon_inode:", NULL,
+	return mount_pseudo(fs_type, "anon_inode:", NULL,
 			&anon_inodefs_dentry_operations, ANON_INODE_FS_MAGIC);
-	if (!IS_ERR(root)) {
-		struct super_block *s = root->d_sb;
-		anon_inode_inode = alloc_anon_inode(s);
-		if (IS_ERR(anon_inode_inode)) {
-			dput(root);
-			deactivate_locked_super(s);
-			root = ERR_CAST(anon_inode_inode);
-		}
-	}
-	return root;
 }
 
 static struct file_system_type anon_inode_fs_type = {
@@ -175,22 +164,15 @@ EXPORT_SYMBOL_GPL(anon_inode_getfd);
 
 static int __init anon_inode_init(void)
 {
-	int error;
-
-	error = register_filesystem(&anon_inode_fs_type);
-	if (error)
-		goto err_exit;
 	anon_inode_mnt = kern_mount(&anon_inode_fs_type);
-	if (IS_ERR(anon_inode_mnt)) {
-		error = PTR_ERR(anon_inode_mnt);
-		goto err_unregister_filesystem;
-	}
-	return 0;
+	if (IS_ERR(anon_inode_mnt))
+		panic("anon_inode_init() kernel mount failed (%ld)\n", PTR_ERR(anon_inode_mnt));
 
-err_unregister_filesystem:
-	unregister_filesystem(&anon_inode_fs_type);
-err_exit:
-	panic(KERN_ERR "anon_inode_init() failed (%d)\n", error);
+	anon_inode_inode = alloc_anon_inode(anon_inode_mnt->mnt_sb);
+	if (IS_ERR(anon_inode_inode))
+		panic("anon_inode_init() inode allocation failed (%ld)\n", PTR_ERR(anon_inode_inode));
+
+	return 0;
 }
 
 fs_initcall(anon_inode_init);

+ 2 - 2
fs/dcache.c

@@ -2833,9 +2833,9 @@ static int prepend_name(char **buffer, int *buflen, struct qstr *name)
 	u32 dlen = ACCESS_ONCE(name->len);
 	char *p;
 
-	if (*buflen < dlen + 1)
-		return -ENAMETOOLONG;
 	*buflen -= dlen + 1;
+	if (*buflen < 0)
+		return -ENAMETOOLONG;
 	p = *buffer -= dlen + 1;
 	*p++ = '/';
 	while (dlen--) {
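
The point of reordering prepend_name() is the mixed signed/unsigned comparison: *buflen is an int but dlen is a u32, so once an earlier component had driven *buflen negative, the old guard "*buflen < dlen + 1" was evaluated as an unsigned compare and never fired. Subtracting first and then testing "*buflen < 0" keeps the test purely signed and keeps the running length meaningful for callers that only want the total. A standalone userspace illustration of the comparison behaviour (assuming the usual 32-bit int), for clarity only:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int buflen = -3;	/* an earlier component already overflowed */
	uint32_t dlen = 4;	/* length of the next path component */

	/* Old order: buflen is converted to unsigned, looks huge, guard never fires. */
	printf("old guard fires: %d\n", buflen < dlen + 1);	/* 0 */

	/* New order: subtract first, then a purely signed test. */
	buflen -= dlen + 1;
	printf("new guard fires: %d\n", buflen < 0);		/* 1 */
	return 0;
}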

+ 9 - 6
fs/ext4/inode.c

@@ -38,6 +38,7 @@
 #include <linux/slab.h>
 #include <linux/ratelimit.h>
 #include <linux/aio.h>
+#include <linux/bitops.h>
 
 #include "ext4_jbd2.h"
 #include "xattr.h"
@@ -3921,18 +3922,20 @@ int ext4_get_inode_loc(struct inode *inode, struct ext4_iloc *iloc)
 void ext4_set_inode_flags(struct inode *inode)
 {
 	unsigned int flags = EXT4_I(inode)->i_flags;
+	unsigned int new_fl = 0;
 
-	inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
 	if (flags & EXT4_SYNC_FL)
-		inode->i_flags |= S_SYNC;
+		new_fl |= S_SYNC;
 	if (flags & EXT4_APPEND_FL)
-		inode->i_flags |= S_APPEND;
+		new_fl |= S_APPEND;
 	if (flags & EXT4_IMMUTABLE_FL)
-		inode->i_flags |= S_IMMUTABLE;
+		new_fl |= S_IMMUTABLE;
 	if (flags & EXT4_NOATIME_FL)
-		inode->i_flags |= S_NOATIME;
+		new_fl |= S_NOATIME;
 	if (flags & EXT4_DIRSYNC_FL)
-		inode->i_flags |= S_DIRSYNC;
+		new_fl |= S_DIRSYNC;
+	set_mask_bits(&inode->i_flags,
+		      S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC, new_fl);
 }
 
 /* Propagate flags from i_flags to EXT4_I(inode)->i_flags */
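
Building the flag set in new_fl and applying it with set_mask_bits() (hence the new <linux/bitops.h> include) means other CPUs reading inode->i_flags never observe the intermediate state in which all five S_* bits have been cleared but not yet re-set, which the old clear-then-set sequence allowed. set_mask_bits() is essentially a compare-and-swap retry loop; a userspace approximation of the idea using GCC/Clang atomic builtins, not the kernel macro itself:

#include <stdio.h>

/* Atomically replace the bits selected by mask with bits, leaving the rest alone. */
static unsigned long replace_mask_bits(unsigned long *p, unsigned long mask,
				       unsigned long bits)
{
	unsigned long old = __atomic_load_n(p, __ATOMIC_RELAXED);
	unsigned long new;

	do {
		new = (old & ~mask) | bits;
		/* On failure, 'old' is refreshed with the current value and we retry. */
	} while (!__atomic_compare_exchange_n(p, &old, new, 0,
					      __ATOMIC_SEQ_CST, __ATOMIC_RELAXED));
	return new;
}

int main(void)
{
	unsigned long flags = 0xf0;			/* stand-in for i_flags */

	replace_mask_bits(&flags, 0x0f, 0x05);		/* swap only the low nibble */
	printf("flags = %#lx\n", flags);		/* 0xf5 */
	return 0;
}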

+ 4 - 15
fs/file.c

@@ -713,27 +713,16 @@ unsigned long __fdget_raw(unsigned int fd)
 
 unsigned long __fdget_pos(unsigned int fd)
 {
-	struct files_struct *files = current->files;
-	struct file *file;
-	unsigned long v;
-
-	if (atomic_read(&files->count) == 1) {
-		file = __fcheck_files(files, fd);
-		v = 0;
-	} else {
-		file = __fget(fd, 0);
-		v = FDPUT_FPUT;
-	}
-	if (!file)
-		return 0;
+	unsigned long v = __fdget(fd);
+	struct file *file = (struct file *)(v & ~3);
 
-	if (file->f_mode & FMODE_ATOMIC_POS) {
+	if (file && (file->f_mode & FMODE_ATOMIC_POS)) {
 		if (file_count(file) > 1) {
 			v |= FDPUT_POS_UNLOCK;
 			mutex_lock(&file->f_pos_lock);
 		}
 	}
-	return v | (unsigned long)file;
+	return v;
 }
 
 /*
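
__fdget() already returns the struct file pointer with its low bits doubling as flags (FDPUT_FPUT when the reference has to be dropped later), so __fdget_pos() can reuse that value directly: mask with ~3 to recover the pointer, then OR in FDPUT_POS_UNLOCK once it takes f_pos_lock, instead of open-coding the fast path. A standalone illustration of the low-bit packing trick; the flag names mirror the kernel's, the rest is made up for the example:

#include <stdio.h>

#define FDPUT_FPUT		0x1UL	/* caller must drop the file reference */
#define FDPUT_POS_UNLOCK	0x2UL	/* caller must unlock f_pos_lock */

struct file { int f_count; };		/* at least 4-byte aligned, so bits 0-1 are free */

int main(void)
{
	static struct file f;
	unsigned long v = (unsigned long)&f | FDPUT_FPUT;	/* what __fdget() hands back */

	v |= FDPUT_POS_UNLOCK;					/* __fdget_pos() adds its flag */

	struct file *file = (struct file *)(v & ~3UL);		/* recover the pointer */
	printf("pointer intact: %d, flags: %#lx\n", file == &f, v & 3UL);
	return 0;
}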

Some files were not shown because too many files changed in this diff