Merge branch 'mipsr6-for-3.20' of git://git.linux-mips.org/pub/scm/mchandras/linux into mips-for-linux-next

Ralf Baechle · 10 years ago · commit 661af35e5f
65 changed files with 4405 additions and 415 deletions
  1. +68 -4	arch/mips/Kconfig
  2. +0 -13	arch/mips/Kconfig.debug
  3. +2 -0	arch/mips/Makefile
  4. +193 -0	arch/mips/configs/malta_qemu_32r6_defconfig
  5. +1 -0	arch/mips/include/asm/Kbuild
  6. +10 -8	arch/mips/include/asm/asmmacro.h
  7. +21 -21	arch/mips/include/asm/atomic.h
  8. +32 -32	arch/mips/include/asm/bitops.h
  9. +5 -0	arch/mips/include/asm/checksum.h
  10. +17 -17	arch/mips/include/asm/cmpxchg.h
  11. +21 -3	arch/mips/include/asm/compiler.h
  12. +23 -5	arch/mips/include/asm/cpu-features.h
  13. +7 -0	arch/mips/include/asm/cpu-type.h
  14. +9 -2	arch/mips/include/asm/cpu.h
  15. +2 -2	arch/mips/include/asm/edac.h
  16. +6 -4	arch/mips/include/asm/elf.h
  17. +2 -1	arch/mips/include/asm/fpu.h
  18. +12 -12	arch/mips/include/asm/futex.h
  19. +5 -4	arch/mips/include/asm/hazards.h
  20. +4 -3	arch/mips/include/asm/irqflags.h
  21. +3 -2	arch/mips/include/asm/local.h
  22. +12 -12	arch/mips/include/asm/mach-pmcs-msp71xx/msp_regops.h
  23. +96 -0	arch/mips/include/asm/mips-r2-to-r6-emul.h
  24. +3 -0	arch/mips/include/asm/mipsregs.h
  25. +4 -0	arch/mips/include/asm/module.h
  26. +1 -1	arch/mips/include/asm/octeon/cvmx-cmd-queue.h
  27. +148 -2	arch/mips/include/asm/r4kcache.h
  28. +26 -29	arch/mips/include/asm/spinlock.h
  29. +2 -2	arch/mips/include/asm/spram.h
  30. +4 -4	arch/mips/include/asm/stackframe.h
  31. +6 -3	arch/mips/include/asm/switch_to.h
  32. +1 -1	arch/mips/include/asm/thread_info.h
  33. +14 -10	arch/mips/include/uapi/asm/inst.h
  34. +2 -1	arch/mips/kernel/Makefile
  35. +1 -0	arch/mips/kernel/asm-offsets.c
  36. +250 -38	arch/mips/kernel/branch.c
  37. +1 -1	arch/mips/kernel/cevt-r4k.c
  38. +8 -8	arch/mips/kernel/cps-vec.S
  39. +7 -4	arch/mips/kernel/cpu-bugs64.c
  40. +23 -4	arch/mips/kernel/cpu-probe.c
  41. +188 -115	arch/mips/kernel/elf.c
  42. +21 -2	arch/mips/kernel/entry.S
  43. +1 -1	arch/mips/kernel/genex.S
  44. +1 -0	arch/mips/kernel/idle.c
  45. +2378 -0	arch/mips/kernel/mips-r2-to-r6-emul.c
  46. +2 -0	arch/mips/kernel/mips_ksyms.c
  47. +7 -1	arch/mips/kernel/proc.c
  48. +4 -0	arch/mips/kernel/process.c
  49. +9 -3	arch/mips/kernel/r4k_fpu.S
  50. +8 -6	arch/mips/kernel/r4k_switch.S
  51. +1 -0	arch/mips/kernel/spram.c
  52. +1 -1	arch/mips/kernel/syscall.c
  53. +35 -6	arch/mips/kernel/traps.c
  54. +386 -4	arch/mips/kernel/unaligned.c
  55. +1 -0	arch/mips/lib/Makefile
  56. +23 -0	arch/mips/lib/memcpy.S
  57. +47 -0	arch/mips/lib/memset.S
  58. +1 -1	arch/mips/lib/mips-atomic.c
  59. +158 -11	arch/mips/math-emu/cp1emu.c
  60. +4 -2	arch/mips/mm/c-r4k.c
  61. +26 -4	arch/mips/mm/page.c
  62. +3 -1	arch/mips/mm/sc-mips.c
  63. +4 -3	arch/mips/mm/tlbex.c
  64. +32 -0	arch/mips/mm/uasm-mips.c
  65. +12 -1	arch/mips/mm/uasm.c

+ 68 - 4
arch/mips/Kconfig

@@ -377,8 +377,10 @@ config MIPS_MALTA
 	select SYS_HAS_CPU_MIPS32_R1
 	select SYS_HAS_CPU_MIPS32_R2
 	select SYS_HAS_CPU_MIPS32_R3_5
+	select SYS_HAS_CPU_MIPS32_R6
 	select SYS_HAS_CPU_MIPS64_R1
 	select SYS_HAS_CPU_MIPS64_R2
+	select SYS_HAS_CPU_MIPS64_R6
 	select SYS_HAS_CPU_NEVADA
 	select SYS_HAS_CPU_RM7000
 	select SYS_SUPPORTS_32BIT_KERNEL
@@ -1034,6 +1036,9 @@ config MIPS_MACHINE
 config NO_IOPORT_MAP
 	def_bool n
 
+config GENERIC_CSUM
+	bool
+
 config GENERIC_ISA_DMA
 	bool
 	select ZONE_DMA if GENERIC_ISA_DMA_SUPPORT_BROKEN=n
@@ -1147,6 +1152,9 @@ config SOC_PNX8335
 	bool
 	select SOC_PNX833X
 
+config MIPS_SPRAM
+	bool
+
 config SWAP_IO_SPACE
 	bool
 
@@ -1305,6 +1313,22 @@ config CPU_MIPS32_R2
 	  specific type of processor in your system, choose those that one
 	  otherwise CPU_MIPS32_R1 is a safe bet for any MIPS32 system.
 
+config CPU_MIPS32_R6
+	bool "MIPS32 Release 6 (EXPERIMENTAL)"
+	depends on SYS_HAS_CPU_MIPS32_R6
+	select CPU_HAS_PREFETCH
+	select CPU_SUPPORTS_32BIT_KERNEL
+	select CPU_SUPPORTS_HIGHMEM
+	select CPU_SUPPORTS_MSA
+	select GENERIC_CSUM
+	select HAVE_KVM
+	select MIPS_O32_FP64_SUPPORT
+	help
+	  Choose this option to build a kernel for release 6 or later of the
+	  MIPS32 architecture.  New MIPS processors, starting with the Warrior
+	  family, are based on a MIPS32r6 processor. If you own an older
+	  processor, you probably need to select MIPS32r1 or MIPS32r2 instead.
+
 config CPU_MIPS64_R1
 	bool "MIPS64 Release 1"
 	depends on SYS_HAS_CPU_MIPS64_R1
@@ -1340,6 +1364,21 @@ config CPU_MIPS64_R2
 	  specific type of processor in your system, choose those that one
 	  otherwise CPU_MIPS64_R1 is a safe bet for any MIPS64 system.
 
+config CPU_MIPS64_R6
+	bool "MIPS64 Release 6 (EXPERIMENTAL)"
+	depends on SYS_HAS_CPU_MIPS64_R6
+	select CPU_HAS_PREFETCH
+	select CPU_SUPPORTS_32BIT_KERNEL
+	select CPU_SUPPORTS_64BIT_KERNEL
+	select CPU_SUPPORTS_HIGHMEM
+	select CPU_SUPPORTS_MSA
+	select GENERIC_CSUM
+	help
+	  Choose this option to build a kernel for release 6 or later of the
+	  MIPS64 architecture.  New MIPS processors, starting with the Warrior
+	  family, are based on a MIPS64r6 processor. If you own an older
+	  processor, you probably need to select MIPS64r1 or MIPS64r2 instead.
+
 config CPU_R3000
 	bool "R3000"
 	depends on SYS_HAS_CPU_R3000
@@ -1540,7 +1579,7 @@ endchoice
 config CPU_MIPS32_3_5_FEATURES
 	bool "MIPS32 Release 3.5 Features"
 	depends on SYS_HAS_CPU_MIPS32_R3_5
-	depends on CPU_MIPS32_R2
+	depends on CPU_MIPS32_R2 || CPU_MIPS32_R6
 	help
 	  Choose this option to build a kernel for release 2 or later of the
 	  MIPS32 architecture including features from the 3.5 release such as
@@ -1660,12 +1699,18 @@ config SYS_HAS_CPU_MIPS32_R2
 config SYS_HAS_CPU_MIPS32_R3_5
 	bool
 
+config SYS_HAS_CPU_MIPS32_R6
+	bool
+
 config SYS_HAS_CPU_MIPS64_R1
 	bool
 
 config SYS_HAS_CPU_MIPS64_R2
 	bool
 
+config SYS_HAS_CPU_MIPS64_R6
+	bool
+
 config SYS_HAS_CPU_R3000
 	bool
 
@@ -1765,11 +1810,11 @@ endmenu
 #
 config CPU_MIPS32
 	bool
-	default y if CPU_MIPS32_R1 || CPU_MIPS32_R2
+	default y if CPU_MIPS32_R1 || CPU_MIPS32_R2 || CPU_MIPS32_R6
 
 config CPU_MIPS64
 	bool
-	default y if CPU_MIPS64_R1 || CPU_MIPS64_R2
+	default y if CPU_MIPS64_R1 || CPU_MIPS64_R2 || CPU_MIPS64_R6
 
 #
 # These two indicate the revision of the architecture, either Release 1 or Release 2
@@ -1781,6 +1826,12 @@ config CPU_MIPSR1
 config CPU_MIPSR2
 	bool
 	default y if CPU_MIPS32_R2 || CPU_MIPS64_R2 || CPU_CAVIUM_OCTEON
+	select MIPS_SPRAM
+
+config CPU_MIPSR6
+	bool
+	default y if CPU_MIPS32_R6 || CPU_MIPS64_R6
+	select MIPS_SPRAM
 
 config EVA
 	bool
@@ -2014,6 +2065,19 @@ config MIPS_MT_FPAFF
 	default y
 	depends on MIPS_MT_SMP
 
+config MIPSR2_TO_R6_EMULATOR
+	bool "MIPS R2-to-R6 emulator"
+	depends on CPU_MIPSR6 && !SMP
+	default y
+	help
+	  Choose this option if you want to run non-R6 MIPS userland code.
+	  Even if you say 'Y' here, the emulator will still be disabled by
+	  default. You can enable it using the 'mipsr2emul' kernel option.
+	  The only reason this is a build-time option is to save ~14K from the
+	  final kernel image.
+comment "MIPS R2-to-R6 emulator is only available for UP kernels"
+	depends on SMP && CPU_MIPSR6
+
 config MIPS_VPE_LOADER
 	bool "VPE loader support."
 	depends on SYS_SUPPORTS_MULTITHREADING && MODULES
@@ -2149,7 +2213,7 @@ config CPU_HAS_SMARTMIPS
 	  here.
 
 config CPU_MICROMIPS
-	depends on 32BIT && SYS_SUPPORTS_MICROMIPS
+	depends on 32BIT && SYS_SUPPORTS_MICROMIPS && !CPU_MIPSR6
 	bool "microMIPS"
 	help
 	  When this option is enabled the kernel will be built using the
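A note on the MIPSR2_TO_R6_EMULATOR option added above: as its help text says, even a built-in emulator stays disabled until the 'mipsr2emul' kernel parameter is passed at boot. A minimal sketch of such a boot under QEMU (the CPU model name and kernel path are illustrative assumptions; only the 'mipsr2emul' parameter comes from this commit):

	qemu-system-mips -M malta -cpu mips32r6-generic -kernel vmlinux \
		-append "console=ttyS0 mipsr2emul"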

+ 0 - 13
arch/mips/Kconfig.debug

@@ -122,17 +122,4 @@ config SPINLOCK_TEST
 	help
 	  Add several files to the debugfs to test spinlock speed.
 
-config FP32XX_HYBRID_FPRS
-	bool "Run FP32 & FPXX code with hybrid FPRs"
-	depends on MIPS_O32_FP64_SUPPORT
-	help
-	  The hybrid FPR scheme is normally used only when a program needs to
-	  execute a mix of FP32 & FP64A code, since the trapping & emulation
-	  that it entails is expensive. When enabled, this option will lead
-	  to the kernel running programs which use the FP32 & FPXX FP ABIs
-	  using the hybrid FPR scheme, which can be useful for debugging
-	  purposes.
-
-	  If unsure, say N.
-
 endmenu

+ 2 - 0
arch/mips/Makefile

@@ -138,10 +138,12 @@ cflags-$(CONFIG_CPU_MIPS32_R1)	+= $(call cc-option,-march=mips32,-mips32 -U_MIPS
 			-Wa,-mips32 -Wa,--trap
 cflags-$(CONFIG_CPU_MIPS32_R2)	+= $(call cc-option,-march=mips32r2,-mips32r2 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS32) \
 			-Wa,-mips32r2 -Wa,--trap
+cflags-$(CONFIG_CPU_MIPS32_R6)	+= -march=mips32r6 -Wa,--trap
 cflags-$(CONFIG_CPU_MIPS64_R1)	+= $(call cc-option,-march=mips64,-mips64 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS64) \
 			-Wa,-mips64 -Wa,--trap
 cflags-$(CONFIG_CPU_MIPS64_R2)	+= $(call cc-option,-march=mips64r2,-mips64r2 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS64) \
 			-Wa,-mips64r2 -Wa,--trap
+cflags-$(CONFIG_CPU_MIPS64_R6)	+= -march=mips64r6 -Wa,--trap
 cflags-$(CONFIG_CPU_R5000)	+= -march=r5000 -Wa,--trap
 cflags-$(CONFIG_CPU_R5432)	+= $(call cc-option,-march=r5400,-march=r5000) \
 			-Wa,--trap

+ 193 - 0
arch/mips/configs/malta_qemu_32r6_defconfig

@@ -0,0 +1,193 @@
+CONFIG_MIPS_MALTA=y
+CONFIG_CPU_LITTLE_ENDIAN=y
+CONFIG_CPU_MIPS32_R6=y
+CONFIG_PAGE_SIZE_16KB=y
+CONFIG_HZ_100=y
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_AUDIT=y
+CONFIG_NO_HZ=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=15
+CONFIG_SYSCTL_SYSCALL=y
+CONFIG_EMBEDDED=y
+CONFIG_SLAB=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SRCVERSION_ALL=y
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PCI=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=m
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_NET_IPIP=m
+CONFIG_IP_MROUTE=y
+CONFIG_IP_PIMSM_V1=y
+CONFIG_IP_PIMSM_V2=y
+CONFIG_SYN_COOKIES=y
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=m
+CONFIG_INET_IPCOMP=m
+# CONFIG_INET_LRO is not set
+CONFIG_INET6_AH=m
+CONFIG_INET6_ESP=m
+CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_TUNNEL=m
+CONFIG_BRIDGE=m
+CONFIG_VLAN_8021Q=m
+CONFIG_ATALK=m
+CONFIG_DEV_APPLETALK=m
+CONFIG_IPDDP=m
+CONFIG_IPDDP_ENCAP=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_CBQ=m
+CONFIG_NET_SCH_HTB=m
+CONFIG_NET_SCH_HFSC=m
+CONFIG_NET_SCH_PRIO=m
+CONFIG_NET_SCH_RED=m
+CONFIG_NET_SCH_SFQ=m
+CONFIG_NET_SCH_TEQL=m
+CONFIG_NET_SCH_TBF=m
+CONFIG_NET_SCH_GRED=m
+CONFIG_NET_SCH_DSMARK=m
+CONFIG_NET_SCH_NETEM=m
+CONFIG_NET_SCH_INGRESS=m
+CONFIG_NET_CLS_BASIC=m
+CONFIG_NET_CLS_TCINDEX=m
+CONFIG_NET_CLS_ROUTE4=m
+CONFIG_NET_CLS_FW=m
+CONFIG_NET_CLS_U32=m
+CONFIG_NET_CLS_RSVP=m
+CONFIG_NET_CLS_RSVP6=m
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_POLICE=y
+CONFIG_NET_CLS_IND=y
+# CONFIG_WIRELESS is not set
+CONFIG_DEVTMPFS=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_IDE=y
+# CONFIG_IDE_PROC_FS is not set
+# CONFIG_IDEPCI_PCIBUS_ORDER is not set
+CONFIG_BLK_DEV_GENERIC=y
+CONFIG_BLK_DEV_PIIX=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_SG=y
+# CONFIG_SCSI_LOWLEVEL is not set
+CONFIG_NETDEVICES=y
+# CONFIG_NET_VENDOR_3COM is not set
+# CONFIG_NET_VENDOR_ADAPTEC is not set
+# CONFIG_NET_VENDOR_ALTEON is not set
+CONFIG_PCNET32=y
+# CONFIG_NET_VENDOR_ATHEROS is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_BROCADE is not set
+# CONFIG_NET_VENDOR_CHELSIO is not set
+# CONFIG_NET_VENDOR_CISCO is not set
+# CONFIG_NET_VENDOR_DEC is not set
+# CONFIG_NET_VENDOR_DLINK is not set
+# CONFIG_NET_VENDOR_EMULEX is not set
+# CONFIG_NET_VENDOR_EXAR is not set
+# CONFIG_NET_VENDOR_HP is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MELLANOX is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MYRI is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_NVIDIA is not set
+# CONFIG_NET_VENDOR_OKI is not set
+# CONFIG_NET_PACKET_ENGINE is not set
+# CONFIG_NET_VENDOR_QLOGIC is not set
+# CONFIG_NET_VENDOR_REALTEK is not set
+# CONFIG_NET_VENDOR_RDC is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SILAN is not set
+# CONFIG_NET_VENDOR_SIS is not set
+# CONFIG_NET_VENDOR_SMSC is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SUN is not set
+# CONFIG_NET_VENDOR_TEHUTI is not set
+# CONFIG_NET_VENDOR_TI is not set
+# CONFIG_NET_VENDOR_TOSHIBA is not set
+# CONFIG_NET_VENDOR_VIA is not set
+# CONFIG_NET_VENDOR_WIZNET is not set
+# CONFIG_WLAN is not set
+# CONFIG_VT is not set
+CONFIG_LEGACY_PTY_COUNT=4
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_HW_RANDOM=y
+# CONFIG_HWMON is not set
+CONFIG_FB=y
+CONFIG_FIRMWARE_EDID=y
+CONFIG_FB_MATROX=y
+CONFIG_FB_MATROX_G=y
+CONFIG_USB=y
+CONFIG_USB_EHCI_HCD=y
+# CONFIG_USB_EHCI_TT_NEWSCHED is not set
+CONFIG_USB_UHCI_HCD=y
+CONFIG_USB_STORAGE=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_TIMER=y
+CONFIG_LEDS_TRIGGER_IDE_DISK=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+CONFIG_LEDS_TRIGGER_BACKLIGHT=y
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_CMOS=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_XFS_FS=y
+CONFIG_XFS_QUOTA=y
+CONFIG_XFS_POSIX_ACL=y
+CONFIG_QUOTA=y
+CONFIG_QFMT_V2=y
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_NFS_FS=y
+CONFIG_ROOT_NFS=y
+CONFIG_CIFS=m
+CONFIG_CIFS_WEAK_PW_HASH=y
+CONFIG_CIFS_XATTR=y
+CONFIG_CIFS_POSIX=y
+CONFIG_NLS_CODEPAGE_437=m
+CONFIG_NLS_ISO8859_1=m
+# CONFIG_FTRACE is not set
+CONFIG_CRYPTO_NULL=m
+CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_MICHAEL_MIC=m
+CONFIG_CRYPTO_SHA512=m
+CONFIG_CRYPTO_TGR192=m
+CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_KHAZAD=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_TWOFISH=m
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+# CONFIG_CRYPTO_HW is not set

+ 1 - 0
arch/mips/include/asm/Kbuild

@@ -1,4 +1,5 @@
 # MIPS headers
+generic-$(CONFIG_GENERIC_CSUM) += checksum.h
 generic-y += cputime.h
 generic-y += current.h
 generic-y += dma-contiguous.h

+ 10 - 8
arch/mips/include/asm/asmmacro.h

@@ -19,7 +19,7 @@
 #include <asm/asmmacro-64.h>
 #endif
 
-#ifdef CONFIG_CPU_MIPSR2
+#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
 	.macro	local_irq_enable reg=t0
 	ei
 	irq_enable_hazard
@@ -104,7 +104,8 @@
 	.endm
 
 	.macro	fpu_save_double thread status tmp
-#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2)
+#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) || \
+		defined(CONFIG_CPU_MIPS32_R6)
 	sll	\tmp, \status, 5
 	bgez	\tmp, 10f
 	fpu_save_16odd \thread
@@ -160,7 +161,8 @@
 	.endm
 
 	.macro	fpu_restore_double thread status tmp
-#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2)
+#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) || \
+		defined(CONFIG_CPU_MIPS32_R6)
 	sll	\tmp, \status, 5
 	bgez	\tmp, 10f				# 16 register mode?
 
@@ -170,16 +172,16 @@
 	fpu_restore_16even \thread \tmp
 	.endm
 
-#ifdef CONFIG_CPU_MIPSR2
+#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
 	.macro	_EXT	rd, rs, p, s
 	ext	\rd, \rs, \p, \s
 	.endm
-#else /* !CONFIG_CPU_MIPSR2 */
+#else /* !CONFIG_CPU_MIPSR2 || !CONFIG_CPU_MIPSR6 */
 	.macro	_EXT	rd, rs, p, s
 	srl	\rd, \rs, \p
 	andi	\rd, \rd, (1 << \s) - 1
 	.endm
-#endif /* !CONFIG_CPU_MIPSR2 */
+#endif /* !CONFIG_CPU_MIPSR2 || !CONFIG_CPU_MIPSR6 */
 
 /*
  * Temporary until all gas have MT ASE support
@@ -304,7 +306,7 @@
 	.set	push
 	.set	noat
 	SET_HARDFLOAT
-	add	$1, \base, \off
+	addu	$1, \base, \off
 	.word	LDD_MSA_INSN | (\wd << 6)
 	.set	pop
 	.endm
@@ -313,7 +315,7 @@
 	.set	push
 	.set	noat
 	SET_HARDFLOAT
-	add	$1, \base, \off
+	addu	$1, \base, \off
 	.word	STD_MSA_INSN | (\wd << 6)
 	.set	pop
 	.endm

+ 21 - 21
arch/mips/include/asm/atomic.h

@@ -54,19 +54,19 @@ static __inline__ void atomic_##op(int i, atomic_t * v)			      \
 		"	sc	%0, %1					\n"   \
 		"	beqzl	%0, 1b					\n"   \
 		"	.set	mips0					\n"   \
-		: "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter)	      \
+		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter)	      \
 		: "Ir" (i));						      \
 	} else if (kernel_uses_llsc) {					      \
 		int temp;						      \
 									      \
 		do {							      \
 			__asm__ __volatile__(				      \
-			"	.set	arch=r4000			\n"   \
+			"	.set	"MIPS_ISA_LEVEL"		\n"   \
 			"	ll	%0, %1		# atomic_" #op "\n"   \
 			"	" #asm_op " %0, %2			\n"   \
 			"	sc	%0, %1				\n"   \
 			"	.set	mips0				\n"   \
-			: "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter)      \
+			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter)      \
 			: "Ir" (i));					      \
 		} while (unlikely(!temp));				      \
 	} else {							      \
@@ -97,20 +97,20 @@ static __inline__ int atomic_##op##_return(int i, atomic_t * v)		      \
 		"	" #asm_op " %0, %1, %3				\n"   \
 		"	.set	mips0					\n"   \
 		: "=&r" (result), "=&r" (temp),				      \
-		  "+" GCC_OFF12_ASM() (v->counter)			      \
+		  "+" GCC_OFF_SMALL_ASM() (v->counter)			      \
 		: "Ir" (i));						      \
 	} else if (kernel_uses_llsc) {					      \
 		int temp;						      \
 									      \
 		do {							      \
 			__asm__ __volatile__(				      \
-			"	.set	arch=r4000			\n"   \
+			"	.set	"MIPS_ISA_LEVEL"		\n"   \
 			"	ll	%1, %2	# atomic_" #op "_return	\n"   \
 			"	" #asm_op " %0, %1, %3			\n"   \
 			"	sc	%0, %2				\n"   \
 			"	.set	mips0				\n"   \
 			: "=&r" (result), "=&r" (temp),			      \
-			  "+" GCC_OFF12_ASM() (v->counter)		      \
+			  "+" GCC_OFF_SMALL_ASM() (v->counter)		      \
 			: "Ir" (i));					      \
 		} while (unlikely(!result));				      \
 									      \
@@ -171,14 +171,14 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
 		"1:							\n"
 		"	.set	mips0					\n"
 		: "=&r" (result), "=&r" (temp),
-		  "+" GCC_OFF12_ASM() (v->counter)
-		: "Ir" (i), GCC_OFF12_ASM() (v->counter)
+		  "+" GCC_OFF_SMALL_ASM() (v->counter)
+		: "Ir" (i), GCC_OFF_SMALL_ASM() (v->counter)
 		: "memory");
 	} else if (kernel_uses_llsc) {
 		int temp;
 
 		__asm__ __volatile__(
-		"	.set	arch=r4000				\n"
+		"	.set	"MIPS_ISA_LEVEL"			\n"
 		"1:	ll	%1, %2		# atomic_sub_if_positive\n"
 		"	subu	%0, %1, %3				\n"
 		"	bltz	%0, 1f					\n"
@@ -190,7 +190,7 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
 		"1:							\n"
 		"	.set	mips0					\n"
 		: "=&r" (result), "=&r" (temp),
-		  "+" GCC_OFF12_ASM() (v->counter)
+		  "+" GCC_OFF_SMALL_ASM() (v->counter)
 		: "Ir" (i));
 	} else {
 		unsigned long flags;
@@ -333,19 +333,19 @@ static __inline__ void atomic64_##op(long i, atomic64_t * v)		      \
 		"	scd	%0, %1					\n"   \
 		"	beqzl	%0, 1b					\n"   \
 		"	.set	mips0					\n"   \
-		: "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter)	      \
+		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter)	      \
 		: "Ir" (i));						      \
 	} else if (kernel_uses_llsc) {					      \
 		long temp;						      \
 									      \
 		do {							      \
 			__asm__ __volatile__(				      \
-			"	.set	arch=r4000			\n"   \
+			"	.set	"MIPS_ISA_LEVEL"		\n"   \
 			"	lld	%0, %1		# atomic64_" #op "\n" \
 			"	" #asm_op " %0, %2			\n"   \
 			"	scd	%0, %1				\n"   \
 			"	.set	mips0				\n"   \
-			: "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter)      \
+			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter)      \
 			: "Ir" (i));					      \
 		} while (unlikely(!temp));				      \
 	} else {							      \
@@ -376,21 +376,21 @@ static __inline__ long atomic64_##op##_return(long i, atomic64_t * v)	      \
 		"	" #asm_op " %0, %1, %3				\n"   \
 		"	.set	mips0					\n"   \
 		: "=&r" (result), "=&r" (temp),				      \
-		  "+" GCC_OFF12_ASM() (v->counter)			      \
+		  "+" GCC_OFF_SMALL_ASM() (v->counter)			      \
 		: "Ir" (i));						      \
 	} else if (kernel_uses_llsc) {					      \
 		long temp;						      \
 									      \
 		do {							      \
 			__asm__ __volatile__(				      \
-			"	.set	arch=r4000			\n"   \
+			"	.set	"MIPS_ISA_LEVEL"		\n"   \
 			"	lld	%1, %2	# atomic64_" #op "_return\n"  \
 			"	" #asm_op " %0, %1, %3			\n"   \
 			"	scd	%0, %2				\n"   \
 			"	.set	mips0				\n"   \
 			: "=&r" (result), "=&r" (temp),			      \
-			  "=" GCC_OFF12_ASM() (v->counter)		      \
-			: "Ir" (i), GCC_OFF12_ASM() (v->counter)	      \
+			  "=" GCC_OFF_SMALL_ASM() (v->counter)		      \
+			: "Ir" (i), GCC_OFF_SMALL_ASM() (v->counter)	      \
 			: "memory");					      \
 		} while (unlikely(!result));				      \
 									      \
@@ -452,14 +452,14 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
 		"1:							\n"
 		"	.set	mips0					\n"
 		: "=&r" (result), "=&r" (temp),
-		  "=" GCC_OFF12_ASM() (v->counter)
-		: "Ir" (i), GCC_OFF12_ASM() (v->counter)
+		  "=" GCC_OFF_SMALL_ASM() (v->counter)
+		: "Ir" (i), GCC_OFF_SMALL_ASM() (v->counter)
 		: "memory");
 	} else if (kernel_uses_llsc) {
 		long temp;
 
 		__asm__ __volatile__(
-		"	.set	arch=r4000				\n"
+		"	.set	"MIPS_ISA_LEVEL"			\n"
 		"1:	lld	%1, %2		# atomic64_sub_if_positive\n"
 		"	dsubu	%0, %1, %3				\n"
 		"	bltz	%0, 1f					\n"
@@ -471,7 +471,7 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
 		"1:							\n"
 		"	.set	mips0					\n"
 		: "=&r" (result), "=&r" (temp),
-		  "+" GCC_OFF12_ASM() (v->counter)
+		  "+" GCC_OFF_SMALL_ASM() (v->counter)
 		: "Ir" (i));
 	} else {
 		unsigned long flags;

+ 32 - 32
arch/mips/include/asm/bitops.h

@@ -79,28 +79,28 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
 		"	" __SC	"%0, %1					\n"
 		"	beqzl	%0, 1b					\n"
 		"	.set	mips0					\n"
-		: "=&r" (temp), "=" GCC_OFF12_ASM() (*m)
-		: "ir" (1UL << bit), GCC_OFF12_ASM() (*m));
-#ifdef CONFIG_CPU_MIPSR2
+		: "=&r" (temp), "=" GCC_OFF_SMALL_ASM() (*m)
+		: "ir" (1UL << bit), GCC_OFF_SMALL_ASM() (*m));
+#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
 	} else if (kernel_uses_llsc && __builtin_constant_p(bit)) {
 		do {
 			__asm__ __volatile__(
 			"	" __LL "%0, %1		# set_bit	\n"
 			"	" __INS "%0, %3, %2, 1			\n"
 			"	" __SC "%0, %1				\n"
-			: "=&r" (temp), "+" GCC_OFF12_ASM() (*m)
+			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
 			: "ir" (bit), "r" (~0));
 		} while (unlikely(!temp));
-#endif /* CONFIG_CPU_MIPSR2 */
+#endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */
 	} else if (kernel_uses_llsc) {
 		do {
 			__asm__ __volatile__(
-			"	.set	arch=r4000			\n"
+			"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
 			"	" __LL "%0, %1		# set_bit	\n"
 			"	or	%0, %2				\n"
 			"	" __SC	"%0, %1				\n"
 			"	.set	mips0				\n"
-			: "=&r" (temp), "+" GCC_OFF12_ASM() (*m)
+			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
 			: "ir" (1UL << bit));
 		} while (unlikely(!temp));
 	} else
@@ -131,28 +131,28 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
 		"	" __SC "%0, %1					\n"
 		"	beqzl	%0, 1b					\n"
 		"	.set	mips0					\n"
-		: "=&r" (temp), "+" GCC_OFF12_ASM() (*m)
+		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
 		: "ir" (~(1UL << bit)));
-#ifdef CONFIG_CPU_MIPSR2
+#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
 	} else if (kernel_uses_llsc && __builtin_constant_p(bit)) {
 		do {
 			__asm__ __volatile__(
 			"	" __LL "%0, %1		# clear_bit	\n"
 			"	" __INS "%0, $0, %2, 1			\n"
 			"	" __SC "%0, %1				\n"
-			: "=&r" (temp), "+" GCC_OFF12_ASM() (*m)
+			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
 			: "ir" (bit));
 		} while (unlikely(!temp));
-#endif /* CONFIG_CPU_MIPSR2 */
+#endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */
 	} else if (kernel_uses_llsc) {
 		do {
 			__asm__ __volatile__(
-			"	.set	arch=r4000			\n"
+			"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
 			"	" __LL "%0, %1		# clear_bit	\n"
 			"	and	%0, %2				\n"
 			"	" __SC "%0, %1				\n"
 			"	.set	mips0				\n"
-			: "=&r" (temp), "+" GCC_OFF12_ASM() (*m)
+			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
 			: "ir" (~(1UL << bit)));
 		} while (unlikely(!temp));
 	} else
@@ -197,7 +197,7 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
 		"	" __SC	"%0, %1				\n"
 		"	beqzl	%0, 1b				\n"
 		"	.set	mips0				\n"
-		: "=&r" (temp), "+" GCC_OFF12_ASM() (*m)
+		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
 		: "ir" (1UL << bit));
 	} else if (kernel_uses_llsc) {
 		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
@@ -205,12 +205,12 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
 
 		do {
 			__asm__ __volatile__(
-			"	.set	arch=r4000			\n"
+			"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
 			"	" __LL "%0, %1		# change_bit	\n"
 			"	xor	%0, %2				\n"
 			"	" __SC	"%0, %1				\n"
 			"	.set	mips0				\n"
-			: "=&r" (temp), "+" GCC_OFF12_ASM() (*m)
+			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
 			: "ir" (1UL << bit));
 		} while (unlikely(!temp));
 	} else
@@ -245,7 +245,7 @@ static inline int test_and_set_bit(unsigned long nr,
 		"	beqzl	%2, 1b					\n"
 		"	and	%2, %0, %3				\n"
 		"	.set	mips0					\n"
-		: "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res)
+		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
 		: "r" (1UL << bit)
 		: "memory");
 	} else if (kernel_uses_llsc) {
@@ -254,12 +254,12 @@ static inline int test_and_set_bit(unsigned long nr,
 
 		do {
 			__asm__ __volatile__(
-			"	.set	arch=r4000			\n"
+			"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
 			"	" __LL "%0, %1	# test_and_set_bit	\n"
 			"	or	%2, %0, %3			\n"
 			"	" __SC	"%2, %1				\n"
 			"	.set	mips0				\n"
-			: "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res)
+			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
 			: "r" (1UL << bit)
 			: "memory");
 		} while (unlikely(!res));
@@ -308,12 +308,12 @@ static inline int test_and_set_bit_lock(unsigned long nr,
 
 		do {
 			__asm__ __volatile__(
-			"	.set	arch=r4000			\n"
+			"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
 			"	" __LL "%0, %1	# test_and_set_bit	\n"
 			"	or	%2, %0, %3			\n"
 			"	" __SC	"%2, %1				\n"
 			"	.set	mips0				\n"
-			: "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res)
+			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
 			: "r" (1UL << bit)
 			: "memory");
 		} while (unlikely(!res));
@@ -355,10 +355,10 @@ static inline int test_and_clear_bit(unsigned long nr,
 		"	beqzl	%2, 1b					\n"
 		"	and	%2, %0, %3				\n"
 		"	.set	mips0					\n"
-		: "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res)
+		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
 		: "r" (1UL << bit)
 		: "memory");
-#ifdef CONFIG_CPU_MIPSR2
+#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
 	} else if (kernel_uses_llsc && __builtin_constant_p(nr)) {
 		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
 		unsigned long temp;
@@ -369,7 +369,7 @@ static inline int test_and_clear_bit(unsigned long nr,
 			"	" __EXT "%2, %0, %3, 1			\n"
 			"	" __INS "%0, $0, %3, 1			\n"
 			"	" __SC	"%0, %1				\n"
-			: "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res)
+			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
 			: "ir" (bit)
 			: "memory");
 		} while (unlikely(!temp));
@@ -380,13 +380,13 @@ static inline int test_and_clear_bit(unsigned long nr,
 
 		do {
 			__asm__ __volatile__(
-			"	.set	arch=r4000			\n"
+			"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
 			"	" __LL	"%0, %1 # test_and_clear_bit	\n"
 			"	or	%2, %0, %3			\n"
 			"	xor	%2, %3				\n"
 			"	" __SC	"%2, %1				\n"
 			"	.set	mips0				\n"
-			: "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res)
+			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
 			: "r" (1UL << bit)
 			: "memory");
 		} while (unlikely(!res));
@@ -428,7 +428,7 @@ static inline int test_and_change_bit(unsigned long nr,
 		"	beqzl	%2, 1b					\n"
 		"	and	%2, %0, %3				\n"
 		"	.set	mips0					\n"
-		: "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res)
+		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
 		: "r" (1UL << bit)
 		: "memory");
 	} else if (kernel_uses_llsc) {
@@ -437,12 +437,12 @@ static inline int test_and_change_bit(unsigned long nr,
 
 		do {
 			__asm__ __volatile__(
-			"	.set	arch=r4000			\n"
+			"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
 			"	" __LL	"%0, %1 # test_and_change_bit	\n"
 			"	xor	%2, %0, %3			\n"
 			"	" __SC	"\t%2, %1			\n"
 			"	.set	mips0				\n"
-			: "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res)
+			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
 			: "r" (1UL << bit)
 			: "memory");
 		} while (unlikely(!res));
@@ -485,7 +485,7 @@ static inline unsigned long __fls(unsigned long word)
 	    __builtin_constant_p(cpu_has_clo_clz) && cpu_has_clo_clz) {
 		__asm__(
 		"	.set	push					\n"
-		"	.set	mips32					\n"
+		"	.set	"MIPS_ISA_LEVEL"			\n"
 		"	clz	%0, %1					\n"
 		"	.set	pop					\n"
 		: "=r" (num)
@@ -498,7 +498,7 @@ static inline unsigned long __fls(unsigned long word)
 	    __builtin_constant_p(cpu_has_mips64) && cpu_has_mips64) {
 		__asm__(
 		"	.set	push					\n"
-		"	.set	mips64					\n"
+		"	.set	"MIPS_ISA_LEVEL"			\n"
 		"	dclz	%0, %1					\n"
 		"	.set	pop					\n"
 		: "=r" (num)
@@ -562,7 +562,7 @@ static inline int fls(int x)
 	if (__builtin_constant_p(cpu_has_clo_clz) && cpu_has_clo_clz) {
 		__asm__(
 		"	.set	push					\n"
-		"	.set	mips32					\n"
+		"	.set	"MIPS_ISA_LEVEL"			\n"
 		"	clz	%0, %1					\n"
 		"	.set	pop					\n"
 		: "=r" (x)

+ 5 - 0
arch/mips/include/asm/checksum.h

@@ -12,6 +12,10 @@
 #ifndef _ASM_CHECKSUM_H
 #define _ASM_CHECKSUM_H
 
+#ifdef CONFIG_GENERIC_CSUM
+#include <asm-generic/checksum.h>
+#else
+
 #include <linux/in6.h>
 
 #include <asm/uaccess.h>
@@ -274,5 +278,6 @@ static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
 }
 
 #include <asm-generic/checksum.h>
+#endif /* CONFIG_GENERIC_CSUM */
 
 #endif /* _ASM_CHECKSUM_H */

+ 17 - 17
arch/mips/include/asm/cmpxchg.h

@@ -31,24 +31,24 @@ static inline unsigned long __xchg_u32(volatile int * m, unsigned int val)
 		"	sc	%2, %1					\n"
 		"	beqzl	%2, 1b					\n"
 		"	.set	mips0					\n"
-		: "=&r" (retval), "=" GCC_OFF12_ASM() (*m), "=&r" (dummy)
-		: GCC_OFF12_ASM() (*m), "Jr" (val)
+		: "=&r" (retval), "=" GCC_OFF_SMALL_ASM() (*m), "=&r" (dummy)
+		: GCC_OFF_SMALL_ASM() (*m), "Jr" (val)
 		: "memory");
 	} else if (kernel_uses_llsc) {
 		unsigned long dummy;
 
 		do {
 			__asm__ __volatile__(
-			"	.set	arch=r4000			\n"
+			"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
 			"	ll	%0, %3		# xchg_u32	\n"
 			"	.set	mips0				\n"
 			"	move	%2, %z4				\n"
-			"	.set	arch=r4000			\n"
+			"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
 			"	sc	%2, %1				\n"
 			"	.set	mips0				\n"
-			: "=&r" (retval), "=" GCC_OFF12_ASM() (*m),
+			: "=&r" (retval), "=" GCC_OFF_SMALL_ASM() (*m),
 			  "=&r" (dummy)
-			: GCC_OFF12_ASM() (*m), "Jr" (val)
+			: GCC_OFF_SMALL_ASM() (*m), "Jr" (val)
 			: "memory");
 		} while (unlikely(!dummy));
 	} else {
@@ -82,22 +82,22 @@ static inline __u64 __xchg_u64(volatile __u64 * m, __u64 val)
 		"	scd	%2, %1					\n"
 		"	beqzl	%2, 1b					\n"
 		"	.set	mips0					\n"
-		: "=&r" (retval), "=" GCC_OFF12_ASM() (*m), "=&r" (dummy)
-		: GCC_OFF12_ASM() (*m), "Jr" (val)
+		: "=&r" (retval), "=" GCC_OFF_SMALL_ASM() (*m), "=&r" (dummy)
+		: GCC_OFF_SMALL_ASM() (*m), "Jr" (val)
 		: "memory");
 	} else if (kernel_uses_llsc) {
 		unsigned long dummy;
 
 		do {
 			__asm__ __volatile__(
-			"	.set	arch=r4000			\n"
+			"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
 			"	lld	%0, %3		# xchg_u64	\n"
 			"	move	%2, %z4				\n"
 			"	scd	%2, %1				\n"
 			"	.set	mips0				\n"
-			: "=&r" (retval), "=" GCC_OFF12_ASM() (*m),
+			: "=&r" (retval), "=" GCC_OFF_SMALL_ASM() (*m),
 			  "=&r" (dummy)
-			: GCC_OFF12_ASM() (*m), "Jr" (val)
+			: GCC_OFF_SMALL_ASM() (*m), "Jr" (val)
 			: "memory");
 		} while (unlikely(!dummy));
 	} else {
@@ -158,25 +158,25 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz
 		"	beqzl	$1, 1b				\n"	\
 		"2:						\n"	\
 		"	.set	pop				\n"	\
-		: "=&r" (__ret), "=" GCC_OFF12_ASM() (*m)		\
-		: GCC_OFF12_ASM() (*m), "Jr" (old), "Jr" (new)		\
+		: "=&r" (__ret), "=" GCC_OFF_SMALL_ASM() (*m)		\
+		: GCC_OFF_SMALL_ASM() (*m), "Jr" (old), "Jr" (new)		\
 		: "memory");						\
 	} else if (kernel_uses_llsc) {					\
 		__asm__ __volatile__(					\
 		"	.set	push				\n"	\
 		"	.set	noat				\n"	\
-		"	.set	arch=r4000			\n"	\
+		"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"	\
 		"1:	" ld "	%0, %2		# __cmpxchg_asm \n"	\
 		"	bne	%0, %z3, 2f			\n"	\
 		"	.set	mips0				\n"	\
 		"	move	$1, %z4				\n"	\
-		"	.set	arch=r4000			\n"	\
+		"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"	\
 		"	" st "	$1, %1				\n"	\
 		"	beqz	$1, 1b				\n"	\
 		"	.set	pop				\n"	\
 		"2:						\n"	\
-		: "=&r" (__ret), "=" GCC_OFF12_ASM() (*m)		\
-		: GCC_OFF12_ASM() (*m), "Jr" (old), "Jr" (new)		\
+		: "=&r" (__ret), "=" GCC_OFF_SMALL_ASM() (*m)		\
+		: GCC_OFF_SMALL_ASM() (*m), "Jr" (old), "Jr" (new)		\
 		: "memory");						\
 	} else {							\
 		unsigned long __flags;					\

+ 21 - 3
arch/mips/include/asm/compiler.h

@@ -16,12 +16,30 @@
 #define GCC_REG_ACCUM "accum"
 #endif
 
+#ifdef CONFIG_CPU_MIPSR6
+/* All MIPS R6 toolchains support the ZC constraint */
+#define GCC_OFF_SMALL_ASM() "ZC"
+#else
 #ifndef CONFIG_CPU_MICROMIPS
-#define GCC_OFF12_ASM() "R"
+#define GCC_OFF_SMALL_ASM() "R"
 #elif __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 9)
-#define GCC_OFF12_ASM() "ZC"
+#define GCC_OFF_SMALL_ASM() "ZC"
 #else
 #error "microMIPS compilation unsupported with GCC older than 4.9"
-#endif
+#endif /* CONFIG_CPU_MICROMIPS */
+#endif /* CONFIG_CPU_MIPSR6 */
+
+#ifdef CONFIG_CPU_MIPSR6
+#define MIPS_ISA_LEVEL "mips64r6"
+#define MIPS_ISA_ARCH_LEVEL MIPS_ISA_LEVEL
+#define MIPS_ISA_LEVEL_RAW mips64r6
+#define MIPS_ISA_ARCH_LEVEL_RAW MIPS_ISA_LEVEL_RAW
+#else
+/* MIPS64 is a superset of MIPS32 */
+#define MIPS_ISA_LEVEL "mips64r2"
+#define MIPS_ISA_ARCH_LEVEL "arch=r4000"
+#define MIPS_ISA_LEVEL_RAW mips64r2
+#define MIPS_ISA_ARCH_LEVEL_RAW MIPS_ISA_LEVEL_RAW
+#endif /* CONFIG_CPU_MIPSR6 */
 
 #endif /* _ASM_COMPILER_H */
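The compiler.h hunk above is the pivot for most of the other header changes: GCC_OFF12_ASM() becomes GCC_OFF_SMALL_ASM() (R6 toolchains always take the "ZC" constraint, which covers the 9-bit memory offsets of R6 LL/SC), and the MIPS_ISA_LEVEL / MIPS_ISA_ARCH_LEVEL strings replace the hard-coded ".set arch=r4000" and ".set mips64r2" directives. A minimal sketch of the resulting pattern, mirroring the atomic.h hunks (foo_inc() is an illustration, not kernel code):

	#include <asm/compiler.h>

	/* LL/SC increment that assembles for both pre-R6 and R6 kernels;
	 * sc writes 0 on failure, so beqz retries the whole sequence. */
	static inline void foo_inc(int *p)
	{
		int temp;

		__asm__ __volatile__(
		"	.set	"MIPS_ISA_LEVEL"		\n"
		"1:	ll	%0, %1		# foo_inc	\n"
		"	addiu	%0, 1				\n"
		"	sc	%0, %1				\n"
		"	beqz	%0, 1b				\n"
		"	.set	mips0				\n"
		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*p));
	}

With CONFIG_CPU_MIPSR6 this expands to ".set mips64r6" and the "ZC" constraint; otherwise it falls back to ".set mips64r2" and "R", exactly the behaviour before the rename.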

+ 23 - 5
arch/mips/include/asm/cpu-features.h

@@ -38,6 +38,9 @@
 #ifndef cpu_has_maar
 #define cpu_has_maar		(cpu_data[0].options & MIPS_CPU_MAAR)
 #endif
+#ifndef cpu_has_rw_llb
+#define cpu_has_rw_llb		(cpu_data[0].options & MIPS_CPU_RW_LLB)
+#endif
 
 /*
  * For the moment we don't consider R6000 and R8000 so we can assume that
@@ -171,6 +174,9 @@
 #endif
 #endif
 
+#ifndef cpu_has_mips_1
+# define cpu_has_mips_1		(!cpu_has_mips_r6)
+#endif
 #ifndef cpu_has_mips_2
 # define cpu_has_mips_2		(cpu_data[0].isa_level & MIPS_CPU_ISA_II)
 #endif
@@ -189,12 +195,18 @@
 #ifndef cpu_has_mips32r2
 # define cpu_has_mips32r2	(cpu_data[0].isa_level & MIPS_CPU_ISA_M32R2)
 #endif
+#ifndef cpu_has_mips32r6
+# define cpu_has_mips32r6	(cpu_data[0].isa_level & MIPS_CPU_ISA_M32R6)
+#endif
 #ifndef cpu_has_mips64r1
 # define cpu_has_mips64r1	(cpu_data[0].isa_level & MIPS_CPU_ISA_M64R1)
 #endif
 #ifndef cpu_has_mips64r2
 # define cpu_has_mips64r2	(cpu_data[0].isa_level & MIPS_CPU_ISA_M64R2)
 #endif
+#ifndef cpu_has_mips64r6
+# define cpu_has_mips64r6	(cpu_data[0].isa_level & MIPS_CPU_ISA_M64R6)
+#endif
 
 /*
  * Shortcuts ...
@@ -208,17 +220,23 @@
 #define cpu_has_mips_4_5_r	(cpu_has_mips_4 | cpu_has_mips_5_r)
 #define cpu_has_mips_5_r	(cpu_has_mips_5 | cpu_has_mips_r)
 
-#define cpu_has_mips_4_5_r2	(cpu_has_mips_4_5 | cpu_has_mips_r2)
+#define cpu_has_mips_4_5_r2_r6	(cpu_has_mips_4_5 | cpu_has_mips_r2 | \
+				 cpu_has_mips_r6)
 
-#define cpu_has_mips32	(cpu_has_mips32r1 | cpu_has_mips32r2)
-#define cpu_has_mips64	(cpu_has_mips64r1 | cpu_has_mips64r2)
+#define cpu_has_mips32	(cpu_has_mips32r1 | cpu_has_mips32r2 | cpu_has_mips32r6)
+#define cpu_has_mips64	(cpu_has_mips64r1 | cpu_has_mips64r2 | cpu_has_mips64r6)
 #define cpu_has_mips_r1 (cpu_has_mips32r1 | cpu_has_mips64r1)
 #define cpu_has_mips_r2 (cpu_has_mips32r2 | cpu_has_mips64r2)
+#define cpu_has_mips_r6	(cpu_has_mips32r6 | cpu_has_mips64r6)
 #define cpu_has_mips_r	(cpu_has_mips32r1 | cpu_has_mips32r2 | \
-			 cpu_has_mips64r1 | cpu_has_mips64r2)
+			 cpu_has_mips32r6 | cpu_has_mips64r1 | \
+			 cpu_has_mips64r2 | cpu_has_mips64r6)
+
+/* MIPSR2 and MIPSR6 have a lot of similarities */
+#define cpu_has_mips_r2_r6	(cpu_has_mips_r2 | cpu_has_mips_r6)
 
 #ifndef cpu_has_mips_r2_exec_hazard
-#define cpu_has_mips_r2_exec_hazard cpu_has_mips_r2
+#define cpu_has_mips_r2_exec_hazard (cpu_has_mips_r2 | cpu_has_mips_r6)
 #endif
 
 /*

+ 7 - 0
arch/mips/include/asm/cpu-type.h

@@ -54,6 +54,13 @@ static inline int __pure __get_cpu_type(const int cpu_type)
 	case CPU_M5150:
 #endif
 
+#if defined(CONFIG_SYS_HAS_CPU_MIPS32_R2) || \
+    defined(CONFIG_SYS_HAS_CPU_MIPS32_R6) || \
+    defined(CONFIG_SYS_HAS_CPU_MIPS64_R2) || \
+    defined(CONFIG_SYS_HAS_CPU_MIPS64_R6)
+	case CPU_QEMU_GENERIC:
+#endif
+
 #ifdef CONFIG_SYS_HAS_CPU_MIPS64_R1
 	case CPU_5KC:
 	case CPU_5KE:

+ 9 - 2
arch/mips/include/asm/cpu.h

@@ -93,6 +93,7 @@
  * These are the PRID's for when 23:16 == PRID_COMP_MIPS
  */
 
+#define PRID_IMP_QEMU_GENERIC	0x0000
 #define PRID_IMP_4KC		0x8000
 #define PRID_IMP_5KC		0x8100
 #define PRID_IMP_20KC		0x8200
@@ -312,6 +313,8 @@ enum cpu_type_enum {
 	CPU_LOONGSON3, CPU_CAVIUM_OCTEON, CPU_CAVIUM_OCTEON_PLUS,
 	CPU_CAVIUM_OCTEON2, CPU_CAVIUM_OCTEON3, CPU_XLR, CPU_XLP,
 
+	CPU_QEMU_GENERIC,
+
 	CPU_LAST
 };
 
@@ -329,11 +332,14 @@ enum cpu_type_enum {
 #define MIPS_CPU_ISA_M32R2	0x00000020
 #define MIPS_CPU_ISA_M64R1	0x00000040
 #define MIPS_CPU_ISA_M64R2	0x00000080
+#define MIPS_CPU_ISA_M32R6	0x00000100
+#define MIPS_CPU_ISA_M64R6	0x00000200
 
 #define MIPS_CPU_ISA_32BIT (MIPS_CPU_ISA_II | MIPS_CPU_ISA_M32R1 | \
-	MIPS_CPU_ISA_M32R2)
+	MIPS_CPU_ISA_M32R2 | MIPS_CPU_ISA_M32R6)
 #define MIPS_CPU_ISA_64BIT (MIPS_CPU_ISA_III | MIPS_CPU_ISA_IV | \
-	MIPS_CPU_ISA_V | MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2)
+	MIPS_CPU_ISA_V | MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2 | \
+	MIPS_CPU_ISA_M64R6)
 
 /*
  * CPU Option encodings
@@ -370,6 +376,7 @@ enum cpu_type_enum {
 #define MIPS_CPU_RIXIEX		0x200000000ull /* CPU has unique exception codes for {Read, Execute}-Inhibit exceptions */
 #define MIPS_CPU_MAAR		0x400000000ull /* MAAR(I) registers are present */
 #define MIPS_CPU_FRE		0x800000000ull /* FRE & UFE bits implemented */
+#define MIPS_CPU_RW_LLB		0x1000000000ull /* LLADDR/LLB writes are allowed */
 
 /*
  * CPU ASE encodings

+ 2 - 2
arch/mips/include/asm/edac.h

@@ -26,8 +26,8 @@ static inline void atomic_scrub(void *va, u32 size)
 		"	sc	%0, %1					\n"
 		"	beqz	%0, 1b					\n"
 		"	.set	mips0					\n"
-		: "=&r" (temp), "=" GCC_OFF12_ASM() (*virt_addr)
-		: GCC_OFF12_ASM() (*virt_addr));
+		: "=&r" (temp), "=" GCC_OFF_SMALL_ASM() (*virt_addr)
+		: GCC_OFF_SMALL_ASM() (*virt_addr));
 
 		virt_addr++;
 	}

+ 6 - 4
arch/mips/include/asm/elf.h

@@ -417,13 +417,15 @@ extern unsigned long arch_randomize_brk(struct mm_struct *mm);
 struct arch_elf_state {
 	int fp_abi;
 	int interp_fp_abi;
-	int overall_abi;
+	int overall_fp_mode;
 };
 
+#define MIPS_ABI_FP_UNKNOWN	(-1)	/* Unknown FP ABI (kernel internal) */
+
 #define INIT_ARCH_ELF_STATE {			\
-	.fp_abi = -1,				\
-	.interp_fp_abi = -1,			\
-	.overall_abi = -1,			\
+	.fp_abi = MIPS_ABI_FP_UNKNOWN,		\
+	.interp_fp_abi = MIPS_ABI_FP_UNKNOWN,	\
+	.overall_fp_mode = -1,			\
 }
 
 extern int arch_elf_pt_proc(void *ehdr, void *phdr, struct file *elf,

+ 2 - 1
arch/mips/include/asm/fpu.h

@@ -68,7 +68,8 @@ static inline int __enable_fpu(enum fpu_mode mode)
 		goto fr_common;
 
 	case FPU_64BIT:
-#if !(defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_64BIT))
+#if !(defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_CPU_MIPS32_R6) \
+      || defined(CONFIG_64BIT))
 		/* we only have a 32-bit FPU */
 		return SIGFPE;
 #endif

+ 12 - 12
arch/mips/include/asm/futex.h

@@ -45,19 +45,19 @@
 		"	"__UA_ADDR "\t2b, 4b			\n"	\
 		"	.previous				\n"	\
 		: "=r" (ret), "=&r" (oldval),				\
-		  "=" GCC_OFF12_ASM() (*uaddr)				\
-		: "0" (0), GCC_OFF12_ASM() (*uaddr), "Jr" (oparg),	\
+		  "=" GCC_OFF_SMALL_ASM() (*uaddr)				\
+		: "0" (0), GCC_OFF_SMALL_ASM() (*uaddr), "Jr" (oparg),	\
 		  "i" (-EFAULT)						\
 		: "memory");						\
 	} else if (cpu_has_llsc) {					\
 		__asm__ __volatile__(					\
 		"	.set	push				\n"	\
 		"	.set	noat				\n"	\
-		"	.set	arch=r4000			\n"	\
+		"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"	\
 		"1:	"user_ll("%1", "%4")" # __futex_atomic_op\n"	\
 		"	.set	mips0				\n"	\
 		"	" insn	"				\n"	\
-		"	.set	arch=r4000			\n"	\
+		"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"	\
 		"2:	"user_sc("$1", "%2")"			\n"	\
 		"	beqz	$1, 1b				\n"	\
 		__WEAK_LLSC_MB						\
@@ -74,8 +74,8 @@
 		"	"__UA_ADDR "\t2b, 4b			\n"	\
 		"	.previous				\n"	\
 		: "=r" (ret), "=&r" (oldval),				\
-		  "=" GCC_OFF12_ASM() (*uaddr)				\
-		: "0" (0), GCC_OFF12_ASM() (*uaddr), "Jr" (oparg),	\
+		  "=" GCC_OFF_SMALL_ASM() (*uaddr)				\
+		: "0" (0), GCC_OFF_SMALL_ASM() (*uaddr), "Jr" (oparg),	\
 		  "i" (-EFAULT)						\
 		: "memory");						\
 	} else								\
@@ -174,8 +174,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 		"	"__UA_ADDR "\t1b, 4b				\n"
 		"	"__UA_ADDR "\t2b, 4b				\n"
 		"	.previous					\n"
-		: "+r" (ret), "=&r" (val), "=" GCC_OFF12_ASM() (*uaddr)
-		: GCC_OFF12_ASM() (*uaddr), "Jr" (oldval), "Jr" (newval),
+		: "+r" (ret), "=&r" (val), "=" GCC_OFF_SMALL_ASM() (*uaddr)
+		: GCC_OFF_SMALL_ASM() (*uaddr), "Jr" (oldval), "Jr" (newval),
 		  "i" (-EFAULT)
 		: "memory");
 	} else if (cpu_has_llsc) {
@@ -183,12 +183,12 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 		"# futex_atomic_cmpxchg_inatomic			\n"
 		"	.set	push					\n"
 		"	.set	noat					\n"
-		"	.set	arch=r4000				\n"
+		"	.set	"MIPS_ISA_ARCH_LEVEL"			\n"
 		"1:	"user_ll("%1", "%3")"				\n"
 		"	bne	%1, %z4, 3f				\n"
 		"	.set	mips0					\n"
 		"	move	$1, %z5					\n"
-		"	.set	arch=r4000				\n"
+		"	.set	"MIPS_ISA_ARCH_LEVEL"			\n"
 		"2:	"user_sc("$1", "%2")"				\n"
 		"	beqz	$1, 1b					\n"
 		__WEAK_LLSC_MB
@@ -203,8 +203,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 		"	"__UA_ADDR "\t1b, 4b				\n"
 		"	"__UA_ADDR "\t1b, 4b				\n"
 		"	"__UA_ADDR "\t2b, 4b				\n"
 		"	"__UA_ADDR "\t2b, 4b				\n"
 		"	.previous					\n"
 		"	.previous					\n"
-		: "+r" (ret), "=&r" (val), "=" GCC_OFF12_ASM() (*uaddr)
-		: GCC_OFF12_ASM() (*uaddr), "Jr" (oldval), "Jr" (newval),
+		: "+r" (ret), "=&r" (val), "=" GCC_OFF_SMALL_ASM() (*uaddr)
+		: GCC_OFF_SMALL_ASM() (*uaddr), "Jr" (oldval), "Jr" (newval),
 		  "i" (-EFAULT)
 		  "i" (-EFAULT)
 		: "memory");
 		: "memory");
 	} else
 	} else

+ 5 - 4
arch/mips/include/asm/hazards.h

@@ -11,6 +11,7 @@
 #define _ASM_HAZARDS_H
 #define _ASM_HAZARDS_H
 
 
 #include <linux/stringify.h>
 #include <linux/stringify.h>
+#include <asm/compiler.h>
 
 
 #define ___ssnop							\
 #define ___ssnop							\
 	sll	$0, $0, 1
 	sll	$0, $0, 1
@@ -21,7 +22,7 @@
 /*
 /*
  * TLB hazards
  * TLB hazards
  */
  */
-#if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_CPU_CAVIUM_OCTEON)
+#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6) && !defined(CONFIG_CPU_CAVIUM_OCTEON)
 
 
 /*
 /*
  * MIPSR2 defines ehb for hazard avoidance
  * MIPSR2 defines ehb for hazard avoidance
@@ -58,7 +59,7 @@ do {									\
 	unsigned long tmp;						\
 	unsigned long tmp;						\
 									\
 									\
 	__asm__ __volatile__(						\
 	__asm__ __volatile__(						\
-	"	.set	mips64r2				\n"	\
+	"	.set "MIPS_ISA_LEVEL"				\n"	\
 	"	dla	%0, 1f					\n"	\
 	"	dla	%0, 1f					\n"	\
 	"	jr.hb	%0					\n"	\
 	"	jr.hb	%0					\n"	\
 	"	.set	mips0					\n"	\
 	"	.set	mips0					\n"	\
@@ -132,7 +133,7 @@ do {									\
 
 
 #define instruction_hazard()						\
 #define instruction_hazard()						\
 do {									\
 do {									\
-	if (cpu_has_mips_r2)						\
+	if (cpu_has_mips_r2_r6)						\
 		__instruction_hazard();					\
 		__instruction_hazard();					\
 } while (0)
 } while (0)
 
 
@@ -240,7 +241,7 @@ do {									\
 
 
 #define __disable_fpu_hazard
 #define __disable_fpu_hazard
 
 
-#elif defined(CONFIG_CPU_MIPSR2)
+#elif defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
 
 
 #define __enable_fpu_hazard						\
 #define __enable_fpu_hazard						\
 	___ehb
 	___ehb

+ 4 - 3
arch/mips/include/asm/irqflags.h

@@ -15,9 +15,10 @@
 
 
 #include <linux/compiler.h>
 #include <linux/compiler.h>
 #include <linux/stringify.h>
 #include <linux/stringify.h>
+#include <asm/compiler.h>
 #include <asm/hazards.h>
 #include <asm/hazards.h>
 
 
-#ifdef CONFIG_CPU_MIPSR2
+#if defined(CONFIG_CPU_MIPSR2) || defined (CONFIG_CPU_MIPSR6)
 
 
 static inline void arch_local_irq_disable(void)
 static inline void arch_local_irq_disable(void)
 {
 {
@@ -118,7 +119,7 @@ void arch_local_irq_disable(void);
 unsigned long arch_local_irq_save(void);
 unsigned long arch_local_irq_save(void);
 void arch_local_irq_restore(unsigned long flags);
 void arch_local_irq_restore(unsigned long flags);
 void __arch_local_irq_restore(unsigned long flags);
 void __arch_local_irq_restore(unsigned long flags);
-#endif /* CONFIG_CPU_MIPSR2 */
+#endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */
 
 
 static inline void arch_local_irq_enable(void)
 static inline void arch_local_irq_enable(void)
 {
 {
@@ -126,7 +127,7 @@ static inline void arch_local_irq_enable(void)
 	"	.set	push						\n"
 	"	.set	push						\n"
 	"	.set	reorder						\n"
 	"	.set	reorder						\n"
 	"	.set	noat						\n"
 	"	.set	noat						\n"
-#if   defined(CONFIG_CPU_MIPSR2)
+#if   defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
 	"	ei							\n"
 	"	ei							\n"
 #else
 #else
 	"	mfc0	$1,$12						\n"
 	"	mfc0	$1,$12						\n"

+ 3 - 2
arch/mips/include/asm/local.h

@@ -5,6 +5,7 @@
 #include <linux/bitops.h>
 #include <linux/bitops.h>
 #include <linux/atomic.h>
 #include <linux/atomic.h>
 #include <asm/cmpxchg.h>
 #include <asm/cmpxchg.h>
+#include <asm/compiler.h>
 #include <asm/war.h>
 #include <asm/war.h>
 
 
 typedef struct
 typedef struct
@@ -47,7 +48,7 @@ static __inline__ long local_add_return(long i, local_t * l)
 		unsigned long temp;
 		unsigned long temp;
 
 
 		__asm__ __volatile__(
 		__asm__ __volatile__(
-		"	.set	arch=r4000				\n"
+		"	.set	"MIPS_ISA_ARCH_LEVEL"			\n"
 		"1:"	__LL	"%1, %2		# local_add_return	\n"
 		"1:"	__LL	"%1, %2		# local_add_return	\n"
 		"	addu	%0, %1, %3				\n"
 		"	addu	%0, %1, %3				\n"
 			__SC	"%0, %2					\n"
 			__SC	"%0, %2					\n"
@@ -92,7 +93,7 @@ static __inline__ long local_sub_return(long i, local_t * l)
 		unsigned long temp;
 		unsigned long temp;
 
 
 		__asm__ __volatile__(
 		__asm__ __volatile__(
-		"	.set	arch=r4000				\n"
+		"	.set	"MIPS_ISA_ARCH_LEVEL"			\n"
 		"1:"	__LL	"%1, %2		# local_sub_return	\n"
 		"1:"	__LL	"%1, %2		# local_sub_return	\n"
 		"	subu	%0, %1, %3				\n"
 		"	subu	%0, %1, %3				\n"
 			__SC	"%0, %2					\n"
 			__SC	"%0, %2					\n"

+ 12 - 12
arch/mips/include/asm/mach-pmcs-msp71xx/msp_regops.h

@@ -85,8 +85,8 @@ static inline void set_value_reg32(volatile u32 *const addr,
 	"	"__beqz"%0, 1b				\n"
 	"	"__beqz"%0, 1b				\n"
 	"	nop					\n"
 	"	nop					\n"
 	"	.set	pop				\n"
 	"	.set	pop				\n"
-	: "=&r" (temp), "=" GCC_OFF12_ASM() (*addr)
-	: "ir" (~mask), "ir" (value), GCC_OFF12_ASM() (*addr));
+	: "=&r" (temp), "=" GCC_OFF_SMALL_ASM() (*addr)
+	: "ir" (~mask), "ir" (value), GCC_OFF_SMALL_ASM() (*addr));
 }
 }
 
 
 /*
 /*
@@ -106,8 +106,8 @@ static inline void set_reg32(volatile u32 *const addr,
 	"	"__beqz"%0, 1b				\n"
 	"	"__beqz"%0, 1b				\n"
 	"	nop					\n"
 	"	nop					\n"
 	"	.set	pop				\n"
 	"	.set	pop				\n"
-	: "=&r" (temp), "=" GCC_OFF12_ASM() (*addr)
-	: "ir" (mask), GCC_OFF12_ASM() (*addr));
+	: "=&r" (temp), "=" GCC_OFF_SMALL_ASM() (*addr)
+	: "ir" (mask), GCC_OFF_SMALL_ASM() (*addr));
 }
 }
 
 
 /*
 /*
@@ -127,8 +127,8 @@ static inline void clear_reg32(volatile u32 *const addr,
 	"	"__beqz"%0, 1b				\n"
 	"	"__beqz"%0, 1b				\n"
 	"	nop					\n"
 	"	nop					\n"
 	"	.set	pop				\n"
 	"	.set	pop				\n"
-	: "=&r" (temp), "=" GCC_OFF12_ASM() (*addr)
-	: "ir" (~mask), GCC_OFF12_ASM() (*addr));
+	: "=&r" (temp), "=" GCC_OFF_SMALL_ASM() (*addr)
+	: "ir" (~mask), GCC_OFF_SMALL_ASM() (*addr));
 }
 }
 
 
 /*
 /*
@@ -148,8 +148,8 @@ static inline void toggle_reg32(volatile u32 *const addr,
 	"	"__beqz"%0, 1b				\n"
 	"	"__beqz"%0, 1b				\n"
 	"	nop					\n"
 	"	nop					\n"
 	"	.set	pop				\n"
 	"	.set	pop				\n"
-	: "=&r" (temp), "=" GCC_OFF12_ASM() (*addr)
-	: "ir" (mask), GCC_OFF12_ASM() (*addr));
+	: "=&r" (temp), "=" GCC_OFF_SMALL_ASM() (*addr)
+	: "ir" (mask), GCC_OFF_SMALL_ASM() (*addr));
 }
 }
 
 
 /*
 /*
@@ -220,8 +220,8 @@ static inline u32 blocking_read_reg32(volatile u32 *const addr)
 	"	.set	arch=r4000			\n"	\
 	"	.set	arch=r4000			\n"	\
 	"1:	ll	%0, %1	#custom_read_reg32	\n"	\
 	"1:	ll	%0, %1	#custom_read_reg32	\n"	\
 	"	.set	pop				\n"	\
 	"	.set	pop				\n"	\
-	: "=r" (tmp), "=" GCC_OFF12_ASM() (*address)		\
-	: GCC_OFF12_ASM() (*address))
+	: "=r" (tmp), "=" GCC_OFF_SMALL_ASM() (*address)		\
+	: GCC_OFF_SMALL_ASM() (*address))
 
 
 #define custom_write_reg32(address, tmp)			\
 #define custom_write_reg32(address, tmp)			\
 	__asm__ __volatile__(					\
 	__asm__ __volatile__(					\
@@ -231,7 +231,7 @@ static inline u32 blocking_read_reg32(volatile u32 *const addr)
 	"	"__beqz"%0, 1b				\n"	\
 	"	"__beqz"%0, 1b				\n"	\
 	"	nop					\n"	\
 	"	nop					\n"	\
 	"	.set	pop				\n"	\
 	"	.set	pop				\n"	\
-	: "=&r" (tmp), "=" GCC_OFF12_ASM() (*address)		\
-	: "0" (tmp), GCC_OFF12_ASM() (*address))
+	: "=&r" (tmp), "=" GCC_OFF_SMALL_ASM() (*address)		\
+	: "0" (tmp), GCC_OFF_SMALL_ASM() (*address))
 
 
 #endif	/* __ASM_REGOPS_H__ */
 #endif	/* __ASM_REGOPS_H__ */

+ 96 - 0
arch/mips/include/asm/mips-r2-to-r6-emul.h

@@ -0,0 +1,96 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2014 Imagination Technologies Ltd.
+ * Author: Markos Chandras <markos.chandras@imgtec.com>
+ */
+
+#ifndef __ASM_MIPS_R2_TO_R6_EMUL_H
+#define __ASM_MIPS_R2_TO_R6_EMUL_H
+
+struct mips_r2_emulator_stats {
+	u64 movs;
+	u64 hilo;
+	u64 muls;
+	u64 divs;
+	u64 dsps;
+	u64 bops;
+	u64 traps;
+	u64 fpus;
+	u64 loads;
+	u64 stores;
+	u64 llsc;
+	u64 dsemul;
+};
+
+struct mips_r2br_emulator_stats {
+	u64 jrs;
+	u64 bltzl;
+	u64 bgezl;
+	u64 bltzll;
+	u64 bgezll;
+	u64 bltzall;
+	u64 bgezall;
+	u64 bltzal;
+	u64 bgezal;
+	u64 beql;
+	u64 bnel;
+	u64 blezl;
+	u64 bgtzl;
+};
+
+#ifdef CONFIG_DEBUG_FS
+
+#define MIPS_R2_STATS(M)						\
+do {									\
+	u32 nir;							\
+	int err;							\
+									\
+	preempt_disable();						\
+	__this_cpu_inc(mipsr2emustats.M);				\
+	err = __get_user(nir, (u32 __user *)regs->cp0_epc);		\
+	if (!err) {							\
+		if (nir == BREAK_MATH)					\
+			__this_cpu_inc(mipsr2bdemustats.M);		\
+	}								\
+	preempt_enable();						\
+} while (0)
+
+#define MIPS_R2BR_STATS(M)					\
+do {								\
+	preempt_disable();					\
+	__this_cpu_inc(mipsr2bremustats.M);			\
+	preempt_enable();					\
+} while (0)
+
+#else
+
+#define MIPS_R2_STATS(M)          do { } while (0)
+#define MIPS_R2BR_STATS(M)        do { } while (0)
+
+#endif /* CONFIG_DEBUG_FS */
+
+struct r2_decoder_table {
+	u32     mask;
+	u32     code;
+	int     (*func)(struct pt_regs *regs, u32 inst);
+};
+
+
+extern void do_trap_or_bp(struct pt_regs *regs, unsigned int code,
+			  const char *str);
+
+#ifndef CONFIG_MIPSR2_TO_R6_EMULATOR
+static int mipsr2_emulation;
+static __maybe_unused int mipsr2_decoder(struct pt_regs *regs, u32 inst) { return 0; };
+#else
+/* MIPS R2 Emulator ON/OFF */
+extern int mipsr2_emulation;
+extern int mipsr2_decoder(struct pt_regs *regs, u32 inst);
+#endif /* CONFIG_MIPSR2_TO_R6_EMULATOR */
+
+#define NO_R6EMU	(cpu_has_mips_r6 && !mipsr2_emulation)
+
+#endif /* __ASM_MIPS_R2_TO_R6_EMUL_H */

+ 3 - 0
arch/mips/include/asm/mipsregs.h

@@ -653,6 +653,7 @@
 #define MIPS_CONF5_NF		(_ULCAST_(1) << 0)
 #define MIPS_CONF5_NF		(_ULCAST_(1) << 0)
 #define MIPS_CONF5_UFR		(_ULCAST_(1) << 2)
 #define MIPS_CONF5_UFR		(_ULCAST_(1) << 2)
 #define MIPS_CONF5_MRP		(_ULCAST_(1) << 3)
 #define MIPS_CONF5_MRP		(_ULCAST_(1) << 3)
+#define MIPS_CONF5_LLB		(_ULCAST_(1) << 4)
 #define MIPS_CONF5_MVH		(_ULCAST_(1) << 5)
 #define MIPS_CONF5_MVH		(_ULCAST_(1) << 5)
 #define MIPS_CONF5_FRE		(_ULCAST_(1) << 8)
 #define MIPS_CONF5_FRE		(_ULCAST_(1) << 8)
 #define MIPS_CONF5_UFE		(_ULCAST_(1) << 9)
 #define MIPS_CONF5_UFE		(_ULCAST_(1) << 9)
@@ -1127,6 +1128,8 @@ do {									\
 #define write_c0_config6(val)	__write_32bit_c0_register($16, 6, val)
 #define write_c0_config6(val)	__write_32bit_c0_register($16, 6, val)
 #define write_c0_config7(val)	__write_32bit_c0_register($16, 7, val)
 #define write_c0_config7(val)	__write_32bit_c0_register($16, 7, val)
 
 
+#define read_c0_lladdr()	__read_ulong_c0_register($17, 0)
+#define write_c0_lladdr(val)	__write_ulong_c0_register($17, 0, val)
 #define read_c0_maar()		__read_ulong_c0_register($17, 1)
 #define read_c0_maar()		__read_ulong_c0_register($17, 1)
 #define write_c0_maar(val)	__write_ulong_c0_register($17, 1, val)
 #define write_c0_maar(val)	__write_ulong_c0_register($17, 1, val)
 #define read_c0_maari()		__read_32bit_c0_register($17, 2)
 #define read_c0_maari()		__read_32bit_c0_register($17, 2)

+ 4 - 0
arch/mips/include/asm/module.h

@@ -88,10 +88,14 @@ search_module_dbetables(unsigned long addr)
 #define MODULE_PROC_FAMILY "MIPS32_R1 "
 #define MODULE_PROC_FAMILY "MIPS32_R1 "
 #elif defined CONFIG_CPU_MIPS32_R2
 #elif defined CONFIG_CPU_MIPS32_R2
 #define MODULE_PROC_FAMILY "MIPS32_R2 "
 #define MODULE_PROC_FAMILY "MIPS32_R2 "
+#elif defined CONFIG_CPU_MIPS32_R6
+#define MODULE_PROC_FAMILY "MIPS32_R6 "
 #elif defined CONFIG_CPU_MIPS64_R1
 #elif defined CONFIG_CPU_MIPS64_R1
 #define MODULE_PROC_FAMILY "MIPS64_R1 "
 #define MODULE_PROC_FAMILY "MIPS64_R1 "
 #elif defined CONFIG_CPU_MIPS64_R2
 #elif defined CONFIG_CPU_MIPS64_R2
 #define MODULE_PROC_FAMILY "MIPS64_R2 "
 #define MODULE_PROC_FAMILY "MIPS64_R2 "
+#elif defined CONFIG_CPU_MIPS64_R6
+#define MODULE_PROC_FAMILY "MIPS64_R6 "
 #elif defined CONFIG_CPU_R3000
 #elif defined CONFIG_CPU_R3000
 #define MODULE_PROC_FAMILY "R3000 "
 #define MODULE_PROC_FAMILY "R3000 "
 #elif defined CONFIG_CPU_TX39XX
 #elif defined CONFIG_CPU_TX39XX

+ 1 - 1
arch/mips/include/asm/octeon/cvmx-cmd-queue.h

@@ -275,7 +275,7 @@ static inline void __cvmx_cmd_queue_lock(cvmx_cmd_queue_id_t queue_id,
 		" lbu	%[ticket], %[now_serving]\n"
 		" lbu	%[ticket], %[now_serving]\n"
 		"4:\n"
 		"4:\n"
 		".set pop\n" :
 		".set pop\n" :
-		[ticket_ptr] "=" GCC_OFF12_ASM()(__cvmx_cmd_queue_state_ptr->ticket[__cvmx_cmd_queue_get_index(queue_id)]),
+		[ticket_ptr] "=" GCC_OFF_SMALL_ASM()(__cvmx_cmd_queue_state_ptr->ticket[__cvmx_cmd_queue_get_index(queue_id)]),
 		[now_serving] "=m"(qptr->now_serving), [ticket] "=r"(tmp),
 		[now_serving] "=m"(qptr->now_serving), [ticket] "=r"(tmp),
 		[my_ticket] "=r"(my_ticket)
 		[my_ticket] "=r"(my_ticket)
 	    );
 	    );

+ 148 - 2
arch/mips/include/asm/r4kcache.h

@@ -14,6 +14,7 @@
 
 
 #include <asm/asm.h>
 #include <asm/asm.h>
 #include <asm/cacheops.h>
 #include <asm/cacheops.h>
+#include <asm/compiler.h>
 #include <asm/cpu-features.h>
 #include <asm/cpu-features.h>
 #include <asm/cpu-type.h>
 #include <asm/cpu-type.h>
 #include <asm/mipsmtregs.h>
 #include <asm/mipsmtregs.h>
@@ -39,7 +40,7 @@ extern void (*r4k_blast_icache)(void);
 	__asm__ __volatile__(						\
 	__asm__ __volatile__(						\
 	"	.set	push					\n"	\
 	"	.set	push					\n"	\
 	"	.set	noreorder				\n"	\
 	"	.set	noreorder				\n"	\
-	"	.set	arch=r4000				\n"	\
+	"	.set "MIPS_ISA_ARCH_LEVEL"			\n"	\
 	"	cache	%0, %1					\n"	\
 	"	cache	%0, %1					\n"	\
 	"	.set	pop					\n"	\
 	"	.set	pop					\n"	\
 	:								\
 	:								\
@@ -147,7 +148,7 @@ static inline void flush_scache_line(unsigned long addr)
 	__asm__ __volatile__(					\
 	__asm__ __volatile__(					\
 	"	.set	push			\n"		\
 	"	.set	push			\n"		\
 	"	.set	noreorder		\n"		\
 	"	.set	noreorder		\n"		\
-	"	.set	arch=r4000		\n"		\
+	"	.set "MIPS_ISA_ARCH_LEVEL"	\n"		\
 	"1:	cache	%0, (%1)		\n"		\
 	"1:	cache	%0, (%1)		\n"		\
 	"2:	.set	pop			\n"		\
 	"2:	.set	pop			\n"		\
 	"	.section __ex_table,\"a\"	\n"		\
 	"	.section __ex_table,\"a\"	\n"		\
@@ -218,6 +219,7 @@ static inline void invalidate_tcache_page(unsigned long addr)
 	cache_op(Page_Invalidate_T, addr);
 	cache_op(Page_Invalidate_T, addr);
 }
 }
 
 
+#ifndef CONFIG_CPU_MIPSR6
 #define cache16_unroll32(base,op)					\
 #define cache16_unroll32(base,op)					\
 	__asm__ __volatile__(						\
 	__asm__ __volatile__(						\
 	"	.set push					\n"	\
 	"	.set push					\n"	\
@@ -322,6 +324,150 @@ static inline void invalidate_tcache_page(unsigned long addr)
 		: "r" (base),						\
 		: "r" (base),						\
 		  "i" (op));
 		  "i" (op));
 
 
+#else
+/*
+ * MIPS R6 changed the cache opcode and moved to a 8-bit offset field.
+ * This means we now need to increment the base register before we flush
+ * more cache lines
+ */
+#define cache16_unroll32(base,op)				\
+	__asm__ __volatile__(					\
+	"	.set push\n"					\
+	"	.set noreorder\n"				\
+	"	.set mips64r6\n"				\
+	"	.set noat\n"					\
+	"	cache %1, 0x000(%0); cache %1, 0x010(%0)\n"	\
+	"	cache %1, 0x020(%0); cache %1, 0x030(%0)\n"	\
+	"	cache %1, 0x040(%0); cache %1, 0x050(%0)\n"	\
+	"	cache %1, 0x060(%0); cache %1, 0x070(%0)\n"	\
+	"	cache %1, 0x080(%0); cache %1, 0x090(%0)\n"	\
+	"	cache %1, 0x0a0(%0); cache %1, 0x0b0(%0)\n"	\
+	"	cache %1, 0x0c0(%0); cache %1, 0x0d0(%0)\n"	\
+	"	cache %1, 0x0e0(%0); cache %1, 0x0f0(%0)\n"	\
+	"	addiu $1, $0, 0x100			\n"	\
+	"	cache %1, 0x000($1); cache %1, 0x010($1)\n"	\
+	"	cache %1, 0x020($1); cache %1, 0x030($1)\n"	\
+	"	cache %1, 0x040($1); cache %1, 0x050($1)\n"	\
+	"	cache %1, 0x060($1); cache %1, 0x070($1)\n"	\
+	"	cache %1, 0x080($1); cache %1, 0x090($1)\n"	\
+	"	cache %1, 0x0a0($1); cache %1, 0x0b0($1)\n"	\
+	"	cache %1, 0x0c0($1); cache %1, 0x0d0($1)\n"	\
+	"	cache %1, 0x0e0($1); cache %1, 0x0f0($1)\n"	\
+	"	.set pop\n"					\
+		:						\
+		: "r" (base),					\
+		  "i" (op));
+
+#define cache32_unroll32(base,op)				\
+	__asm__ __volatile__(					\
+	"	.set push\n"					\
+	"	.set noreorder\n"				\
+	"	.set mips64r6\n"				\
+	"	.set noat\n"					\
+	"	cache %1, 0x000(%0); cache %1, 0x020(%0)\n"	\
+	"	cache %1, 0x040(%0); cache %1, 0x060(%0)\n"	\
+	"	cache %1, 0x080(%0); cache %1, 0x0a0(%0)\n"	\
+	"	cache %1, 0x0c0(%0); cache %1, 0x0e0(%0)\n"	\
+	"	addiu $1, %0, 0x100\n"				\
+	"	cache %1, 0x000($1); cache %1, 0x020($1)\n"	\
+	"	cache %1, 0x040($1); cache %1, 0x060($1)\n"	\
+	"	cache %1, 0x080($1); cache %1, 0x0a0($1)\n"	\
+	"	cache %1, 0x0c0($1); cache %1, 0x0e0($1)\n"	\
+	"	addiu $1, $1, 0x100\n"				\
+	"	cache %1, 0x000($1); cache %1, 0x020($1)\n"	\
+	"	cache %1, 0x040($1); cache %1, 0x060($1)\n"	\
+	"	cache %1, 0x080($1); cache %1, 0x0a0($1)\n"	\
+	"	cache %1, 0x0c0($1); cache %1, 0x0e0($1)\n"	\
+	"	addiu $1, $1, 0x100\n"				\
+	"	cache %1, 0x000($1); cache %1, 0x020($1)\n"	\
+	"	cache %1, 0x040($1); cache %1, 0x060($1)\n"	\
+	"	cache %1, 0x080($1); cache %1, 0x0a0($1)\n"	\
+	"	cache %1, 0x0c0($1); cache %1, 0x0e0($1)\n"	\
+	"	.set pop\n"					\
+		:						\
+		: "r" (base),					\
+		  "i" (op));
+
+#define cache64_unroll32(base,op)				\
+	__asm__ __volatile__(					\
+	"	.set push\n"					\
+	"	.set noreorder\n"				\
+	"	.set mips64r6\n"				\
+	"	.set noat\n"					\
+	"	cache %1, 0x000(%0); cache %1, 0x040(%0)\n"	\
+	"	cache %1, 0x080(%0); cache %1, 0x0c0(%0)\n"	\
+	"	addiu $1, %0, 0x100\n"				\
+	"	cache %1, 0x000($1); cache %1, 0x040($1)\n"	\
+	"	cache %1, 0x080($1); cache %1, 0x0c0($1)\n"	\
+	"	addiu $1, %0, 0x100\n"				\
+	"	cache %1, 0x000($1); cache %1, 0x040($1)\n"	\
+	"	cache %1, 0x080($1); cache %1, 0x0c0($1)\n"	\
+	"	addiu $1, %0, 0x100\n"				\
+	"	cache %1, 0x000($1); cache %1, 0x040($1)\n"	\
+	"	cache %1, 0x080($1); cache %1, 0x0c0($1)\n"	\
+	"	addiu $1, %0, 0x100\n"				\
+	"	cache %1, 0x000($1); cache %1, 0x040($1)\n"	\
+	"	cache %1, 0x080($1); cache %1, 0x0c0($1)\n"	\
+	"	addiu $1, %0, 0x100\n"				\
+	"	cache %1, 0x000($1); cache %1, 0x040($1)\n"	\
+	"	cache %1, 0x080($1); cache %1, 0x0c0($1)\n"	\
+	"	addiu $1, %0, 0x100\n"				\
+	"	cache %1, 0x000($1); cache %1, 0x040($1)\n"	\
+	"	cache %1, 0x080($1); cache %1, 0x0c0($1)\n"	\
+	"	addiu $1, %0, 0x100\n"				\
+	"	cache %1, 0x000($1); cache %1, 0x040($1)\n"	\
+	"	cache %1, 0x080($1); cache %1, 0x0c0($1)\n"	\
+	"	.set pop\n"					\
+		:						\
+		: "r" (base),					\
+		  "i" (op));
+
+#define cache128_unroll32(base,op)				\
+	__asm__ __volatile__(					\
+	"	.set push\n"					\
+	"	.set noreorder\n"				\
+	"	.set mips64r6\n"				\
+	"	.set noat\n"					\
+	"	cache %1, 0x000(%0); cache %1, 0x080(%0)\n"	\
+	"	addiu $1, %0, 0x100\n"				\
+	"	cache %1, 0x000(%0); cache %1, 0x080(%0)\n"	\
+	"	addiu $1, %0, 0x100\n"				\
+	"	cache %1, 0x000(%0); cache %1, 0x080(%0)\n"	\
+	"	addiu $1, %0, 0x100\n"				\
+	"	cache %1, 0x000(%0); cache %1, 0x080(%0)\n"	\
+	"	addiu $1, %0, 0x100\n"				\
+	"	cache %1, 0x000(%0); cache %1, 0x080(%0)\n"	\
+	"	addiu $1, %0, 0x100\n"				\
+	"	cache %1, 0x000(%0); cache %1, 0x080(%0)\n"	\
+	"	addiu $1, %0, 0x100\n"				\
+	"	cache %1, 0x000(%0); cache %1, 0x080(%0)\n"	\
+	"	addiu $1, %0, 0x100\n"				\
+	"	cache %1, 0x000(%0); cache %1, 0x080(%0)\n"	\
+	"	addiu $1, %0, 0x100\n"				\
+	"	cache %1, 0x000(%0); cache %1, 0x080(%0)\n"	\
+	"	addiu $1, %0, 0x100\n"				\
+	"	cache %1, 0x000(%0); cache %1, 0x080(%0)\n"	\
+	"	addiu $1, %0, 0x100\n"				\
+	"	cache %1, 0x000(%0); cache %1, 0x080(%0)\n"	\
+	"	addiu $1, %0, 0x100\n"				\
+	"	cache %1, 0x000(%0); cache %1, 0x080(%0)\n"	\
+	"	addiu $1, %0, 0x100\n"				\
+	"	cache %1, 0x000(%0); cache %1, 0x080(%0)\n"	\
+	"	addiu $1, %0, 0x100\n"				\
+	"	cache %1, 0x000(%0); cache %1, 0x080(%0)\n"	\
+	"	addiu $1, %0, 0x100\n"				\
+	"	cache %1, 0x000(%0); cache %1, 0x080(%0)\n"	\
+	"	addiu $1, %0, 0x100\n"				\
+	"	cache %1, 0x000(%0); cache %1, 0x080(%0)\n"	\
+	"	addiu $1, %0, 0x100\n"				\
+	"	cache %1, 0x000(%0); cache %1, 0x080(%0)\n"	\
+	"	addiu $1, %0, 0x100\n"				\
+	"	.set pop\n"					\
+		:						\
+		: "r" (base),					\
+		  "i" (op));
+#endif /* CONFIG_CPU_MIPSR6 */
+
 /*
 /*
  * Perform the cache operation specified by op using a user mode virtual
  * Perform the cache operation specified by op using a user mode virtual
  * address while in kernel mode.
  * address while in kernel mode.

+ 26 - 29
arch/mips/include/asm/spinlock.h

@@ -89,7 +89,7 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
 		"	 subu	%[ticket], %[ticket], 1			\n"
 		"	 subu	%[ticket], %[ticket], 1			\n"
 		"	.previous					\n"
 		"	.previous					\n"
 		"	.set pop					\n"
 		"	.set pop					\n"
-		: [ticket_ptr] "+" GCC_OFF12_ASM() (lock->lock),
+		: [ticket_ptr] "+" GCC_OFF_SMALL_ASM() (lock->lock),
 		  [serving_now_ptr] "+m" (lock->h.serving_now),
 		  [serving_now_ptr] "+m" (lock->h.serving_now),
 		  [ticket] "=&r" (tmp),
 		  [ticket] "=&r" (tmp),
 		  [my_ticket] "=&r" (my_ticket)
 		  [my_ticket] "=&r" (my_ticket)
@@ -122,7 +122,7 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
 		"	 subu	%[ticket], %[ticket], 1			\n"
 		"	 subu	%[ticket], %[ticket], 1			\n"
 		"	.previous					\n"
 		"	.previous					\n"
 		"	.set pop					\n"
 		"	.set pop					\n"
-		: [ticket_ptr] "+" GCC_OFF12_ASM() (lock->lock),
+		: [ticket_ptr] "+" GCC_OFF_SMALL_ASM() (lock->lock),
 		  [serving_now_ptr] "+m" (lock->h.serving_now),
 		  [serving_now_ptr] "+m" (lock->h.serving_now),
 		  [ticket] "=&r" (tmp),
 		  [ticket] "=&r" (tmp),
 		  [my_ticket] "=&r" (my_ticket)
 		  [my_ticket] "=&r" (my_ticket)
@@ -164,7 +164,7 @@ static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock)
 		"	 li	%[ticket], 0				\n"
 		"	 li	%[ticket], 0				\n"
 		"	.previous					\n"
 		"	.previous					\n"
 		"	.set pop					\n"
 		"	.set pop					\n"
-		: [ticket_ptr] "+" GCC_OFF12_ASM() (lock->lock),
+		: [ticket_ptr] "+" GCC_OFF_SMALL_ASM() (lock->lock),
 		  [ticket] "=&r" (tmp),
 		  [ticket] "=&r" (tmp),
 		  [my_ticket] "=&r" (tmp2),
 		  [my_ticket] "=&r" (tmp2),
 		  [now_serving] "=&r" (tmp3)
 		  [now_serving] "=&r" (tmp3)
@@ -188,7 +188,7 @@ static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock)
 		"	 li	%[ticket], 0				\n"
 		"	 li	%[ticket], 0				\n"
 		"	.previous					\n"
 		"	.previous					\n"
 		"	.set pop					\n"
 		"	.set pop					\n"
-		: [ticket_ptr] "+" GCC_OFF12_ASM() (lock->lock),
+		: [ticket_ptr] "+" GCC_OFF_SMALL_ASM() (lock->lock),
 		  [ticket] "=&r" (tmp),
 		  [ticket] "=&r" (tmp),
 		  [my_ticket] "=&r" (tmp2),
 		  [my_ticket] "=&r" (tmp2),
 		  [now_serving] "=&r" (tmp3)
 		  [now_serving] "=&r" (tmp3)
@@ -235,8 +235,8 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
 		"	beqzl	%1, 1b					\n"
 		"	beqzl	%1, 1b					\n"
 		"	 nop						\n"
 		"	 nop						\n"
 		"	.set	reorder					\n"
 		"	.set	reorder					\n"
-		: "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp)
-		: GCC_OFF12_ASM() (rw->lock)
+		: "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp)
+		: GCC_OFF_SMALL_ASM() (rw->lock)
 		: "memory");
 		: "memory");
 	} else {
 	} else {
 		do {
 		do {
@@ -245,8 +245,8 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
 			"	bltz	%1, 1b				\n"
 			"	bltz	%1, 1b				\n"
 			"	 addu	%1, 1				\n"
 			"	 addu	%1, 1				\n"
 			"2:	sc	%1, %0				\n"
 			"2:	sc	%1, %0				\n"
-			: "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp)
-			: GCC_OFF12_ASM() (rw->lock)
+			: "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp)
+			: GCC_OFF_SMALL_ASM() (rw->lock)
 			: "memory");
 			: "memory");
 		} while (unlikely(!tmp));
 		} while (unlikely(!tmp));
 	}
 	}
@@ -254,9 +254,6 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
 	smp_llsc_mb();
 	smp_llsc_mb();
 }
 }
 
 
-/* Note the use of sub, not subu which will make the kernel die with an
-   overflow exception if we ever try to unlock an rwlock that is already
-   unlocked or is being held by a writer.  */
 static inline void arch_read_unlock(arch_rwlock_t *rw)
 static inline void arch_read_unlock(arch_rwlock_t *rw)
 {
 {
 	unsigned int tmp;
 	unsigned int tmp;
@@ -266,20 +263,20 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
 	if (R10000_LLSC_WAR) {
 	if (R10000_LLSC_WAR) {
 		__asm__ __volatile__(
 		__asm__ __volatile__(
 		"1:	ll	%1, %2		# arch_read_unlock	\n"
 		"1:	ll	%1, %2		# arch_read_unlock	\n"
-		"	sub	%1, 1					\n"
+		"	addiu	%1, 1					\n"
 		"	sc	%1, %0					\n"
 		"	sc	%1, %0					\n"
 		"	beqzl	%1, 1b					\n"
 		"	beqzl	%1, 1b					\n"
-		: "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp)
-		: GCC_OFF12_ASM() (rw->lock)
+		: "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp)
+		: GCC_OFF_SMALL_ASM() (rw->lock)
 		: "memory");
 		: "memory");
 	} else {
 	} else {
 		do {
 		do {
 			__asm__ __volatile__(
 			__asm__ __volatile__(
 			"1:	ll	%1, %2	# arch_read_unlock	\n"
 			"1:	ll	%1, %2	# arch_read_unlock	\n"
-			"	sub	%1, 1				\n"
+			"	addiu	%1, -1				\n"
 			"	sc	%1, %0				\n"
 			"	sc	%1, %0				\n"
-			: "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp)
-			: GCC_OFF12_ASM() (rw->lock)
+			: "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp)
+			: GCC_OFF_SMALL_ASM() (rw->lock)
 			: "memory");
 			: "memory");
 		} while (unlikely(!tmp));
 		} while (unlikely(!tmp));
 	}
 	}
@@ -299,8 +296,8 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
 		"	beqzl	%1, 1b					\n"
 		"	beqzl	%1, 1b					\n"
 		"	 nop						\n"
 		"	 nop						\n"
 		"	.set	reorder					\n"
 		"	.set	reorder					\n"
-		: "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp)
-		: GCC_OFF12_ASM() (rw->lock)
+		: "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp)
+		: GCC_OFF_SMALL_ASM() (rw->lock)
 		: "memory");
 		: "memory");
 	} else {
 	} else {
 		do {
 		do {
@@ -309,8 +306,8 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
 			"	bnez	%1, 1b				\n"
 			"	bnez	%1, 1b				\n"
 			"	 lui	%1, 0x8000			\n"
 			"	 lui	%1, 0x8000			\n"
 			"2:	sc	%1, %0				\n"
 			"2:	sc	%1, %0				\n"
-			: "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp)
-			: GCC_OFF12_ASM() (rw->lock)
+			: "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp)
+			: GCC_OFF_SMALL_ASM() (rw->lock)
 			: "memory");
 			: "memory");
 		} while (unlikely(!tmp));
 		} while (unlikely(!tmp));
 	}
 	}
@@ -349,8 +346,8 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
 		__WEAK_LLSC_MB
 		__WEAK_LLSC_MB
 		"	li	%2, 1					\n"
 		"	li	%2, 1					\n"
 		"2:							\n"
 		"2:							\n"
-		: "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret)
-		: GCC_OFF12_ASM() (rw->lock)
+		: "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret)
+		: GCC_OFF_SMALL_ASM() (rw->lock)
 		: "memory");
 		: "memory");
 	} else {
 	} else {
 		__asm__ __volatile__(
 		__asm__ __volatile__(
@@ -366,8 +363,8 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
 		__WEAK_LLSC_MB
 		__WEAK_LLSC_MB
 		"	li	%2, 1					\n"
 		"	li	%2, 1					\n"
 		"2:							\n"
 		"2:							\n"
-		: "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret)
-		: GCC_OFF12_ASM() (rw->lock)
+		: "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret)
+		: GCC_OFF_SMALL_ASM() (rw->lock)
 		: "memory");
 		: "memory");
 	}
 	}
 
 
@@ -393,8 +390,8 @@ static inline int arch_write_trylock(arch_rwlock_t *rw)
 		"	li	%2, 1					\n"
 		"	li	%2, 1					\n"
 		"	.set	reorder					\n"
 		"	.set	reorder					\n"
 		"2:							\n"
 		"2:							\n"
-		: "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret)
-		: GCC_OFF12_ASM() (rw->lock)
+		: "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret)
+		: GCC_OFF_SMALL_ASM() (rw->lock)
 		: "memory");
 		: "memory");
 	} else {
 	} else {
 		do {
 		do {
@@ -406,9 +403,9 @@ static inline int arch_write_trylock(arch_rwlock_t *rw)
 			"	sc	%1, %0				\n"
 			"	sc	%1, %0				\n"
 			"	li	%2, 1				\n"
 			"	li	%2, 1				\n"
 			"2:						\n"
 			"2:						\n"
-			: "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp),
+			: "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp),
 			  "=&r" (ret)
 			  "=&r" (ret)
-			: GCC_OFF12_ASM() (rw->lock)
+			: GCC_OFF_SMALL_ASM() (rw->lock)
 			: "memory");
 			: "memory");
 		} while (unlikely(!tmp));
 		} while (unlikely(!tmp));
 
 

+ 2 - 2
arch/mips/include/asm/spram.h

@@ -1,10 +1,10 @@
 #ifndef _MIPS_SPRAM_H
 #ifndef _MIPS_SPRAM_H
 #define _MIPS_SPRAM_H
 #define _MIPS_SPRAM_H
 
 
-#ifdef CONFIG_CPU_MIPSR2
+#if defined(CONFIG_MIPS_SPRAM)
 extern __init void spram_config(void);
 extern __init void spram_config(void);
 #else
 #else
 static inline void spram_config(void) { };
 static inline void spram_config(void) { };
-#endif /* CONFIG_CPU_MIPSR2 */
+#endif /* CONFIG_MIPS_SPRAM */
 
 
 #endif /* _MIPS_SPRAM_H */
 #endif /* _MIPS_SPRAM_H */

+ 4 - 4
arch/mips/include/asm/stackframe.h

@@ -40,7 +40,7 @@
 		LONG_S	v1, PT_HI(sp)
 		LONG_S	v1, PT_HI(sp)
 		mflhxu	v1
 		mflhxu	v1
 		LONG_S	v1, PT_ACX(sp)
 		LONG_S	v1, PT_ACX(sp)
-#else
+#elif !defined(CONFIG_CPU_MIPSR6)
 		mfhi	v1
 		mfhi	v1
 #endif
 #endif
 #ifdef CONFIG_32BIT
 #ifdef CONFIG_32BIT
@@ -50,7 +50,7 @@
 		LONG_S	$10, PT_R10(sp)
 		LONG_S	$10, PT_R10(sp)
 		LONG_S	$11, PT_R11(sp)
 		LONG_S	$11, PT_R11(sp)
 		LONG_S	$12, PT_R12(sp)
 		LONG_S	$12, PT_R12(sp)
-#ifndef CONFIG_CPU_HAS_SMARTMIPS
+#if !defined(CONFIG_CPU_HAS_SMARTMIPS) && !defined(CONFIG_CPU_MIPSR6)
 		LONG_S	v1, PT_HI(sp)
 		LONG_S	v1, PT_HI(sp)
 		mflo	v1
 		mflo	v1
 #endif
 #endif
@@ -58,7 +58,7 @@
 		LONG_S	$14, PT_R14(sp)
 		LONG_S	$14, PT_R14(sp)
 		LONG_S	$15, PT_R15(sp)
 		LONG_S	$15, PT_R15(sp)
 		LONG_S	$24, PT_R24(sp)
 		LONG_S	$24, PT_R24(sp)
-#ifndef CONFIG_CPU_HAS_SMARTMIPS
+#if !defined(CONFIG_CPU_HAS_SMARTMIPS) && !defined(CONFIG_CPU_MIPSR6)
 		LONG_S	v1, PT_LO(sp)
 		LONG_S	v1, PT_LO(sp)
 #endif
 #endif
 #ifdef CONFIG_CPU_CAVIUM_OCTEON
 #ifdef CONFIG_CPU_CAVIUM_OCTEON
@@ -226,7 +226,7 @@
 		mtlhx	$24
 		mtlhx	$24
 		LONG_L	$24, PT_LO(sp)
 		LONG_L	$24, PT_LO(sp)
 		mtlhx	$24
 		mtlhx	$24
-#else
+#elif !defined(CONFIG_CPU_MIPSR6)
 		LONG_L	$24, PT_LO(sp)
 		LONG_L	$24, PT_LO(sp)
 		mtlo	$24
 		mtlo	$24
 		LONG_L	$24, PT_HI(sp)
 		LONG_L	$24, PT_HI(sp)

+ 6 - 3
arch/mips/include/asm/switch_to.h

@@ -75,9 +75,12 @@ do {									\
 #endif
 #endif
 
 
 #define __clear_software_ll_bit()					\
 #define __clear_software_ll_bit()					\
-do {									\
-	if (!__builtin_constant_p(cpu_has_llsc) || !cpu_has_llsc)	\
-		ll_bit = 0;						\
+do {	if (cpu_has_rw_llb) {						\
+		write_c0_lladdr(0);					\
+	} else {							\
+		if (!__builtin_constant_p(cpu_has_llsc) || !cpu_has_llsc)\
+			ll_bit = 0;					\
+	}								\
 } while (0)
 } while (0)
 
 
 #define switch_to(prev, next, last)					\
 #define switch_to(prev, next, last)					\

+ 1 - 1
arch/mips/include/asm/thread_info.h

@@ -28,7 +28,7 @@ struct thread_info {
 	unsigned long		tp_value;	/* thread pointer */
 	unsigned long		tp_value;	/* thread pointer */
 	__u32			cpu;		/* current CPU */
 	__u32			cpu;		/* current CPU */
 	int			preempt_count;	/* 0 => preemptable, <0 => BUG */
 	int			preempt_count;	/* 0 => preemptable, <0 => BUG */
-
+	int			r2_emul_return; /* 1 => Returning from R2 emulator */
 	mm_segment_t		addr_limit;	/*
 	mm_segment_t		addr_limit;	/*
 						 * thread address space limit:
 						 * thread address space limit:
 						 * 0x7fffffff for user-thead
 						 * 0x7fffffff for user-thead

+ 14 - 10
arch/mips/include/uapi/asm/inst.h

@@ -21,20 +21,20 @@
 enum major_op {
 enum major_op {
 	spec_op, bcond_op, j_op, jal_op,
 	spec_op, bcond_op, j_op, jal_op,
 	beq_op, bne_op, blez_op, bgtz_op,
 	beq_op, bne_op, blez_op, bgtz_op,
-	addi_op, addiu_op, slti_op, sltiu_op,
+	addi_op, cbcond0_op = addi_op, addiu_op, slti_op, sltiu_op,
 	andi_op, ori_op, xori_op, lui_op,
 	andi_op, ori_op, xori_op, lui_op,
 	cop0_op, cop1_op, cop2_op, cop1x_op,
 	cop0_op, cop1_op, cop2_op, cop1x_op,
 	beql_op, bnel_op, blezl_op, bgtzl_op,
 	beql_op, bnel_op, blezl_op, bgtzl_op,
-	daddi_op, daddiu_op, ldl_op, ldr_op,
+	daddi_op, cbcond1_op = daddi_op, daddiu_op, ldl_op, ldr_op,
 	spec2_op, jalx_op, mdmx_op, spec3_op,
 	spec2_op, jalx_op, mdmx_op, spec3_op,
 	lb_op, lh_op, lwl_op, lw_op,
 	lb_op, lh_op, lwl_op, lw_op,
 	lbu_op, lhu_op, lwr_op, lwu_op,
 	lbu_op, lhu_op, lwr_op, lwu_op,
 	sb_op, sh_op, swl_op, sw_op,
 	sb_op, sh_op, swl_op, sw_op,
 	sdl_op, sdr_op, swr_op, cache_op,
 	sdl_op, sdr_op, swr_op, cache_op,
-	ll_op, lwc1_op, lwc2_op, pref_op,
-	lld_op, ldc1_op, ldc2_op, ld_op,
-	sc_op, swc1_op, swc2_op, major_3b_op,
-	scd_op, sdc1_op, sdc2_op, sd_op
+	ll_op, lwc1_op, lwc2_op, bc6_op = lwc2_op, pref_op,
+	lld_op, ldc1_op, ldc2_op, beqzcjic_op = ldc2_op, ld_op,
+	sc_op, swc1_op, swc2_op, balc6_op = swc2_op, major_3b_op,
+	scd_op, sdc1_op, sdc2_op, bnezcjialc_op = sdc2_op, sd_op
 };
 };
 
 
 /*
 /*
@@ -83,9 +83,12 @@ enum spec3_op {
 	swe_op    = 0x1f, bshfl_op  = 0x20,
 	swe_op    = 0x1f, bshfl_op  = 0x20,
 	swle_op   = 0x21, swre_op   = 0x22,
 	swle_op   = 0x21, swre_op   = 0x22,
 	prefe_op  = 0x23, dbshfl_op = 0x24,
 	prefe_op  = 0x23, dbshfl_op = 0x24,
-	lbue_op   = 0x28, lhue_op   = 0x29,
-	lbe_op    = 0x2c, lhe_op    = 0x2d,
-	lle_op    = 0x2e, lwe_op    = 0x2f,
+	cache6_op = 0x25, sc6_op    = 0x26,
+	scd6_op   = 0x27, lbue_op   = 0x28,
+	lhue_op   = 0x29, lbe_op    = 0x2c,
+	lhe_op    = 0x2d, lle_op    = 0x2e,
+	lwe_op    = 0x2f, pref6_op  = 0x35,
+	ll6_op    = 0x36, lld6_op   = 0x37,
 	rdhwr_op  = 0x3b
 	rdhwr_op  = 0x3b
 };
 };
 
 
@@ -112,7 +115,8 @@ enum cop_op {
 	mfhc_op       = 0x03, mtc_op	    = 0x04,
 	mfhc_op       = 0x03, mtc_op	    = 0x04,
 	dmtc_op	      = 0x05, ctc_op	    = 0x06,
 	dmtc_op	      = 0x05, ctc_op	    = 0x06,
 	mthc0_op      = 0x06, mthc_op	    = 0x07,
 	mthc0_op      = 0x06, mthc_op	    = 0x07,
-	bc_op	      = 0x08, cop_op	    = 0x10,
+	bc_op	      = 0x08, bc1eqz_op     = 0x09,
+	bc1nez_op     = 0x0d, cop_op	    = 0x10,
 	copm_op	      = 0x18
 	copm_op	      = 0x18
 };
 };
 
 

+ 2 - 1
arch/mips/kernel/Makefile

@@ -52,7 +52,7 @@ obj-$(CONFIG_MIPS_MT_SMP)	+= smp-mt.o
 obj-$(CONFIG_MIPS_CMP)		+= smp-cmp.o
 obj-$(CONFIG_MIPS_CMP)		+= smp-cmp.o
 obj-$(CONFIG_MIPS_CPS)		+= smp-cps.o cps-vec.o
 obj-$(CONFIG_MIPS_CPS)		+= smp-cps.o cps-vec.o
 obj-$(CONFIG_MIPS_GIC_IPI)	+= smp-gic.o
 obj-$(CONFIG_MIPS_GIC_IPI)	+= smp-gic.o
-obj-$(CONFIG_CPU_MIPSR2)	+= spram.o
+obj-$(CONFIG_MIPS_SPRAM)	+= spram.o
 
 
 obj-$(CONFIG_MIPS_VPE_LOADER)	+= vpe.o
 obj-$(CONFIG_MIPS_VPE_LOADER)	+= vpe.o
 obj-$(CONFIG_MIPS_VPE_LOADER_CMP) += vpe-cmp.o
 obj-$(CONFIG_MIPS_VPE_LOADER_CMP) += vpe-cmp.o
@@ -90,6 +90,7 @@ obj-$(CONFIG_EARLY_PRINTK)	+= early_printk.o
 obj-$(CONFIG_EARLY_PRINTK_8250)	+= early_printk_8250.o
 obj-$(CONFIG_EARLY_PRINTK_8250)	+= early_printk_8250.o
 obj-$(CONFIG_SPINLOCK_TEST)	+= spinlock_test.o
 obj-$(CONFIG_SPINLOCK_TEST)	+= spinlock_test.o
 obj-$(CONFIG_MIPS_MACHINE)	+= mips_machine.o
 obj-$(CONFIG_MIPS_MACHINE)	+= mips_machine.o
+obj-$(CONFIG_MIPSR2_TO_R6_EMULATOR)	+= mips-r2-to-r6-emul.o
 
 
 CFLAGS_cpu-bugs64.o	= $(shell if $(CC) $(KBUILD_CFLAGS) -Wa,-mdaddi -c -o /dev/null -x c /dev/null >/dev/null 2>&1; then echo "-DHAVE_AS_SET_DADDI"; fi)
 CFLAGS_cpu-bugs64.o	= $(shell if $(CC) $(KBUILD_CFLAGS) -Wa,-mdaddi -c -o /dev/null -x c /dev/null >/dev/null 2>&1; then echo "-DHAVE_AS_SET_DADDI"; fi)
 
 

+ 1 - 0
arch/mips/kernel/asm-offsets.c

@@ -97,6 +97,7 @@ void output_thread_info_defines(void)
 	OFFSET(TI_TP_VALUE, thread_info, tp_value);
 	OFFSET(TI_TP_VALUE, thread_info, tp_value);
 	OFFSET(TI_CPU, thread_info, cpu);
 	OFFSET(TI_CPU, thread_info, cpu);
 	OFFSET(TI_PRE_COUNT, thread_info, preempt_count);
 	OFFSET(TI_PRE_COUNT, thread_info, preempt_count);
+	OFFSET(TI_R2_EMUL_RET, thread_info, r2_emul_return);
 	OFFSET(TI_ADDR_LIMIT, thread_info, addr_limit);
 	OFFSET(TI_ADDR_LIMIT, thread_info, addr_limit);
 	OFFSET(TI_RESTART_BLOCK, thread_info, restart_block);
 	OFFSET(TI_RESTART_BLOCK, thread_info, restart_block);
 	OFFSET(TI_REGS, thread_info, regs);
 	OFFSET(TI_REGS, thread_info, regs);

+ 250 - 38
arch/mips/kernel/branch.c

@@ -16,6 +16,7 @@
 #include <asm/fpu.h>
 #include <asm/fpu.h>
 #include <asm/fpu_emulator.h>
 #include <asm/fpu_emulator.h>
 #include <asm/inst.h>
 #include <asm/inst.h>
+#include <asm/mips-r2-to-r6-emul.h>
 #include <asm/ptrace.h>
 #include <asm/ptrace.h>
 #include <asm/uaccess.h>
 #include <asm/uaccess.h>
 
 
@@ -399,11 +400,21 @@ int __MIPS16e_compute_return_epc(struct pt_regs *regs)
  * @returns:	-EFAULT on error and forces SIGBUS, and on success
  * @returns:	-EFAULT on error and forces SIGBUS, and on success
  *		returns 0 or BRANCH_LIKELY_TAKEN as appropriate after
  *		returns 0 or BRANCH_LIKELY_TAKEN as appropriate after
  *		evaluating the branch.
  *		evaluating the branch.
+ *
+ * MIPS R6 Compact branches and forbidden slots:
+ *	Compact branches do not throw exceptions because they do
+ *	not have delay slots. The forbidden slot instruction ($PC+4)
+ *	is only executed if the branch was not taken. Otherwise the
+ *	forbidden slot is skipped entirely. This means that the
+ *	only possible reason to be here because of a MIPS R6 compact
+ *	branch instruction is that the forbidden slot has thrown one.
+ *	In that case the branch was not taken, so the EPC can be safely
+ *	set to EPC + 8.
  */
  */
 int __compute_return_epc_for_insn(struct pt_regs *regs,
 int __compute_return_epc_for_insn(struct pt_regs *regs,
 				   union mips_instruction insn)
 				   union mips_instruction insn)
 {
 {
-	unsigned int bit, fcr31, dspcontrol;
+	unsigned int bit, fcr31, dspcontrol, reg;
 	long epc = regs->cp0_epc;
 	long epc = regs->cp0_epc;
 	int ret = 0;
 	int ret = 0;
 
 
@@ -417,6 +428,8 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
 			regs->regs[insn.r_format.rd] = epc + 8;
 			regs->regs[insn.r_format.rd] = epc + 8;
 			/* Fall through */
 			/* Fall through */
 		case jr_op:
 		case jr_op:
+			if (NO_R6EMU && insn.r_format.func == jr_op)
+				goto sigill_r6;
 			regs->cp0_epc = regs->regs[insn.r_format.rs];
 			regs->cp0_epc = regs->regs[insn.r_format.rs];
 			break;
 			break;
 		}
 		}
@@ -429,8 +442,10 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
 	 */
 	 */
 	case bcond_op:
 	case bcond_op:
 		switch (insn.i_format.rt) {
 		switch (insn.i_format.rt) {
-		case bltz_op:
 		case bltzl_op:
 		case bltzl_op:
+			if (NO_R6EMU)
+				goto sigill_r6;
+		case bltz_op:
 			if ((long)regs->regs[insn.i_format.rs] < 0) {
 			if ((long)regs->regs[insn.i_format.rs] < 0) {
 				epc = epc + 4 + (insn.i_format.simmediate << 2);
 				epc = epc + 4 + (insn.i_format.simmediate << 2);
 				if (insn.i_format.rt == bltzl_op)
 				if (insn.i_format.rt == bltzl_op)
@@ -440,8 +455,10 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
 			regs->cp0_epc = epc;
 			regs->cp0_epc = epc;
 			break;
 			break;
 
 
-		case bgez_op:
 		case bgezl_op:
 		case bgezl_op:
+			if (NO_R6EMU)
+				goto sigill_r6;
+		case bgez_op:
 			if ((long)regs->regs[insn.i_format.rs] >= 0) {
 			if ((long)regs->regs[insn.i_format.rs] >= 0) {
 				epc = epc + 4 + (insn.i_format.simmediate << 2);
 				epc = epc + 4 + (insn.i_format.simmediate << 2);
 				if (insn.i_format.rt == bgezl_op)
 				if (insn.i_format.rt == bgezl_op)
@@ -453,7 +470,29 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
 
 
 		case bltzal_op:
 		case bltzal_op:
 		case bltzall_op:
 		case bltzall_op:
+			if (NO_R6EMU && (insn.i_format.rs ||
+			    insn.i_format.rt == bltzall_op)) {
+				ret = -SIGILL;
+				break;
+			}
 			regs->regs[31] = epc + 8;
 			regs->regs[31] = epc + 8;
+			/*
+			 * OK we are here either because we hit a NAL
+			 * instruction or because we are emulating an
+			 * old bltzal{,l} one. Lets figure out what the
+			 * case really is.
+			 */
+			if (!insn.i_format.rs) {
+				/*
+				 * NAL or BLTZAL with rs == 0
+				 * Doesn't matter if we are R6 or not. The
+				 * result is the same
+				 */
+				regs->cp0_epc += 4 +
+					(insn.i_format.simmediate << 2);
+				break;
+			}
+			/* Now do the real thing for non-R6 BLTZAL{,L} */
 			if ((long)regs->regs[insn.i_format.rs] < 0) {
 			if ((long)regs->regs[insn.i_format.rs] < 0) {
 				epc = epc + 4 + (insn.i_format.simmediate << 2);
 				epc = epc + 4 + (insn.i_format.simmediate << 2);
 				if (insn.i_format.rt == bltzall_op)
 				if (insn.i_format.rt == bltzall_op)
@@ -465,7 +504,29 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
 
 
 		case bgezal_op:
 		case bgezal_op:
 		case bgezall_op:
 		case bgezall_op:
+			if (NO_R6EMU && (insn.i_format.rs ||
+			    insn.i_format.rt == bgezall_op)) {
+				ret = -SIGILL;
+				break;
+			}
 			regs->regs[31] = epc + 8;
 			regs->regs[31] = epc + 8;
+			/*
+			 * OK we are here either because we hit a BAL
+			 * instruction or because we are emulating an
+			 * old bgezal{,l} one. Lets figure out what the
+			 * case really is.
+			 */
+			if (!insn.i_format.rs) {
+				/*
+				 * BAL or BGEZAL with rs == 0
+				 * Doesn't matter if we are R6 or not. The
+				 * result is the same
+				 */
+				regs->cp0_epc += 4 +
+					(insn.i_format.simmediate << 2);
+				break;
+			}
+			/* Now do the real thing for non-R6 BGEZAL{,L} */
 			if ((long)regs->regs[insn.i_format.rs] >= 0) {
 			if ((long)regs->regs[insn.i_format.rs] >= 0) {
 				epc = epc + 4 + (insn.i_format.simmediate << 2);
 				epc = epc + 4 + (insn.i_format.simmediate << 2);
 				if (insn.i_format.rt == bgezall_op)
 				if (insn.i_format.rt == bgezall_op)
@@ -477,7 +538,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
 
 
 		case bposge32_op:
 		case bposge32_op:
 			if (!cpu_has_dsp)
 			if (!cpu_has_dsp)
-				goto sigill;
+				goto sigill_dsp;
 
 
 			dspcontrol = rddsp(0x01);
 			dspcontrol = rddsp(0x01);
 
 
@@ -508,8 +569,10 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
 	/*
 	/*
 	 * These are conditional and in i_format.
 	 * These are conditional and in i_format.
 	 */
 	 */
-	case beq_op:
 	case beql_op:
 	case beql_op:
+		if (NO_R6EMU)
+			goto sigill_r6;
+	case beq_op:
 		if (regs->regs[insn.i_format.rs] ==
 		if (regs->regs[insn.i_format.rs] ==
 		    regs->regs[insn.i_format.rt]) {
 		    regs->regs[insn.i_format.rt]) {
 			epc = epc + 4 + (insn.i_format.simmediate << 2);
 			epc = epc + 4 + (insn.i_format.simmediate << 2);
@@ -520,8 +583,10 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
 		regs->cp0_epc = epc;
 		regs->cp0_epc = epc;
 		break;
 		break;
 
 
-	case bne_op:
 	case bnel_op:
 	case bnel_op:
+		if (NO_R6EMU)
+			goto sigill_r6;
+	case bne_op:
 		if (regs->regs[insn.i_format.rs] !=
 		if (regs->regs[insn.i_format.rs] !=
 		    regs->regs[insn.i_format.rt]) {
 		    regs->regs[insn.i_format.rt]) {
 			epc = epc + 4 + (insn.i_format.simmediate << 2);
 			epc = epc + 4 + (insn.i_format.simmediate << 2);
@@ -532,8 +597,31 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
 		regs->cp0_epc = epc;
 		regs->cp0_epc = epc;
 		break;
 		break;
 
 
-	case blez_op: /* not really i_format */
-	case blezl_op:
+	case blezl_op: /* not really i_format */
+		if (NO_R6EMU)
+			goto sigill_r6;
+	case blez_op:
+		/*
+		 * Compact branches for R6 for the
+		 * blez and blezl opcodes.
+		 * BLEZ  | rs = 0 | rt != 0  == BLEZALC
+		 * BLEZ  | rs = rt != 0      == BGEZALC
+		 * BLEZ  | rs != 0 | rt != 0 == BGEUC
+		 * BLEZL | rs = 0 | rt != 0  == BLEZC
+		 * BLEZL | rs = rt != 0      == BGEZC
+		 * BLEZL | rs != 0 | rt != 0 == BGEC
+		 *
+		 * For real BLEZ{,L}, rt is always 0.
+		 */
+
+		if (cpu_has_mips_r6 && insn.i_format.rt) {
+			if ((insn.i_format.opcode == blez_op) &&
+			    ((!insn.i_format.rs && insn.i_format.rt) ||
+			     (insn.i_format.rs == insn.i_format.rt)))
+				regs->regs[31] = epc + 4;
+			regs->cp0_epc += 8;
+			break;
+		}
 		/* rt field assumed to be zero */
 		/* rt field assumed to be zero */
 		if ((long)regs->regs[insn.i_format.rs] <= 0) {
 		if ((long)regs->regs[insn.i_format.rs] <= 0) {
 			epc = epc + 4 + (insn.i_format.simmediate << 2);
 			epc = epc + 4 + (insn.i_format.simmediate << 2);
@@ -544,8 +632,32 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
 		regs->cp0_epc = epc;
 		regs->cp0_epc = epc;
 		break;
 		break;
 
 
-	case bgtz_op:
 	case bgtzl_op:
 	case bgtzl_op:
+		if (NO_R6EMU)
+			goto sigill_r6;
+	case bgtz_op:
+		/*
+		 * Compact branches for R6 for the
+		 * bgtz and bgtzl opcodes.
+		 * BGTZ  | rs = 0 | rt != 0  == BGTZALC
+		 * BGTZ  | rs = rt != 0      == BLTZALC
+		 * BGTZ  | rs != 0 | rt != 0 == BLTUC
+		 * BGTZL | rs = 0 | rt != 0  == BGTZC
+		 * BGTZL | rs = rt != 0      == BLTZC
+		 * BGTZL | rs != 0 | rt != 0 == BLTC
+		 *
+		 * *ZALC varint for BGTZ &&& rt != 0
+		 * For real GTZ{,L}, rt is always 0.
+		 */
+		if (cpu_has_mips_r6 && insn.i_format.rt) {
+			if ((insn.i_format.opcode == blez_op) &&
+			    ((!insn.i_format.rs && insn.i_format.rt) ||
+			    (insn.i_format.rs == insn.i_format.rt)))
+				regs->regs[31] = epc + 4;
+			regs->cp0_epc += 8;
+			break;
+		}
+
 		/* rt field assumed to be zero */
 		/* rt field assumed to be zero */
 		if ((long)regs->regs[insn.i_format.rs] > 0) {
 		if ((long)regs->regs[insn.i_format.rs] > 0) {
 			epc = epc + 4 + (insn.i_format.simmediate << 2);
 			epc = epc + 4 + (insn.i_format.simmediate << 2);
@@ -560,40 +672,83 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
 	 * And now the FPA/cp1 branch instructions.
 	 * And now the FPA/cp1 branch instructions.
 	 */
 	 */
 	case cop1_op:
 	case cop1_op:
-		preempt_disable();
-		if (is_fpu_owner())
-		        fcr31 = read_32bit_cp1_register(CP1_STATUS);
-		else
-			fcr31 = current->thread.fpu.fcr31;
-		preempt_enable();
-
-		bit = (insn.i_format.rt >> 2);
-		bit += (bit != 0);
-		bit += 23;
-		switch (insn.i_format.rt & 3) {
-		case 0: /* bc1f */
-		case 2: /* bc1fl */
-			if (~fcr31 & (1 << bit)) {
-				epc = epc + 4 + (insn.i_format.simmediate << 2);
-				if (insn.i_format.rt == 2)
-					ret = BRANCH_LIKELY_TAKEN;
-			} else
+		if (cpu_has_mips_r6 &&
+		    ((insn.i_format.rs == bc1eqz_op) ||
+		     (insn.i_format.rs == bc1nez_op))) {
+			if (!used_math()) { /* First time FPU user */
+				ret = init_fpu();
+				if (ret && NO_R6EMU) {
+					ret = -ret;
+					break;
+				}
+				ret = 0;
+				set_used_math();
+			}
+			lose_fpu(1);    /* Save FPU state for the emulator. */
+			reg = insn.i_format.rt;
+			bit = 0;
+			switch (insn.i_format.rs) {
+			case bc1eqz_op:
+				/* Test bit 0 */
+				if (get_fpr32(&current->thread.fpu.fpr[reg], 0)
+				    & 0x1)
+					bit = 1;
+				break;
+			case bc1nez_op:
+				/* Test bit 0 */
+				if (!(get_fpr32(&current->thread.fpu.fpr[reg], 0)
+				      & 0x1))
+					bit = 1;
+				break;
+			}
+			own_fpu(1);
+			if (bit)
+				epc = epc + 4 +
+					(insn.i_format.simmediate << 2);
+			else
 				epc += 8;
 				epc += 8;
 			regs->cp0_epc = epc;
 			regs->cp0_epc = epc;
+
 			break;
 			break;
+		} else {
 
 
-		case 1: /* bc1t */
-		case 3: /* bc1tl */
-			if (fcr31 & (1 << bit)) {
-				epc = epc + 4 + (insn.i_format.simmediate << 2);
-				if (insn.i_format.rt == 3)
-					ret = BRANCH_LIKELY_TAKEN;
-			} else
-				epc += 8;
-			regs->cp0_epc = epc;
+			preempt_disable();
+			if (is_fpu_owner())
+			        fcr31 = read_32bit_cp1_register(CP1_STATUS);
+			else
+				fcr31 = current->thread.fpu.fcr31;
+			preempt_enable();
+
+			bit = (insn.i_format.rt >> 2);
+			bit += (bit != 0);
+			bit += 23;
+			switch (insn.i_format.rt & 3) {
+			case 0: /* bc1f */
+			case 2: /* bc1fl */
+				if (~fcr31 & (1 << bit)) {
+					epc = epc + 4 +
+						(insn.i_format.simmediate << 2);
+					if (insn.i_format.rt == 2)
+						ret = BRANCH_LIKELY_TAKEN;
+				} else
+					epc += 8;
+				regs->cp0_epc = epc;
+				break;
+
+			case 1: /* bc1t */
+			case 3: /* bc1tl */
+				if (fcr31 & (1 << bit)) {
+					epc = epc + 4 +
+						(insn.i_format.simmediate << 2);
+					if (insn.i_format.rt == 3)
+						ret = BRANCH_LIKELY_TAKEN;
+				} else
+					epc += 8;
+				regs->cp0_epc = epc;
+				break;
+			}
 			break;
 			break;
 		}
 		}
-		break;
 #ifdef CONFIG_CPU_CAVIUM_OCTEON
 #ifdef CONFIG_CPU_CAVIUM_OCTEON
 	case lwc2_op: /* This is bbit0 on Octeon */
 	case lwc2_op: /* This is bbit0 on Octeon */
 		if ((regs->regs[insn.i_format.rs] & (1ull<<insn.i_format.rt))
 		if ((regs->regs[insn.i_format.rs] & (1ull<<insn.i_format.rt))
@@ -626,15 +781,72 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
 			epc += 8;
 			epc += 8;
 		regs->cp0_epc = epc;
 		regs->cp0_epc = epc;
 		break;
 		break;
+#else
+	case bc6_op:
+		/* Only valid for MIPS R6 */
+		if (!cpu_has_mips_r6) {
+			ret = -SIGILL;
+			break;
+		}
+		regs->cp0_epc += 8;
+		break;
+	case balc6_op:
+		if (!cpu_has_mips_r6) {
+			ret = -SIGILL;
+			break;
+		}
+		/* Compact branch: BALC */
+		regs->regs[31] = epc + 4;
+		epc += 4 + (insn.i_format.simmediate << 2);
+		regs->cp0_epc = epc;
+		break;
+	case beqzcjic_op:
+		if (!cpu_has_mips_r6) {
+			ret = -SIGILL;
+			break;
+		}
+		/* Compact branch: BEQZC || JIC */
+		regs->cp0_epc += 8;
+		break;
+	case bnezcjialc_op:
+		if (!cpu_has_mips_r6) {
+			ret = -SIGILL;
+			break;
+		}
+		/* Compact branch: BNEZC || JIALC */
+		if (insn.i_format.rs)
+			regs->regs[31] = epc + 4;
+		regs->cp0_epc += 8;
+		break;
 #endif
 #endif
+	case cbcond0_op:
+	case cbcond1_op:
+		/* Only valid for MIPS R6 */
+		if (!cpu_has_mips_r6) {
+			ret = -SIGILL;
+			break;
+		}
+		/*
+		 * Compact branches:
+		 * bovc, beqc, beqzalc, bnvc, bnec, bnezlac
+		 */
+		if (insn.i_format.rt && !insn.i_format.rs)
+			regs->regs[31] = epc + 4;
+		regs->cp0_epc += 8;
+		break;
 	}
 	}
 
 
 	return ret;
 	return ret;
 
 
-sigill:
+sigill_dsp:
 	printk("%s: DSP branch but not DSP ASE - sending SIGBUS.\n", current->comm);
 	printk("%s: DSP branch but not DSP ASE - sending SIGBUS.\n", current->comm);
 	force_sig(SIGBUS, current);
 	force_sig(SIGBUS, current);
 	return -EFAULT;
 	return -EFAULT;
+sigill_r6:
+	pr_info("%s: R2 branch but r2-to-r6 emulator is not preset - sending SIGILL.\n",
+		current->comm);
+	force_sig(SIGILL, current);
+	return -EFAULT;
 }
 }
 EXPORT_SYMBOL_GPL(__compute_return_epc_for_insn);
 EXPORT_SYMBOL_GPL(__compute_return_epc_for_insn);
 
 

+ 1 - 1
arch/mips/kernel/cevt-r4k.c

@@ -39,7 +39,7 @@ int cp0_timer_irq_installed;
 
 
 irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
 irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
 {
 {
-	const int r2 = cpu_has_mips_r2;
+	const int r2 = cpu_has_mips_r2_r6;
 	struct clock_event_device *cd;
 	struct clock_event_device *cd;
 	int cpu = smp_processor_id();
 	int cpu = smp_processor_id();
 
 

+ 8 - 8
arch/mips/kernel/cps-vec.S

@@ -99,11 +99,11 @@ not_nmi:
 	xori	t2, t1, 0x7
 	xori	t2, t1, 0x7
 	beqz	t2, 1f
 	beqz	t2, 1f
 	 li	t3, 32
 	 li	t3, 32
-	addi	t1, t1, 1
+	addiu	t1, t1, 1
 	sllv	t1, t3, t1
 	sllv	t1, t3, t1
 1:	/* At this point t1 == I-cache sets per way */
 1:	/* At this point t1 == I-cache sets per way */
 	_EXT	t2, v0, MIPS_CONF1_IA_SHF, MIPS_CONF1_IA_SZ
 	_EXT	t2, v0, MIPS_CONF1_IA_SHF, MIPS_CONF1_IA_SZ
-	addi	t2, t2, 1
+	addiu	t2, t2, 1
 	mul	t1, t1, t0
 	mul	t1, t1, t0
 	mul	t1, t1, t2
 	mul	t1, t1, t2
 
 
@@ -126,11 +126,11 @@ icache_done:
 	xori	t2, t1, 0x7
 	xori	t2, t1, 0x7
 	beqz	t2, 1f
 	beqz	t2, 1f
 	 li	t3, 32
 	 li	t3, 32
-	addi	t1, t1, 1
+	addiu	t1, t1, 1
 	sllv	t1, t3, t1
 	sllv	t1, t3, t1
 1:	/* At this point t1 == D-cache sets per way */
 1:	/* At this point t1 == D-cache sets per way */
 	_EXT	t2, v0, MIPS_CONF1_DA_SHF, MIPS_CONF1_DA_SZ
 	_EXT	t2, v0, MIPS_CONF1_DA_SHF, MIPS_CONF1_DA_SZ
-	addi	t2, t2, 1
+	addiu	t2, t2, 1
 	mul	t1, t1, t0
 	mul	t1, t1, t0
 	mul	t1, t1, t2
 	mul	t1, t1, t2
 
 
@@ -250,7 +250,7 @@ LEAF(mips_cps_core_init)
 	mfc0	t0, CP0_MVPCONF0
 	mfc0	t0, CP0_MVPCONF0
 	srl	t0, t0, MVPCONF0_PVPE_SHIFT
 	srl	t0, t0, MVPCONF0_PVPE_SHIFT
 	andi	t0, t0, (MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT)
 	andi	t0, t0, (MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT)
-	addi	t7, t0, 1
+	addiu	t7, t0, 1
 
 
 	/* If there's only 1, we're done */
 	/* If there's only 1, we're done */
 	beqz	t0, 2f
 	beqz	t0, 2f
@@ -280,7 +280,7 @@ LEAF(mips_cps_core_init)
 	mttc0	t0, CP0_TCHALT
 	mttc0	t0, CP0_TCHALT
 
 
 	/* Next VPE */
 	/* Next VPE */
-	addi	t5, t5, 1
+	addiu	t5, t5, 1
 	slt	t0, t5, t7
 	slt	t0, t5, t7
 	bnez	t0, 1b
 	bnez	t0, 1b
 	 nop
 	 nop
@@ -317,7 +317,7 @@ LEAF(mips_cps_boot_vpes)
 	mfc0	t1, CP0_MVPCONF0
 	mfc0	t1, CP0_MVPCONF0
 	srl	t1, t1, MVPCONF0_PVPE_SHIFT
 	srl	t1, t1, MVPCONF0_PVPE_SHIFT
 	andi	t1, t1, MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT
 	andi	t1, t1, MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT
-	addi	t1, t1, 1
+	addiu	t1, t1, 1
 
 
 	/* Calculate a mask for the VPE ID from EBase.CPUNum */
 	/* Calculate a mask for the VPE ID from EBase.CPUNum */
 	clz	t1, t1
 	clz	t1, t1
@@ -424,7 +424,7 @@ LEAF(mips_cps_boot_vpes)
 
 
 	/* Next VPE */
 	/* Next VPE */
 2:	srl	t6, t6, 1
 2:	srl	t6, t6, 1
-	addi	t5, t5, 1
+	addiu	t5, t5, 1
 	bnez	t6, 1b
 	bnez	t6, 1b
 	 nop
 	 nop
 
 

+ 7 - 4
arch/mips/kernel/cpu-bugs64.c

@@ -244,7 +244,7 @@ static inline void check_daddi(void)
 	panic(bug64hit, !DADDI_WAR ? daddiwar : nowar);
 	panic(bug64hit, !DADDI_WAR ? daddiwar : nowar);
 }
 }
 
 
-int daddiu_bug	= -1;
+int daddiu_bug	= config_enabled(CONFIG_CPU_MIPSR6) ? 0 : -1;
 
 
 static inline void check_daddiu(void)
 static inline void check_daddiu(void)
 {
 {
@@ -314,11 +314,14 @@ static inline void check_daddiu(void)
 
 
 void __init check_bugs64_early(void)
 void __init check_bugs64_early(void)
 {
 {
-	check_mult_sh();
-	check_daddiu();
+	if (!config_enabled(CONFIG_CPU_MIPSR6)) {
+		check_mult_sh();
+		check_daddiu();
+	}
 }
 }
 
 
 void __init check_bugs64(void)
 void __init check_bugs64(void)
 {
 {
-	check_daddi();
+	if (!config_enabled(CONFIG_CPU_MIPSR6))
+		check_daddi();
 }
 }

+ 23 - 4
arch/mips/kernel/cpu-probe.c

@@ -237,6 +237,13 @@ static void set_isa(struct cpuinfo_mips *c, unsigned int isa)
 		c->isa_level |= MIPS_CPU_ISA_II | MIPS_CPU_ISA_III;
 		break;
 
+	/* R6 incompatible with everything else */
+	case MIPS_CPU_ISA_M64R6:
+		c->isa_level |= MIPS_CPU_ISA_M32R6 | MIPS_CPU_ISA_M64R6;
+	case MIPS_CPU_ISA_M32R6:
+		c->isa_level |= MIPS_CPU_ISA_M32R6;
+		/* Break here so we don't add incompatible ISAs */
+		break;
 	case MIPS_CPU_ISA_M32R2:
 		c->isa_level |= MIPS_CPU_ISA_M32R2;
 	case MIPS_CPU_ISA_M32R1:
@@ -326,6 +333,9 @@ static inline unsigned int decode_config0(struct cpuinfo_mips *c)
 		case 1:
 			set_isa(c, MIPS_CPU_ISA_M32R2);
 			break;
+		case 2:
+			set_isa(c, MIPS_CPU_ISA_M32R6);
+			break;
 		default:
 			goto unknown;
 		}
@@ -338,6 +348,9 @@ static inline unsigned int decode_config0(struct cpuinfo_mips *c)
 		case 1:
 			set_isa(c, MIPS_CPU_ISA_M64R2);
 			break;
+		case 2:
+			set_isa(c, MIPS_CPU_ISA_M64R6);
+			break;
 		default:
 			goto unknown;
 		}
@@ -501,6 +514,8 @@ static inline unsigned int decode_config5(struct cpuinfo_mips *c)
 		c->options |= MIPS_CPU_EVA;
 	if (config5 & MIPS_CONF5_MRP)
 		c->options |= MIPS_CPU_MAAR;
+	if (config5 & MIPS_CONF5_LLB)
+		c->options |= MIPS_CPU_RW_LLB;
 
 	return config5 & MIPS_CONF_M;
 }
@@ -543,7 +558,7 @@ static void decode_configs(struct cpuinfo_mips *c)
 	}
 
 #ifndef CONFIG_MIPS_CPS
-	if (cpu_has_mips_r2) {
+	if (cpu_has_mips_r2_r6) {
 		c->core = get_ebase_cpunum();
 		if (cpu_has_mipsmt)
 			c->core >>= fls(core_nvpes()) - 1;
@@ -898,6 +913,11 @@ static inline void cpu_probe_mips(struct cpuinfo_mips *c, unsigned int cpu)
 {
 	c->writecombine = _CACHE_UNCACHED_ACCELERATED;
 	switch (c->processor_id & PRID_IMP_MASK) {
+	case PRID_IMP_QEMU_GENERIC:
+		c->writecombine = _CACHE_UNCACHED;
+		c->cputype = CPU_QEMU_GENERIC;
+		__cpu_name[cpu] = "MIPS GENERIC QEMU";
+		break;
 	case PRID_IMP_4KC:
 		c->cputype = CPU_4KC;
 		c->writecombine = _CACHE_UNCACHED;
@@ -1347,8 +1367,7 @@ void cpu_probe(void)
 	if (c->options & MIPS_CPU_FPU) {
 		c->fpu_id = cpu_get_fpu_id();
 
-		if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 |
-				    MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2)) {
+		if (c->isa_level & cpu_has_mips_r) {
 			if (c->fpu_id & MIPS_FPIR_3D)
 				c->ases |= MIPS_ASE_MIPS3D;
 			if (c->fpu_id & MIPS_FPIR_FREP)
@@ -1356,7 +1375,7 @@ void cpu_probe(void)
 		}
 	}
 
-	if (cpu_has_mips_r2) {
+	if (cpu_has_mips_r2_r6) {
 		c->srsets = ((read_c0_srsctl() >> 26) & 0x0f) + 1;
 		/* R2 has Performance Counter Interrupt indicator */
 		c->options |= MIPS_CPU_PCI;
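
The set_isa() hunk above keeps the long-standing pattern of accumulating backward-compatible ISA bits through deliberate switch fallthrough, and breaks out early for R6 because R6 is not a superset of R2. A small sketch of that accumulation pattern; the flag values here are illustrative, the real MIPS_CPU_ISA_* masks live in arch/mips/include/asm/cpu.h:

    #include <stdio.h>

    /* Illustrative flag values, not the kernel's. */
    #define ISA_M32R1 0x01
    #define ISA_M32R2 0x02
    #define ISA_M32R6 0x04

    static unsigned int set_isa(unsigned int isa)
    {
        unsigned int level = 0;

        switch (isa) {
        case ISA_M32R6:
            /* R6 is not a superset of R2: set only the R6 bit. */
            level |= ISA_M32R6;
            break;
        case ISA_M32R2:
            level |= ISA_M32R2;
            /* fall through: R2 implies R1 */
        case ISA_M32R1:
            level |= ISA_M32R1;
            break;
        }
        return level;
    }

    int main(void)
    {
        printf("R2 level: %#x\n", set_isa(ISA_M32R2));  /* 0x3 */
        printf("R6 level: %#x\n", set_isa(ISA_M32R6));  /* 0x4 */
        return 0;
    }

The resulting isa_level is a bitmask, which is why later code like cpu_probe() can test a whole family of ISAs with a single AND.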

+ 188 - 115
arch/mips/kernel/elf.c

@@ -11,29 +11,112 @@
 #include <linux/elf.h>
 #include <linux/sched.h>
 
+/* FPU modes */
 enum {
-	FP_ERROR = -1,
-	FP_DOUBLE_64A = -2,
+	FP_FRE,
+	FP_FR0,
+	FP_FR1,
 };
 
+/**
+ * struct mode_req - ABI FPU mode requirements
+ * @single:	The program being loaded needs an FPU but it will only issue
+ *		single precision instructions meaning that it can execute in
+ *		either FR0 or FR1.
+ * @soft:	The soft(-float) requirement means that the program being
+ *		loaded has no FPU dependency at all (i.e. it has no
+ *		FPU instructions).
+ * @fr1:	The program being loaded depends on FPU being in FR=1 mode.
+ * @frdefault:	The program being loaded depends on the default FPU mode.
+ *		That is FR0 for O32 and FR1 for N32/N64.
+ * @fre:	The program being loaded depends on FPU with FRE=1. This mode is
+ *		a bridge which uses FR=1 whilst still being able to maintain
+ *		full compatibility with pre-existing code using the O32 FP32
+ *		ABI.
+ *
+ * More information about the FP ABIs can be found here:
+ *
+ * https://dmz-portal.mips.com/wiki/MIPS_O32_ABI_-_FR0_and_FR1_Interlinking#10.4.1._Basic_mode_set-up
+ *
+ */
+
+struct mode_req {
+	bool single;
+	bool soft;
+	bool fr1;
+	bool frdefault;
+	bool fre;
+};
+
+static const struct mode_req fpu_reqs[] = {
+	[MIPS_ABI_FP_ANY]    = { true,  true,  true,  true,  true  },
+	[MIPS_ABI_FP_DOUBLE] = { false, false, false, true,  true  },
+	[MIPS_ABI_FP_SINGLE] = { true,  false, false, false, false },
+	[MIPS_ABI_FP_SOFT]   = { false, true,  false, false, false },
+	[MIPS_ABI_FP_OLD_64] = { false, false, false, false, false },
+	[MIPS_ABI_FP_XX]     = { false, false, true,  true,  true  },
+	[MIPS_ABI_FP_64]     = { false, false, true,  false, false },
+	[MIPS_ABI_FP_64A]    = { false, false, true,  false, true  }
+};
+
+/*
+ * Mode requirements when .MIPS.abiflags is not present in the ELF.
+ * Not present means that everything is acceptable except FR1.
+ */
+static struct mode_req none_req = { true, true, false, true, true };
+
 int arch_elf_pt_proc(void *_ehdr, void *_phdr, struct file *elf,
 		     bool is_interp, struct arch_elf_state *state)
 {
-	struct elfhdr *ehdr = _ehdr;
-	struct elf_phdr *phdr = _phdr;
+	struct elf32_hdr *ehdr32 = _ehdr;
+	struct elf32_phdr *phdr32 = _phdr;
+	struct elf64_phdr *phdr64 = _phdr;
 	struct mips_elf_abiflags_v0 abiflags;
 	int ret;
 
-	if (config_enabled(CONFIG_64BIT) &&
-	    (ehdr->e_ident[EI_CLASS] != ELFCLASS32))
-		return 0;
-	if (phdr->p_type != PT_MIPS_ABIFLAGS)
-		return 0;
-	if (phdr->p_filesz < sizeof(abiflags))
-		return -EINVAL;
+	/* Let's see if this is an O32 ELF */
+	if (ehdr32->e_ident[EI_CLASS] == ELFCLASS32) {
+		/* FR = 1 for N32 */
+		if (ehdr32->e_flags & EF_MIPS_ABI2)
+			state->overall_fp_mode = FP_FR1;
+		else
+			/* Set a good default FPU mode for O32 */
+			state->overall_fp_mode = cpu_has_mips_r6 ?
+				FP_FRE : FP_FR0;
+
+		if (ehdr32->e_flags & EF_MIPS_FP64) {
+			/*
+			 * Set MIPS_ABI_FP_OLD_64 for EF_MIPS_FP64. We will override it
+			 * later if needed
+			 */
+			if (is_interp)
+				state->interp_fp_abi = MIPS_ABI_FP_OLD_64;
+			else
+				state->fp_abi = MIPS_ABI_FP_OLD_64;
+		}
+		if (phdr32->p_type != PT_MIPS_ABIFLAGS)
+			return 0;
+
+		if (phdr32->p_filesz < sizeof(abiflags))
+			return -EINVAL;
+
+		ret = kernel_read(elf, phdr32->p_offset,
+				  (char *)&abiflags,
+				  sizeof(abiflags));
+	} else {
+		/* FR=1 is really the only option for 64-bit */
+		state->overall_fp_mode = FP_FR1;
+
+		if (phdr64->p_type != PT_MIPS_ABIFLAGS)
+			return 0;
+		if (phdr64->p_filesz < sizeof(abiflags))
+			return -EINVAL;
+
+		ret = kernel_read(elf, phdr64->p_offset,
+				  (char *)&abiflags,
+				  sizeof(abiflags));
+	}
 
-	ret = kernel_read(elf, phdr->p_offset, (char *)&abiflags,
-			  sizeof(abiflags));
 	if (ret < 0)
 		return ret;
 	if (ret != sizeof(abiflags))
@@ -48,35 +131,30 @@ int arch_elf_pt_proc(void *_ehdr, void *_phdr, struct file *elf,
 	return 0;
 }
 
-static inline unsigned get_fp_abi(struct elfhdr *ehdr, int in_abi)
+static inline unsigned get_fp_abi(int in_abi)
 {
 	/* If the ABI requirement is provided, simply return that */
-	if (in_abi != -1)
+	if (in_abi != MIPS_ABI_FP_UNKNOWN)
 		return in_abi;
 
-	/* If the EF_MIPS_FP64 flag was set, return MIPS_ABI_FP_64 */
-	if (ehdr->e_flags & EF_MIPS_FP64)
-		return MIPS_ABI_FP_64;
-
-	/* Default to MIPS_ABI_FP_DOUBLE */
-	return MIPS_ABI_FP_DOUBLE;
+	/* Unknown ABI */
+	return MIPS_ABI_FP_UNKNOWN;
 }
 
 int arch_check_elf(void *_ehdr, bool has_interpreter,
 		   struct arch_elf_state *state)
 {
-	struct elfhdr *ehdr = _ehdr;
-	unsigned fp_abi, interp_fp_abi, abi0, abi1;
+	struct elf32_hdr *ehdr = _ehdr;
+	struct mode_req prog_req, interp_req;
+	int fp_abi, interp_fp_abi, abi0, abi1, max_abi;
 
-	/* Ignore non-O32 binaries */
-	if (config_enabled(CONFIG_64BIT) &&
-	    (ehdr->e_ident[EI_CLASS] != ELFCLASS32))
+	if (!config_enabled(CONFIG_MIPS_O32_FP64_SUPPORT))
 		return 0;
 
-	fp_abi = get_fp_abi(ehdr, state->fp_abi);
+	fp_abi = get_fp_abi(state->fp_abi);
 
 	if (has_interpreter) {
-		interp_fp_abi = get_fp_abi(ehdr, state->interp_fp_abi);
+		interp_fp_abi = get_fp_abi(state->interp_fp_abi);
 
 		abi0 = min(fp_abi, interp_fp_abi);
 		abi1 = max(fp_abi, interp_fp_abi);
@@ -84,108 +162,103 @@ int arch_check_elf(void *_ehdr, bool has_interpreter,
 		abi0 = abi1 = fp_abi;
 	}
 
-	state->overall_abi = FP_ERROR;
-
-	if (abi0 == abi1) {
-		state->overall_abi = abi0;
-	} else if (abi0 == MIPS_ABI_FP_ANY) {
-		state->overall_abi = abi1;
-	} else if (abi0 == MIPS_ABI_FP_DOUBLE) {
-		switch (abi1) {
-		case MIPS_ABI_FP_XX:
-			state->overall_abi = MIPS_ABI_FP_DOUBLE;
-			break;
-
-		case MIPS_ABI_FP_64A:
-			state->overall_abi = FP_DOUBLE_64A;
-			break;
-		}
-	} else if (abi0 == MIPS_ABI_FP_SINGLE ||
-		   abi0 == MIPS_ABI_FP_SOFT) {
-		/* Cannot link with other ABIs */
-	} else if (abi0 == MIPS_ABI_FP_OLD_64) {
-		switch (abi1) {
-		case MIPS_ABI_FP_XX:
-		case MIPS_ABI_FP_64:
-		case MIPS_ABI_FP_64A:
-			state->overall_abi = MIPS_ABI_FP_64;
-			break;
-		}
-	} else if (abi0 == MIPS_ABI_FP_XX ||
-		   abi0 == MIPS_ABI_FP_64 ||
-		   abi0 == MIPS_ABI_FP_64A) {
-		state->overall_abi = MIPS_ABI_FP_64;
-	}
+	/* ABI limits. O32 = FP_64A, N32/N64 = FP_SOFT */
+	max_abi = ((ehdr->e_ident[EI_CLASS] == ELFCLASS32) &&
+		   (!(ehdr->e_flags & EF_MIPS_ABI2))) ?
+		MIPS_ABI_FP_64A : MIPS_ABI_FP_SOFT;
 
-	switch (state->overall_abi) {
-	case MIPS_ABI_FP_64:
-	case MIPS_ABI_FP_64A:
-	case FP_DOUBLE_64A:
-		if (!config_enabled(CONFIG_MIPS_O32_FP64_SUPPORT))
-			return -ELIBBAD;
-		break;
+	if ((abi0 > max_abi && abi0 != MIPS_ABI_FP_UNKNOWN) ||
+	    (abi1 > max_abi && abi1 != MIPS_ABI_FP_UNKNOWN))
+		return -ELIBBAD;
+
+	/* It's time to determine the FPU mode requirements */
+	prog_req = (abi0 == MIPS_ABI_FP_UNKNOWN) ? none_req : fpu_reqs[abi0];
+	interp_req = (abi1 == MIPS_ABI_FP_UNKNOWN) ? none_req : fpu_reqs[abi1];
 
-	case FP_ERROR:
+	/*
+	 * Check whether the program's and interp's ABIs have a matching FPU
+	 * mode requirement.
+	 */
+	prog_req.single = interp_req.single && prog_req.single;
+	prog_req.soft = interp_req.soft && prog_req.soft;
+	prog_req.fr1 = interp_req.fr1 && prog_req.fr1;
+	prog_req.frdefault = interp_req.frdefault && prog_req.frdefault;
+	prog_req.fre = interp_req.fre && prog_req.fre;
+
+	/*
+	 * Determine the desired FPU mode
+	 *
+	 * Decision making:
+	 *
+	 * - We want FP_FRE if FRE=1 and both FR=1 and FR=0 are false. This
+	 *   means that we have a combination of program and interpreter
+	 *   that inherently requires the hybrid FP mode.
+	 * - If FR1 and FRDEFAULT are both true, we hit the any-ABI or fpxx
+	 *   case. In any-ABI (or no-ABI) we have no FPU instructions, so we
+	 *   don't care about the mode and simply use the one preferred by
+	 *   the hardware. The fpxx ABI can handle both FR=1 and FR=0, so,
+	 *   again, we simply choose the one preferred by the hardware. Next,
+	 *   if we only use single-precision FPU instructions and the default
+	 *   ABI FPU mode is not good (i.e. a single + any-ABI combination),
+	 *   we again set the FPU mode to the one preferred by the hardware.
+	 *   Likewise, if we know that the code will only use single-precision
+	 *   instructions - shown by single being true but frdefault being
+	 *   false - we again set the FPU mode to the one preferred by the
+	 *   hardware.
+	 * - We want FP_FR1 if that's the only matching mode and the default
+	 *   one is not good.
+	 * - Return -ELIBBAD if we can't find a matching FPU mode.
+	 */
+	if (prog_req.fre && !prog_req.frdefault && !prog_req.fr1)
+		state->overall_fp_mode = FP_FRE;
+	else if ((prog_req.fr1 && prog_req.frdefault) ||
+		 (prog_req.single && !prog_req.frdefault))
+		/* Make sure 64-bit MIPS III/IV/64R1 will not pick FR1 */
+		state->overall_fp_mode = ((current_cpu_data.fpu_id & MIPS_FPIR_F64) &&
+					  cpu_has_mips_r2_r6) ?
+					  FP_FR1 : FP_FR0;
+	else if (prog_req.fr1)
+		state->overall_fp_mode = FP_FR1;
+	else if (!prog_req.fre && !prog_req.frdefault &&
+		 !prog_req.fr1 && !prog_req.single && !prog_req.soft)
 		return -ELIBBAD;
-	}
 
 	return 0;
 }
 
-void mips_set_personality_fp(struct arch_elf_state *state)
+static inline void set_thread_fp_mode(int hybrid, int regs32)
 {
-	if (config_enabled(CONFIG_FP32XX_HYBRID_FPRS)) {
-		/*
-		 * Use hybrid FPRs for all code which can correctly execute
-		 * with that mode.
-		 */
-		switch (state->overall_abi) {
-		case MIPS_ABI_FP_DOUBLE:
-		case MIPS_ABI_FP_SINGLE:
-		case MIPS_ABI_FP_SOFT:
-		case MIPS_ABI_FP_XX:
-		case MIPS_ABI_FP_ANY:
-			/* FR=1, FRE=1 */
-			clear_thread_flag(TIF_32BIT_FPREGS);
-			set_thread_flag(TIF_HYBRID_FPREGS);
-			return;
-		}
-	}
-
-	switch (state->overall_abi) {
-	case MIPS_ABI_FP_DOUBLE:
-	case MIPS_ABI_FP_SINGLE:
-	case MIPS_ABI_FP_SOFT:
-		/* FR=0 */
-		set_thread_flag(TIF_32BIT_FPREGS);
+	if (hybrid)
+		set_thread_flag(TIF_HYBRID_FPREGS);
+	else
 		clear_thread_flag(TIF_HYBRID_FPREGS);
-		break;
-
-	case FP_DOUBLE_64A:
-		/* FR=1, FRE=1 */
+	if (regs32)
+		set_thread_flag(TIF_32BIT_FPREGS);
+	else
 		clear_thread_flag(TIF_32BIT_FPREGS);
-		set_thread_flag(TIF_HYBRID_FPREGS);
-		break;
+}
 
-	case MIPS_ABI_FP_64:
-	case MIPS_ABI_FP_64A:
-		/* FR=1, FRE=0 */
-		clear_thread_flag(TIF_32BIT_FPREGS);
-		clear_thread_flag(TIF_HYBRID_FPREGS);
-		break;
+void mips_set_personality_fp(struct arch_elf_state *state)
+{
+	/*
+	 * This function is only ever called for O32 ELFs so we should
+	 * not be worried about N32/N64 binaries.
+	 */
 
-	case MIPS_ABI_FP_XX:
-	case MIPS_ABI_FP_ANY:
-		if (!config_enabled(CONFIG_MIPS_O32_FP64_SUPPORT))
-			set_thread_flag(TIF_32BIT_FPREGS);
-		else
-			clear_thread_flag(TIF_32BIT_FPREGS);
+	if (!config_enabled(CONFIG_MIPS_O32_FP64_SUPPORT))
+		return;
 
-		clear_thread_flag(TIF_HYBRID_FPREGS);
+	switch (state->overall_fp_mode) {
+	case FP_FRE:
+		set_thread_fp_mode(1, 0);
+		break;
+	case FP_FR0:
+		set_thread_fp_mode(0, 1);
+		break;
+	case FP_FR1:
+		set_thread_fp_mode(0, 0);
 		break;
-
 	default:
-	case FP_ERROR:
 		BUG();
 	}
 }
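
The rewritten arch_check_elf() above reduces FP-ABI linking rules to set intersection: each ABI maps to the set of FPU modes it can execute under (fpu_reqs above), the program's and interpreter's sets are ANDed, and a mode is picked from what survives. A self-contained sketch of that intersection step; the two table rows are copied from fpu_reqs, while the chooser is a paraphrase of the decision chain above, ignoring the hardware-preference checks:

    #include <stdbool.h>
    #include <stdio.h>

    struct mode_req { bool single, soft, fr1, frdefault, fre; };

    /* Two rows copied from the fpu_reqs[] table above. */
    static const struct mode_req req_double = { false, false, false, true,  true };
    static const struct mode_req req_64a    = { false, false, true,  false, true };

    static struct mode_req intersect(struct mode_req a, struct mode_req b)
    {
        struct mode_req r;

        r.single    = a.single && b.single;
        r.soft      = a.soft && b.soft;
        r.fr1       = a.fr1 && b.fr1;
        r.frdefault = a.frdefault && b.frdefault;
        r.fre       = a.fre && b.fre;
        return r;
    }

    int main(void)
    {
        /* FP_DOUBLE program + FP_64A interpreter: only the FRE bit
         * survives, which is exactly the combination the removed
         * FP_DOUBLE_64A special case used to handle. */
        struct mode_req r = intersect(req_double, req_64a);

        if (r.fre && !r.frdefault && !r.fr1)
            puts("overall mode: FP_FRE");  /* first rule in the chain */
        return 0;
    }

Encoding each ABI as a row of booleans turns the old pairwise switch statements into one table lookup plus five ANDs, which is why the replacement is both shorter and easier to extend.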

+ 21 - 2
arch/mips/kernel/entry.S

@@ -46,6 +46,11 @@ resume_userspace:
 	local_irq_disable		# make sure we dont miss an
 					# interrupt setting need_resched
 					# between sampling and return
+#ifdef CONFIG_MIPSR2_TO_R6_EMULATOR
+	lw	k0, TI_R2_EMUL_RET($28)
+	bnez	k0, restore_all_from_r2_emul
+#endif
+
 	LONG_L	a2, TI_FLAGS($28)	# current->work
 	andi	t0, a2, _TIF_WORK_MASK	# (ignoring syscall_trace)
 	bnez	t0, work_pending
@@ -114,6 +119,19 @@ restore_partial:		# restore partial frame
 	RESTORE_SP_AND_RET
 	.set	at
 
+#ifdef CONFIG_MIPSR2_TO_R6_EMULATOR
+restore_all_from_r2_emul:			# restore full frame
+	.set	noat
+	sw	zero, TI_R2_EMUL_RET($28)	# reset it
+	RESTORE_TEMP
+	RESTORE_AT
+	RESTORE_STATIC
+	RESTORE_SOME
+	LONG_L	sp, PT_R29(sp)
+	eretnc
+	.set	at
+#endif
+
 work_pending:
 	andi	t0, a2, _TIF_NEED_RESCHED # a2 is preloaded with TI_FLAGS
 	beqz	t0, work_notifysig
@@ -158,7 +176,8 @@ syscall_exit_work:
 	jal	syscall_trace_leave
 	b	resume_userspace
 
-#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_MIPS_MT)
+#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6) || \
+    defined(CONFIG_MIPS_MT)
 
 /*
  * MIPS32R2 Instruction Hazard Barrier - must be called
@@ -171,4 +190,4 @@ LEAF(mips_ihb)
 	nop
 	END(mips_ihb)
 
-#endif /* CONFIG_CPU_MIPSR2 or CONFIG_MIPS_MT */
+#endif /* CONFIG_CPU_MIPSR2 or CONFIG_CPU_MIPSR6 or CONFIG_MIPS_MT */
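
The entry.S hunks above add a dedicated exit path for the R2 emulator: when the per-thread TI_R2_EMUL_RET word is set, the full register frame is restored and the kernel returns with eretnc, which, unlike eret, does not clear the LLbit, so emulating an instruction in the middle of a user LL/SC sequence does not make the SC fail spuriously. A C-flavoured sketch of that control flow; the struct and function here are illustrative stand-ins for the assembly above, not kernel code:

    /* Illustrative only: the real implementation is the assembly in entry.S. */
    struct thread_info_sketch {
        int r2_emul_ret;        /* stand-in for TI_R2_EMUL_RET */
    };

    enum ret_path { RET_NORMAL_ERET, RET_ERETNC_FULL_FRAME };

    static enum ret_path resume_userspace(struct thread_info_sketch *ti)
    {
        if (ti->r2_emul_ret) {
            ti->r2_emul_ret = 0;    /* "sw zero, TI_R2_EMUL_RET($28)" */
            /* Restore the full register frame, then eretnc so the
             * LLbit survives the emulation trap. */
            return RET_ERETNC_FULL_FRAME;
        }
        /* Usual path: check work flags, restore partial frame, eret. */
        return RET_NORMAL_ERET;
    }

    int main(void)
    {
        struct thread_info_sketch ti = { .r2_emul_ret = 1 };

        return resume_userspace(&ti) == RET_ERETNC_FULL_FRAME ? 0 : 1;
    }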

+ 1 - 1
arch/mips/kernel/genex.S

@@ -125,7 +125,7 @@ LEAF(__r4k_wait)
 	nop
 	nop
 #endif
-	.set	arch=r4000
+	.set	MIPS_ISA_ARCH_LEVEL_RAW
 	wait
 	/* end of rollback region (the region size must be power of two) */
 1:

+ 1 - 0
arch/mips/kernel/idle.c

@@ -186,6 +186,7 @@ void __init check_wait(void)
 	case CPU_PROAPTIV:
 	case CPU_P5600:
 	case CPU_M5150:
+	case CPU_QEMU_GENERIC:
 		cpu_wait = r4k_wait;
 		if (read_c0_config7() & MIPS_CONF7_WII)
 			cpu_wait = r4k_wait_irqoff;
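
The new file that follows dispatches R6-removed R2 instructions through mask/match decoder tables (spec_op_table and spec2_op_table below): an instruction word hits an entry when (inst & mask) == code, and that entry's handler emulates it. A minimal standalone sketch of the dispatch scheme; the mask/code pattern and the handler below are made up for illustration:

    #include <stdint.h>
    #include <stdio.h>

    struct decoder_entry {
        uint32_t mask;
        uint32_t code;
        int (*func)(uint32_t inst);
    };

    /* Hypothetical handler; the real ones update pt_regs, see below. */
    static int handle_example(uint32_t inst)
    {
        printf("matched 0x%08x\n", inst);
        return 0;
    }

    /* Terminated by an entry with a NULL handler, like the tables below. */
    static const struct decoder_entry table[] = {
        { 0xfc00003f, 0x00000018, handle_example },  /* made-up pattern */
        { 0 }
    };

    static int find_op_func(uint32_t inst)
    {
        const struct decoder_entry *p;

        for (p = table; p->func; p++)
            if ((inst & p->mask) == p->code)
                return p->func(inst);
        return -1;      /* the kernel returns SIGILL here */
    }

    int main(void)
    {
        return find_op_func(0x01290018) == 0 ? 0 : 1;
    }

The mask picks out the opcode and function fields while leaving the register fields free, so one table row covers every register combination of a given instruction.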

+ 2378 - 0
arch/mips/kernel/mips-r2-to-r6-emul.c

@@ -0,0 +1,2378 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2014 Imagination Technologies Ltd.
+ * Author: Leonid Yegoshin <Leonid.Yegoshin@imgtec.com>
+ * Author: Markos Chandras <markos.chandras@imgtec.com>
+ *
+ *      MIPS R2 user space instruction emulator for MIPS R6
+ *
+ */
+#include <linux/bug.h>
+#include <linux/compiler.h>
+#include <linux/debugfs.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/ptrace.h>
+#include <linux/seq_file.h>
+
+#include <asm/asm.h>
+#include <asm/branch.h>
+#include <asm/break.h>
+#include <asm/fpu.h>
+#include <asm/fpu_emulator.h>
+#include <asm/inst.h>
+#include <asm/mips-r2-to-r6-emul.h>
+#include <asm/local.h>
+#include <asm/ptrace.h>
+#include <asm/uaccess.h>
+
+#ifdef CONFIG_64BIT
+#define ADDIU	"daddiu "
+#define INS	"dins "
+#define EXT	"dext "
+#else
+#define ADDIU	"addiu "
+#define INS	"ins "
+#define EXT	"ext "
+#endif /* CONFIG_64BIT */
+
+#define SB	"sb "
+#define LB	"lb "
+#define LL	"ll "
+#define SC	"sc "
+
+DEFINE_PER_CPU(struct mips_r2_emulator_stats, mipsr2emustats);
+DEFINE_PER_CPU(struct mips_r2_emulator_stats, mipsr2bdemustats);
+DEFINE_PER_CPU(struct mips_r2br_emulator_stats, mipsr2bremustats);
+
+extern const unsigned int fpucondbit[8];
+
+#define MIPS_R2_EMUL_TOTAL_PASS	10
+
+int mipsr2_emulation = 0;
+
+static int __init mipsr2emu_enable(char *s)
+{
+	mipsr2_emulation = 1;
+
+	pr_info("MIPS R2-to-R6 Emulator Enabled!");
+
+	return 1;
+}
+__setup("mipsr2emu", mipsr2emu_enable);
+
+/**
+ * mipsr6_emul - Emulate some frequent R2/R5/R6 instructions in the delay
+ * slot for performance, instead of the traditional way of using a stack
+ * trampoline, which is rather slow.
+ * @regs: Process register set
+ * @ir: Instruction
+ */
+static inline int mipsr6_emul(struct pt_regs *regs, u32 ir)
+{
+	switch (MIPSInst_OPCODE(ir)) {
+	case addiu_op:
+		if (MIPSInst_RT(ir))
+			regs->regs[MIPSInst_RT(ir)] =
+				(s32)regs->regs[MIPSInst_RS(ir)] +
+				(s32)MIPSInst_SIMM(ir);
+		return 0;
+	case daddiu_op:
+		if (config_enabled(CONFIG_32BIT))
+			break;
+
+		if (MIPSInst_RT(ir))
+			regs->regs[MIPSInst_RT(ir)] =
+				(s64)regs->regs[MIPSInst_RS(ir)] +
+				(s64)MIPSInst_SIMM(ir);
+		return 0;
+	case lwc1_op:
+	case swc1_op:
+	case cop1_op:
+	case cop1x_op:
+		/* FPU instructions in delay slot */
+		return -SIGFPE;
+	case spec_op:
+		switch (MIPSInst_FUNC(ir)) {
+		case or_op:
+			if (MIPSInst_RD(ir))
+				regs->regs[MIPSInst_RD(ir)] =
+					regs->regs[MIPSInst_RS(ir)] |
+					regs->regs[MIPSInst_RT(ir)];
+			return 0;
+		case sll_op:
+			if (MIPSInst_RS(ir))
+				break;
+
+			if (MIPSInst_RD(ir))
+				regs->regs[MIPSInst_RD(ir)] =
+					(s32)(((u32)regs->regs[MIPSInst_RT(ir)]) <<
+						MIPSInst_FD(ir));
+			return 0;
+		case srl_op:
+			if (MIPSInst_RS(ir))
+				break;
+
+			if (MIPSInst_RD(ir))
+				regs->regs[MIPSInst_RD(ir)] =
+					(s32)(((u32)regs->regs[MIPSInst_RT(ir)]) >>
+						MIPSInst_FD(ir));
+			return 0;
+		case addu_op:
+			if (MIPSInst_FD(ir))
+				break;
+
+			if (MIPSInst_RD(ir))
+				regs->regs[MIPSInst_RD(ir)] =
+					(s32)((u32)regs->regs[MIPSInst_RS(ir)] +
+					      (u32)regs->regs[MIPSInst_RT(ir)]);
+			return 0;
+		case subu_op:
+			if (MIPSInst_FD(ir))
+				break;
+
+			if (MIPSInst_RD(ir))
+				regs->regs[MIPSInst_RD(ir)] =
+					(s32)((u32)regs->regs[MIPSInst_RS(ir)] -
+					      (u32)regs->regs[MIPSInst_RT(ir)]);
+			return 0;
+		case dsll_op:
+			if (config_enabled(CONFIG_32BIT) || MIPSInst_RS(ir))
+				break;
+
+			if (MIPSInst_RD(ir))
+				regs->regs[MIPSInst_RD(ir)] =
+					(s64)(((u64)regs->regs[MIPSInst_RT(ir)]) <<
+						MIPSInst_FD(ir));
+			return 0;
+		case dsrl_op:
+			if (config_enabled(CONFIG_32BIT) || MIPSInst_RS(ir))
+				break;
+
+			if (MIPSInst_RD(ir))
+				regs->regs[MIPSInst_RD(ir)] =
+					(s64)(((u64)regs->regs[MIPSInst_RT(ir)]) >>
+						MIPSInst_FD(ir));
+			return 0;
+		case daddu_op:
+			if (config_enabled(CONFIG_32BIT) || MIPSInst_FD(ir))
+				break;
+
+			if (MIPSInst_RD(ir))
+				regs->regs[MIPSInst_RD(ir)] =
+					(u64)regs->regs[MIPSInst_RS(ir)] +
+					(u64)regs->regs[MIPSInst_RT(ir)];
+			return 0;
+		case dsubu_op:
+			if (config_enabled(CONFIG_32BIT) || MIPSInst_FD(ir))
+				break;
+
+			if (MIPSInst_RD(ir))
+				regs->regs[MIPSInst_RD(ir)] =
+					(s64)((u64)regs->regs[MIPSInst_RS(ir)] -
+					      (u64)regs->regs[MIPSInst_RT(ir)]);
+			return 0;
+		}
+		break;
+	default:
+		pr_debug("No fastpath BD emulation for instruction 0x%08x (op: %02x)\n",
+			 ir, MIPSInst_OPCODE(ir));
+	}
+
+	return SIGILL;
+}
+
+/**
+ * movf_func - Emulate a MOVF instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int movf_func(struct pt_regs *regs, u32 ir)
+{
+	u32 csr;
+	u32 cond;
+
+	csr = current->thread.fpu.fcr31;
+	cond = fpucondbit[MIPSInst_RT(ir) >> 2];
+	if (((csr & cond) == 0) && MIPSInst_RD(ir))
+		regs->regs[MIPSInst_RD(ir)] = regs->regs[MIPSInst_RS(ir)];
+	MIPS_R2_STATS(movs);
+	return 0;
+}
+
+/**
+ * movt_func - Emulate a MOVT instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int movt_func(struct pt_regs *regs, u32 ir)
+{
+	u32 csr;
+	u32 cond;
+
+	csr = current->thread.fpu.fcr31;
+	cond = fpucondbit[MIPSInst_RT(ir) >> 2];
+
+	if (((csr & cond) != 0) && MIPSInst_RD(ir))
+		regs->regs[MIPSInst_RD(ir)] = regs->regs[MIPSInst_RS(ir)];
+
+	MIPS_R2_STATS(movs);
+
+	return 0;
+}
+
+/**
+ * jr_func - Emulate a JR instruction.
+ * @pt_regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns SIGILL if JR was in delay slot, SIGEMT if we
+ * can't compute the EPC, SIGSEGV if we can't access the
+ * userland instruction or 0 on success.
+ */
+static int jr_func(struct pt_regs *regs, u32 ir)
+{
+	int err;
+	unsigned long cepc, epc, nepc;
+	u32 nir;
+
+	if (delay_slot(regs))
+		return SIGILL;
+
+	/* EPC after the RI/JR instruction */
+	nepc = regs->cp0_epc;
+	/* Roll back to the reserved R2 JR instruction */
+	regs->cp0_epc -= 4;
+	epc = regs->cp0_epc;
+	err = __compute_return_epc(regs);
+
+	if (err < 0)
+		return SIGEMT;
+
+	/* Computed EPC */
+	cepc = regs->cp0_epc;
+
+	/* Get DS instruction */
+	err = __get_user(nir, (u32 __user *)nepc);
+	if (err)
+		return SIGSEGV;
+
+	MIPS_R2BR_STATS(jrs);
+
+	/* If nir == 0 (NOP), then nothing else to do */
+	if (nir) {
+		/*
+		 * Negative err means FPU instruction in BD-slot,
+		 * Zero err means 'BD-slot emulation done'
+		 * For anything else we go back to trampoline emulation.
+		 */
+		err = mipsr6_emul(regs, nir);
+		if (err > 0) {
+			regs->cp0_epc = nepc;
+			err = mips_dsemul(regs, nir, cepc);
+			if (err == SIGILL)
+				err = SIGEMT;
+			MIPS_R2_STATS(dsemul);
+		}
+	}
+
+	return err;
+}
+
+/**
+ * movz_func - Emulate a MOVZ instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int movz_func(struct pt_regs *regs, u32 ir)
+{
+	if (((regs->regs[MIPSInst_RT(ir)]) == 0) && MIPSInst_RD(ir))
+		regs->regs[MIPSInst_RD(ir)] = regs->regs[MIPSInst_RS(ir)];
+	MIPS_R2_STATS(movs);
+
+	return 0;
+}
+
+/**
+ * movn_func - Emulate a MOVN instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int movn_func(struct pt_regs *regs, u32 ir)
+{
+	if (((regs->regs[MIPSInst_RT(ir)]) != 0) && MIPSInst_RD(ir))
+		regs->regs[MIPSInst_RD(ir)] = regs->regs[MIPSInst_RS(ir)];
+	MIPS_R2_STATS(movs);
+
+	return 0;
+}
+
+/**
+ * mfhi_func - Emulate a MFHI instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int mfhi_func(struct pt_regs *regs, u32 ir)
+{
+	if (MIPSInst_RD(ir))
+		regs->regs[MIPSInst_RD(ir)] = regs->hi;
+
+	MIPS_R2_STATS(hilo);
+
+	return 0;
+}
+
+/**
+ * mthi_func - Emulate a MTHI instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int mthi_func(struct pt_regs *regs, u32 ir)
+{
+	regs->hi = regs->regs[MIPSInst_RS(ir)];
+
+	MIPS_R2_STATS(hilo);
+
+	return 0;
+}
+
+/**
+ * mflo_func - Emulate a MFLO instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int mflo_func(struct pt_regs *regs, u32 ir)
+{
+	if (MIPSInst_RD(ir))
+		regs->regs[MIPSInst_RD(ir)] = regs->lo;
+
+	MIPS_R2_STATS(hilo);
+
+	return 0;
+}
+
+/**
+ * mtlo_func - Emulate a MTLO instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int mtlo_func(struct pt_regs *regs, u32 ir)
+{
+	regs->lo = regs->regs[MIPSInst_RS(ir)];
+
+	MIPS_R2_STATS(hilo);
+
+	return 0;
+}
+
+/**
+ * mult_func - Emulate a MULT instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int mult_func(struct pt_regs *regs, u32 ir)
+{
+	s64 res;
+	s32 rt, rs;
+
+	rt = regs->regs[MIPSInst_RT(ir)];
+	rs = regs->regs[MIPSInst_RS(ir)];
+	res = (s64)rt * (s64)rs;
+
+	rs = res;
+	regs->lo = (s64)rs;
+	rt = res >> 32;
+	res = (s64)rt;
+	regs->hi = res;
+
+	MIPS_R2_STATS(muls);
+
+	return 0;
+}
+
+/**
+ * multu_func - Emulate a MULTU instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int multu_func(struct pt_regs *regs, u32 ir)
+{
+	u64 res;
+	u32 rt, rs;
+
+	rt = regs->regs[MIPSInst_RT(ir)];
+	rs = regs->regs[MIPSInst_RS(ir)];
+	res = (u64)rt * (u64)rs;
+	rt = res;
+	regs->lo = (s64)rt;
+	regs->hi = (s64)(res >> 32);
+
+	MIPS_R2_STATS(muls);
+
+	return 0;
+}
+
+/**
+ * div_func - Emulate a DIV instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int div_func(struct pt_regs *regs, u32 ir)
+{
+	s32 rt, rs;
+
+	rt = regs->regs[MIPSInst_RT(ir)];
+	rs = regs->regs[MIPSInst_RS(ir)];
+
+	regs->lo = (s64)(rs / rt);
+	regs->hi = (s64)(rs % rt);
+
+	MIPS_R2_STATS(divs);
+
+	return 0;
+}
+
+/**
+ * divu_func - Emulate a DIVU instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int divu_func(struct pt_regs *regs, u32 ir)
+{
+	u32 rt, rs;
+
+	rt = regs->regs[MIPSInst_RT(ir)];
+	rs = regs->regs[MIPSInst_RS(ir)];
+
+	regs->lo = (s64)(rs / rt);
+	regs->hi = (s64)(rs % rt);
+
+	MIPS_R2_STATS(divs);
+
+	return 0;
+}
+
+/**
+ * dmult_func - Emulate a DMULT instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 on success or SIGILL for 32-bit kernels.
+ */
+static int dmult_func(struct pt_regs *regs, u32 ir)
+{
+	s64 res;
+	s64 rt, rs;
+
+	if (config_enabled(CONFIG_32BIT))
+		return SIGILL;
+
+	rt = regs->regs[MIPSInst_RT(ir)];
+	rs = regs->regs[MIPSInst_RS(ir)];
+	res = rt * rs;
+
+	regs->lo = res;
+	__asm__ __volatile__(
+		"dmuh %0, %1, %2\t\n"
+		: "=r"(res)
+		: "r"(rt), "r"(rs));
+
+	regs->hi = res;
+
+	MIPS_R2_STATS(muls);
+
+	return 0;
+}
+
+/**
+ * dmultu_func - Emulate a DMULTU instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 on success or SIGILL for 32-bit kernels.
+ */
+static int dmultu_func(struct pt_regs *regs, u32 ir)
+{
+	u64 res;
+	u64 rt, rs;
+
+	if (config_enabled(CONFIG_32BIT))
+		return SIGILL;
+
+	rt = regs->regs[MIPSInst_RT(ir)];
+	rs = regs->regs[MIPSInst_RS(ir)];
+	res = rt * rs;
+
+	regs->lo = res;
+	__asm__ __volatile__(
+		"dmuhu %0, %1, %2\t\n"
+		: "=r"(res)
+		: "r"(rt), "r"(rs));
+
+	regs->hi = res;
+
+	MIPS_R2_STATS(muls);
+
+	return 0;
+}
+
+/**
+ * ddiv_func - Emulate a DDIV instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 on success or SIGILL for 32-bit kernels.
+ */
+static int ddiv_func(struct pt_regs *regs, u32 ir)
+{
+	s64 rt, rs;
+
+	if (config_enabled(CONFIG_32BIT))
+		return SIGILL;
+
+	rt = regs->regs[MIPSInst_RT(ir)];
+	rs = regs->regs[MIPSInst_RS(ir)];
+
+	regs->lo = rs / rt;
+	regs->hi = rs % rt;
+
+	MIPS_R2_STATS(divs);
+
+	return 0;
+}
+
+/**
+ * ddivu_func - Emulate a DDIVU instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 on success or SIGILL for 32-bit kernels.
+ */
+static int ddivu_func(struct pt_regs *regs, u32 ir)
+{
+	u64 rt, rs;
+
+	if (config_enabled(CONFIG_32BIT))
+		return SIGILL;
+
+	rt = regs->regs[MIPSInst_RT(ir)];
+	rs = regs->regs[MIPSInst_RS(ir)];
+
+	regs->lo = rs / rt;
+	regs->hi = rs % rt;
+
+	MIPS_R2_STATS(divs);
+
+	return 0;
+}
+
+/* R6 removed instructions for the SPECIAL opcode */
+static struct r2_decoder_table spec_op_table[] = {
+	{ 0xfc1ff83f, 0x00000008, jr_func },
+	{ 0xfc00ffff, 0x00000018, mult_func },
+	{ 0xfc00ffff, 0x00000019, multu_func },
+	{ 0xfc00ffff, 0x0000001c, dmult_func },
+	{ 0xfc00ffff, 0x0000001d, dmultu_func },
+	{ 0xffff07ff, 0x00000010, mfhi_func },
+	{ 0xfc1fffff, 0x00000011, mthi_func },
+	{ 0xffff07ff, 0x00000012, mflo_func },
+	{ 0xfc1fffff, 0x00000013, mtlo_func },
+	{ 0xfc0307ff, 0x00000001, movf_func },
+	{ 0xfc0307ff, 0x00010001, movt_func },
+	{ 0xfc0007ff, 0x0000000a, movz_func },
+	{ 0xfc0007ff, 0x0000000b, movn_func },
+	{ 0xfc00ffff, 0x0000001a, div_func },
+	{ 0xfc00ffff, 0x0000001b, divu_func },
+	{ 0xfc00ffff, 0x0000001e, ddiv_func },
+	{ 0xfc00ffff, 0x0000001f, ddivu_func },
+	{}
+};
+
+/**
+ * madd_func - Emulate a MADD instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int madd_func(struct pt_regs *regs, u32 ir)
+{
+	s64 res;
+	s32 rt, rs;
+
+	rt = regs->regs[MIPSInst_RT(ir)];
+	rs = regs->regs[MIPSInst_RS(ir)];
+	res = (s64)rt * (s64)rs;
+	rt = regs->hi;
+	rs = regs->lo;
+	res += ((((s64)rt) << 32) | (u32)rs);
+
+	rt = res;
+	regs->lo = (s64)rt;
+	rs = res >> 32;
+	regs->hi = (s64)rs;
+
+	MIPS_R2_STATS(dsps);
+
+	return 0;
+}
+
+/**
+ * maddu_func - Emulate a MADDU instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int maddu_func(struct pt_regs *regs, u32 ir)
+{
+	u64 res;
+	u32 rt, rs;
+
+	rt = regs->regs[MIPSInst_RT(ir)];
+	rs = regs->regs[MIPSInst_RS(ir)];
+	res = (u64)rt * (u64)rs;
+	rt = regs->hi;
+	rs = regs->lo;
+	res += ((((s64)rt) << 32) | (u32)rs);
+
+	rt = res;
+	regs->lo = (s64)rt;
+	rs = res >> 32;
+	regs->hi = (s64)rs;
+
+	MIPS_R2_STATS(dsps);
+
+	return 0;
+}
+
+/**
+ * msub_func - Emulate a MSUB instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int msub_func(struct pt_regs *regs, u32 ir)
+{
+	s64 res;
+	s32 rt, rs;
+
+	rt = regs->regs[MIPSInst_RT(ir)];
+	rs = regs->regs[MIPSInst_RS(ir)];
+	res = (s64)rt * (s64)rs;
+	rt = regs->hi;
+	rs = regs->lo;
+	res = ((((s64)rt) << 32) | (u32)rs) - res;
+
+	rt = res;
+	regs->lo = (s64)rt;
+	rs = res >> 32;
+	regs->hi = (s64)rs;
+
+	MIPS_R2_STATS(dsps);
+
+	return 0;
+}
+
+/**
+ * msubu_func - Emulate a MSUBU instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int msubu_func(struct pt_regs *regs, u32 ir)
+{
+	u64 res;
+	u32 rt, rs;
+
+	rt = regs->regs[MIPSInst_RT(ir)];
+	rs = regs->regs[MIPSInst_RS(ir)];
+	res = (u64)rt * (u64)rs;
+	rt = regs->hi;
+	rs = regs->lo;
+	res = ((((s64)rt) << 32) | (u32)rs) - res;
+
+	rt = res;
+	regs->lo = (s64)rt;
+	rs = res >> 32;
+	regs->hi = (s64)rs;
+
+	MIPS_R2_STATS(dsps);
+
+	return 0;
+}
+
+/**
+ * mul_func - Emulate a MUL instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int mul_func(struct pt_regs *regs, u32 ir)
+{
+	s64 res;
+	s32 rt, rs;
+
+	if (!MIPSInst_RD(ir))
+		return 0;
+	rt = regs->regs[MIPSInst_RT(ir)];
+	rs = regs->regs[MIPSInst_RS(ir)];
+	res = (s64)rt * (s64)rs;
+
+	rs = res;
+	regs->regs[MIPSInst_RD(ir)] = (s64)rs;
+
+	MIPS_R2_STATS(muls);
+
+	return 0;
+}
+
+/**
+ * clz_func - Emulate a CLZ instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int clz_func(struct pt_regs *regs, u32 ir)
+{
+	u32 res;
+	u32 rs;
+
+	if (!MIPSInst_RD(ir))
+		return 0;
+
+	rs = regs->regs[MIPSInst_RS(ir)];
+	__asm__ __volatile__("clz %0, %1" : "=r"(res) : "r"(rs));
+	regs->regs[MIPSInst_RD(ir)] = res;
+
+	MIPS_R2_STATS(bops);
+
+	return 0;
+}
+
+/**
+ * clo_func - Emulate a CLO instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+
+static int clo_func(struct pt_regs *regs, u32 ir)
+{
+	u32 res;
+	u32 rs;
+
+	if (!MIPSInst_RD(ir))
+		return 0;
+
+	rs = regs->regs[MIPSInst_RS(ir)];
+	__asm__ __volatile__("clo %0, %1" : "=r"(res) : "r"(rs));
+	regs->regs[MIPSInst_RD(ir)] = res;
+
+	MIPS_R2_STATS(bops);
+
+	return 0;
+}
+
+/**
+ * dclz_func - Emulate a DCLZ instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int dclz_func(struct pt_regs *regs, u32 ir)
+{
+	u64 res;
+	u64 rs;
+
+	if (config_enabled(CONFIG_32BIT))
+		return SIGILL;
+
+	if (!MIPSInst_RD(ir))
+		return 0;
+
+	rs = regs->regs[MIPSInst_RS(ir)];
+	__asm__ __volatile__("dclz %0, %1" : "=r"(res) : "r"(rs));
+	regs->regs[MIPSInst_RD(ir)] = res;
+
+	MIPS_R2_STATS(bops);
+
+	return 0;
+}
+
+/**
+ * dclo_func - Emulate a DCLO instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int dclo_func(struct pt_regs *regs, u32 ir)
+{
+	u64 res;
+	u64 rs;
+
+	if (config_enabled(CONFIG_32BIT))
+		return SIGILL;
+
+	if (!MIPSInst_RD(ir))
+		return 0;
+
+	rs = regs->regs[MIPSInst_RS(ir)];
+	__asm__ __volatile__("dclo %0, %1" : "=r"(res) : "r"(rs));
+	regs->regs[MIPSInst_RD(ir)] = res;
+
+	MIPS_R2_STATS(bops);
+
+	return 0;
+}
+
+/* R6 removed instructions for the SPECIAL2 opcode */
+static struct r2_decoder_table spec2_op_table[] = {
+	{ 0xfc00ffff, 0x70000000, madd_func },
+	{ 0xfc00ffff, 0x70000001, maddu_func },
+	{ 0xfc0007ff, 0x70000002, mul_func },
+	{ 0xfc00ffff, 0x70000004, msub_func },
+	{ 0xfc00ffff, 0x70000005, msubu_func },
+	{ 0xfc0007ff, 0x70000020, clz_func },
+	{ 0xfc0007ff, 0x70000021, clo_func },
+	{ 0xfc0007ff, 0x70000024, dclz_func },
+	{ 0xfc0007ff, 0x70000025, dclo_func },
+	{ }
+};
+
+static inline int mipsr2_find_op_func(struct pt_regs *regs, u32 inst,
+				      struct r2_decoder_table *table)
+{
+	struct r2_decoder_table *p;
+	int err;
+
+	for (p = table; p->func; p++) {
+		if ((inst & p->mask) == p->code) {
+			err = (p->func)(regs, inst);
+			return err;
+		}
+	}
+	return SIGILL;
+}
+
+/**
+ * mipsr2_decoder: Decode and emulate a MIPS R2 instruction
+ * @regs: Process register set
+ * @inst: Instruction to decode and emulate
+ */
+int mipsr2_decoder(struct pt_regs *regs, u32 inst)
+{
+	int err = 0;
+	unsigned long vaddr;
+	u32 nir;
+	unsigned long cpc, epc, nepc, r31, res, rs, rt;
+
+	void __user *fault_addr = NULL;
+	int pass = 0;
+
+repeat:
+	r31 = regs->regs[31];
+	epc = regs->cp0_epc;
+	err = compute_return_epc(regs);
+	if (err < 0) {
+		BUG();
+		return SIGEMT;
+	}
+	pr_debug("Emulating the 0x%08x R2 instruction @ 0x%08lx (pass=%d))\n",
+		 inst, epc, pass);
+
+	switch (MIPSInst_OPCODE(inst)) {
+	case spec_op:
+		err = mipsr2_find_op_func(regs, inst, spec_op_table);
+		if (err < 0) {
+			/* FPU instruction under JR */
+			regs->cp0_cause |= CAUSEF_BD;
+			goto fpu_emul;
+		}
+		break;
+	case spec2_op:
+		err = mipsr2_find_op_func(regs, inst, spec2_op_table);
+		break;
+	case bcond_op:
+		rt = MIPSInst_RT(inst);
+		rs = MIPSInst_RS(inst);
+		switch (rt) {
+		case tgei_op:
+			if ((long)regs->regs[rs] >= MIPSInst_SIMM(inst))
+				do_trap_or_bp(regs, 0, "TGEI");
+
+			MIPS_R2_STATS(traps);
+
+			break;
+		case tgeiu_op:
+			if (regs->regs[rs] >= MIPSInst_UIMM(inst))
+				do_trap_or_bp(regs, 0, "TGEIU");
+
+			MIPS_R2_STATS(traps);
+
+			break;
+		case tlti_op:
+			if ((long)regs->regs[rs] < MIPSInst_SIMM(inst))
+				do_trap_or_bp(regs, 0, "TLTI");
+
+			MIPS_R2_STATS(traps);
+
+			break;
+		case tltiu_op:
+			if (regs->regs[rs] < MIPSInst_UIMM(inst))
+				do_trap_or_bp(regs, 0, "TLTIU");
+
+			MIPS_R2_STATS(traps);
+
+			break;
+		case teqi_op:
+			if (regs->regs[rs] == MIPSInst_SIMM(inst))
+				do_trap_or_bp(regs, 0, "TEQI");
+
+			MIPS_R2_STATS(traps);
+
+			break;
+		case tnei_op:
+			if (regs->regs[rs] != MIPSInst_SIMM(inst))
+				do_trap_or_bp(regs, 0, "TNEI");
+
+			MIPS_R2_STATS(traps);
+
+			break;
+		case bltzl_op:
+		case bgezl_op:
+		case bltzall_op:
+		case bgezall_op:
+			if (delay_slot(regs)) {
+				err = SIGILL;
+				break;
+			}
+			regs->regs[31] = r31;
+			regs->cp0_epc = epc;
+			err = __compute_return_epc(regs);
+			if (err < 0)
+				return SIGEMT;
+			if (err != BRANCH_LIKELY_TAKEN)
+				break;
+			cpc = regs->cp0_epc;
+			nepc = epc + 4;
+			err = __get_user(nir, (u32 __user *)nepc);
+			if (err) {
+				err = SIGSEGV;
+				break;
+			}
+			/*
+			 * This will probably be optimized away when
+			 * CONFIG_DEBUG_FS is not enabled
+			 */
+			switch (rt) {
+			case bltzl_op:
+				MIPS_R2BR_STATS(bltzl);
+				break;
+			case bgezl_op:
+				MIPS_R2BR_STATS(bgezl);
+				break;
+			case bltzall_op:
+				MIPS_R2BR_STATS(bltzall);
+				break;
+			case bgezall_op:
+				MIPS_R2BR_STATS(bgezall);
+				break;
+			}
+
+			switch (MIPSInst_OPCODE(nir)) {
+			case cop1_op:
+			case cop1x_op:
+			case lwc1_op:
+			case swc1_op:
+				regs->cp0_cause |= CAUSEF_BD;
+				goto fpu_emul;
+			}
+			if (nir) {
+				err = mipsr6_emul(regs, nir);
+				if (err > 0) {
+					err = mips_dsemul(regs, nir, cpc);
+					if (err == SIGILL)
+						err = SIGEMT;
+					MIPS_R2_STATS(dsemul);
+				}
+			}
+			break;
+		case bltzal_op:
+		case bgezal_op:
+			if (delay_slot(regs)) {
+				err = SIGILL;
+				break;
+			}
+			regs->regs[31] = r31;
+			regs->cp0_epc = epc;
+			err = __compute_return_epc(regs);
+			if (err < 0)
+				return SIGEMT;
+			cpc = regs->cp0_epc;
+			nepc = epc + 4;
+			err = __get_user(nir, (u32 __user *)nepc);
+			if (err) {
+				err = SIGSEGV;
+				break;
+			}
+			/*
+			 * This will probably be optimized away when
+			 * CONFIG_DEBUG_FS is not enabled
+			 */
+			switch (rt) {
+			case bltzal_op:
+				MIPS_R2BR_STATS(bltzal);
+				break;
+			case bgezal_op:
+				MIPS_R2BR_STATS(bgezal);
+				break;
+			}
+
+			switch (MIPSInst_OPCODE(nir)) {
+			case cop1_op:
+			case cop1x_op:
+			case lwc1_op:
+			case swc1_op:
+				regs->cp0_cause |= CAUSEF_BD;
+				goto fpu_emul;
+			}
+			if (nir) {
+				err = mipsr6_emul(regs, nir);
+				if (err > 0) {
+					err = mips_dsemul(regs, nir, cpc);
+					if (err == SIGILL)
+						err = SIGEMT;
+					MIPS_R2_STATS(dsemul);
+				}
+			}
+			break;
+		default:
+			regs->regs[31] = r31;
+			regs->cp0_epc = epc;
+			err = SIGILL;
+			break;
+		}
+		break;
+
+	case beql_op:
+	case bnel_op:
+	case blezl_op:
+	case bgtzl_op:
+		if (delay_slot(regs)) {
+			err = SIGILL;
+			break;
+		}
+		regs->regs[31] = r31;
+		regs->cp0_epc = epc;
+		err = __compute_return_epc(regs);
+		if (err < 0)
+			return SIGEMT;
+		if (err != BRANCH_LIKELY_TAKEN)
+			break;
+		cpc = regs->cp0_epc;
+		nepc = epc + 4;
+		err = __get_user(nir, (u32 __user *)nepc);
+		if (err) {
+			err = SIGSEGV;
+			break;
+		}
+		/*
+		 * This will probably be optimized away when
+		 * CONFIG_DEBUG_FS is not enabled
+		 */
+		switch (MIPSInst_OPCODE(inst)) {
+		case beql_op:
+			MIPS_R2BR_STATS(beql);
+			break;
+		case bnel_op:
+			MIPS_R2BR_STATS(bnel);
+			break;
+		case blezl_op:
+			MIPS_R2BR_STATS(blezl);
+			break;
+		case bgtzl_op:
+			MIPS_R2BR_STATS(bgtzl);
+			break;
+		}
+
+		switch (MIPSInst_OPCODE(nir)) {
+		case cop1_op:
+		case cop1x_op:
+		case lwc1_op:
+		case swc1_op:
+			regs->cp0_cause |= CAUSEF_BD;
+			goto fpu_emul;
+		}
+		if (nir) {
+			err = mipsr6_emul(regs, nir);
+			if (err > 0) {
+				err = mips_dsemul(regs, nir, cpc);
+				if (err == SIGILL)
+					err = SIGEMT;
+				MIPS_R2_STATS(dsemul);
+			}
+		}
+		break;
+	case lwc1_op:
+	case swc1_op:
+	case cop1_op:
+	case cop1x_op:
+fpu_emul:
+		regs->regs[31] = r31;
+		regs->cp0_epc = epc;
+		if (!used_math()) {     /* First time FPU user.  */
+			err = init_fpu();
+			set_used_math();
+		}
+		lose_fpu(1);    /* Save FPU state for the emulator. */
+
+		err = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 0,
+					       &fault_addr);
+
+		/*
+		 * This is a tricky issue - lose_fpu() uses LL/SC atomics
+		 * if the FPU is owned, which effectively cancels a user-level
+		 * LL/SC. So it might seem logical not to restore FPU ownership
+		 * here. But a sequence of multiple FPU instructions is much
+		 * more common than LL-FPU-SC, so we prefer to loop here until
+		 * the next scheduler cycle cancels FPU ownership.
+		 */
+		own_fpu(1);	/* Restore FPU state. */
+
+		if (err)
+			current->thread.cp0_baduaddr = (unsigned long)fault_addr;
+
+		MIPS_R2_STATS(fpus);
+
+		break;
+
+	case lwl_op:
+		rt = regs->regs[MIPSInst_RT(inst)];
+		vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
+		if (!access_ok(VERIFY_READ, vaddr, 4)) {
+			current->thread.cp0_baduaddr = vaddr;
+			err = SIGSEGV;
+			break;
+		}
+		__asm__ __volatile__(
+			"	.set	push\n"
+			"	.set	reorder\n"
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+			"1:"	LB	"%1, 0(%2)\n"
+				INS	"%0, %1, 24, 8\n"
+			"	andi	%1, %2, 0x3\n"
+			"	beq	$0, %1, 9f\n"
+				ADDIU	"%2, %2, -1\n"
+			"2:"	LB	"%1, 0(%2)\n"
+				INS	"%0, %1, 16, 8\n"
+			"	andi	%1, %2, 0x3\n"
+			"	beq	$0, %1, 9f\n"
+				ADDIU	"%2, %2, -1\n"
+			"3:"	LB	"%1, 0(%2)\n"
+				INS	"%0, %1, 8, 8\n"
+			"	andi	%1, %2, 0x3\n"
+			"	beq	$0, %1, 9f\n"
+				ADDIU	"%2, %2, -1\n"
+			"4:"	LB	"%1, 0(%2)\n"
+				INS	"%0, %1, 0, 8\n"
+#else /* !CONFIG_CPU_LITTLE_ENDIAN */
+			"1:"	LB	"%1, 0(%2)\n"
+				INS	"%0, %1, 24, 8\n"
+				ADDIU	"%2, %2, 1\n"
+			"	andi	%1, %2, 0x3\n"
+			"	beq	$0, %1, 9f\n"
+			"2:"	LB	"%1, 0(%2)\n"
+				INS	"%0, %1, 16, 8\n"
+				ADDIU	"%2, %2, 1\n"
+			"	andi	%1, %2, 0x3\n"
+			"	beq	$0, %1, 9f\n"
+			"3:"	LB	"%1, 0(%2)\n"
+				INS	"%0, %1, 8, 8\n"
+				ADDIU	"%2, %2, 1\n"
+			"	andi	%1, %2, 0x3\n"
+			"	beq	$0, %1, 9f\n"
+			"4:"	LB	"%1, 0(%2)\n"
+				INS	"%0, %1, 0, 8\n"
+#endif /* CONFIG_CPU_LITTLE_ENDIAN */
+			"9:	sll	%0, %0, 0\n"
+			"10:\n"
+			"	.insn\n"
+			"	.section	.fixup,\"ax\"\n"
+			"8:	li	%3,%4\n"
+			"	j	10b\n"
+			"	.previous\n"
+			"	.section	__ex_table,\"a\"\n"
+			"	.word	1b,8b\n"
+			"	.word	2b,8b\n"
+			"	.word	3b,8b\n"
+			"	.word	4b,8b\n"
+			"	.previous\n"
+			"	.set	pop\n"
+			: "+&r"(rt), "=&r"(rs),
+			  "+&r"(vaddr), "+&r"(err)
+			: "i"(SIGSEGV));
+
+		if (MIPSInst_RT(inst) && !err)
+			regs->regs[MIPSInst_RT(inst)] = rt;
+
+		MIPS_R2_STATS(loads);
+
+		break;
+
+	case lwr_op:
+		rt = regs->regs[MIPSInst_RT(inst)];
+		vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
+		if (!access_ok(VERIFY_READ, vaddr, 4)) {
+			current->thread.cp0_baduaddr = vaddr;
+			err = SIGSEGV;
+			break;
+		}
+		__asm__ __volatile__(
+			"       .set	push\n"
+			"       .set	reorder\n"
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+			"1:"    LB	"%1, 0(%2)\n"
+				INS	"%0, %1, 0, 8\n"
+				ADDIU	"%2, %2, 1\n"
+			"       andi	%1, %2, 0x3\n"
+			"       beq	$0, %1, 9f\n"
+			"2:"    LB	"%1, 0(%2)\n"
+				INS	"%0, %1, 8, 8\n"
+				ADDIU	"%2, %2, 1\n"
+			"       andi	%1, %2, 0x3\n"
+			"       beq	$0, %1, 9f\n"
+			"3:"    LB	"%1, 0(%2)\n"
+				INS	"%0, %1, 16, 8\n"
+				ADDIU	"%2, %2, 1\n"
+			"       andi	%1, %2, 0x3\n"
+			"       beq	$0, %1, 9f\n"
+			"4:"    LB	"%1, 0(%2)\n"
+				INS	"%0, %1, 24, 8\n"
+			"       sll	%0, %0, 0\n"
+#else /* !CONFIG_CPU_LITTLE_ENDIAN */
+			"1:"    LB	"%1, 0(%2)\n"
+				INS	"%0, %1, 0, 8\n"
+			"       andi	%1, %2, 0x3\n"
+			"       beq	$0, %1, 9f\n"
+				ADDIU	"%2, %2, -1\n"
+			"2:"    LB	"%1, 0(%2)\n"
+				INS	"%0, %1, 8, 8\n"
+			"       andi	%1, %2, 0x3\n"
+			"       beq	$0, %1, 9f\n"
+				ADDIU	"%2, %2, -1\n"
+			"3:"    LB	"%1, 0(%2)\n"
+				INS	"%0, %1, 16, 8\n"
+			"       andi	%1, %2, 0x3\n"
+			"       beq	$0, %1, 9f\n"
+				ADDIU	"%2, %2, -1\n"
+			"4:"    LB	"%1, 0(%2)\n"
+				INS	"%0, %1, 24, 8\n"
+			"       sll	%0, %0, 0\n"
+#endif /* CONFIG_CPU_LITTLE_ENDIAN */
+			"9:\n"
+			"10:\n"
+			"	.insn\n"
+			"	.section	.fixup,\"ax\"\n"
+			"8:	li	%3,%4\n"
+			"	j	10b\n"
+			"       .previous\n"
+			"	.section	__ex_table,\"a\"\n"
+			"	.word	1b,8b\n"
+			"	.word	2b,8b\n"
+			"	.word	3b,8b\n"
+			"	.word	4b,8b\n"
+			"	.previous\n"
+			"	.set	pop\n"
+			: "+&r"(rt), "=&r"(rs),
+			  "+&r"(vaddr), "+&r"(err)
+			: "i"(SIGSEGV));
+		if (MIPSInst_RT(inst) && !err)
+			regs->regs[MIPSInst_RT(inst)] = rt;
+
+		MIPS_R2_STATS(loads);
+
+		break;
+
+	case swl_op:
+		rt = regs->regs[MIPSInst_RT(inst)];
+		vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
+		if (!access_ok(VERIFY_WRITE, vaddr, 4)) {
+			current->thread.cp0_baduaddr = vaddr;
+			err = SIGSEGV;
+			break;
+		}
+		__asm__ __volatile__(
+			"	.set	push\n"
+			"	.set	reorder\n"
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+				EXT	"%1, %0, 24, 8\n"
+			"1:"	SB	"%1, 0(%2)\n"
+			"	andi	%1, %2, 0x3\n"
+			"	beq	$0, %1, 9f\n"
+				ADDIU	"%2, %2, -1\n"
+				EXT	"%1, %0, 16, 8\n"
+			"2:"	SB	"%1, 0(%2)\n"
+			"	andi	%1, %2, 0x3\n"
+			"	beq	$0, %1, 9f\n"
+				ADDIU	"%2, %2, -1\n"
+				EXT	"%1, %0, 8, 8\n"
+			"3:"	SB	"%1, 0(%2)\n"
+			"	andi	%1, %2, 0x3\n"
+			"	beq	$0, %1, 9f\n"
+				ADDIU	"%2, %2, -1\n"
+				EXT	"%1, %0, 0, 8\n"
+			"4:"	SB	"%1, 0(%2)\n"
+#else /* !CONFIG_CPU_LITTLE_ENDIAN */
+				EXT	"%1, %0, 24, 8\n"
+			"1:"	SB	"%1, 0(%2)\n"
+				ADDIU	"%2, %2, 1\n"
+			"	andi	%1, %2, 0x3\n"
+			"	beq	$0, %1, 9f\n"
+				EXT	"%1, %0, 16, 8\n"
+			"2:"	SB	"%1, 0(%2)\n"
+				ADDIU	"%2, %2, 1\n"
+			"	andi	%1, %2, 0x3\n"
+			"	beq	$0, %1, 9f\n"
+				EXT	"%1, %0, 8, 8\n"
+			"3:"	SB	"%1, 0(%2)\n"
+				ADDIU	"%2, %2, 1\n"
+			"	andi	%1, %2, 0x3\n"
+			"	beq	$0, %1, 9f\n"
+				EXT	"%1, %0, 0, 8\n"
+			"4:"	SB	"%1, 0(%2)\n"
+#endif /* CONFIG_CPU_LITTLE_ENDIAN */
+			"9:\n"
+			"	.insn\n"
+			"       .section        .fixup,\"ax\"\n"
+			"8:	li	%3,%4\n"
+			"	j	9b\n"
+			"	.previous\n"
+			"	.section        __ex_table,\"a\"\n"
+			"	.word	1b,8b\n"
+			"	.word	2b,8b\n"
+			"	.word	3b,8b\n"
+			"	.word	4b,8b\n"
+			"	.previous\n"
+			"	.set	pop\n"
+			: "+&r"(rt), "=&r"(rs),
+			  "+&r"(vaddr), "+&r"(err)
+			: "i"(SIGSEGV)
+			: "memory");
+
+		MIPS_R2_STATS(stores);
+
+		break;
+
+	case swr_op:
+		rt = regs->regs[MIPSInst_RT(inst)];
+		vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
+		if (!access_ok(VERIFY_WRITE, vaddr, 4)) {
+			current->thread.cp0_baduaddr = vaddr;
+			err = SIGSEGV;
+			break;
+		}
+		__asm__ __volatile__(
+			"	.set	push\n"
+			"	.set	reorder\n"
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+				EXT	"%1, %0, 0, 8\n"
+			"1:"	SB	"%1, 0(%2)\n"
+				ADDIU	"%2, %2, 1\n"
+			"	andi	%1, %2, 0x3\n"
+			"	beq	$0, %1, 9f\n"
+				EXT	"%1, %0, 8, 8\n"
+			"2:"	SB	"%1, 0(%2)\n"
+				ADDIU	"%2, %2, 1\n"
+			"	andi	%1, %2, 0x3\n"
+			"	beq	$0, %1, 9f\n"
+				EXT	"%1, %0, 16, 8\n"
+			"3:"	SB	"%1, 0(%2)\n"
+				ADDIU	"%2, %2, 1\n"
+			"	andi	%1, %2, 0x3\n"
+			"	beq	$0, %1, 9f\n"
+				EXT	"%1, %0, 24, 8\n"
+			"4:"	SB	"%1, 0(%2)\n"
+#else /* !CONFIG_CPU_LITTLE_ENDIAN */
+				EXT	"%1, %0, 0, 8\n"
+			"1:"	SB	"%1, 0(%2)\n"
+			"	andi	%1, %2, 0x3\n"
+			"	beq	$0, %1, 9f\n"
+				ADDIU	"%2, %2, -1\n"
+				EXT	"%1, %0, 8, 8\n"
+			"2:"	SB	"%1, 0(%2)\n"
+			"	andi	%1, %2, 0x3\n"
+			"	beq	$0, %1, 9f\n"
+				ADDIU	"%2, %2, -1\n"
+				EXT	"%1, %0, 16, 8\n"
+			"3:"	SB	"%1, 0(%2)\n"
+			"	andi	%1, %2, 0x3\n"
+			"	beq	$0, %1, 9f\n"
+				ADDIU	"%2, %2, -1\n"
+				EXT	"%1, %0, 24, 8\n"
+			"4:"	SB	"%1, 0(%2)\n"
+#endif /* CONFIG_CPU_LITTLE_ENDIAN */
+			"9:\n"
+			"	.insn\n"
+			"	.section        .fixup,\"ax\"\n"
+			"8:	li	%3,%4\n"
+			"	j	9b\n"
+			"	.previous\n"
+			"	.section        __ex_table,\"a\"\n"
+			"	.word	1b,8b\n"
+			"	.word	2b,8b\n"
+			"	.word	3b,8b\n"
+			"	.word	4b,8b\n"
+			"	.previous\n"
+			"	.set	pop\n"
+			: "+&r"(rt), "=&r"(rs),
+			  "+&r"(vaddr), "+&r"(err)
+			: "i"(SIGSEGV)
+			: "memory");
+
+		MIPS_R2_STATS(stores);
+
+		break;
+
+	case ldl_op:
+		if (config_enabled(CONFIG_32BIT)) {
+			err = SIGILL;
+			break;
+		}
+
+		rt = regs->regs[MIPSInst_RT(inst)];
+		vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
+		if (!access_ok(VERIFY_READ, vaddr, 8)) {
+			current->thread.cp0_baduaddr = vaddr;
+			err = SIGSEGV;
+			break;
+		}
+		__asm__ __volatile__(
+			"	.set    push\n"
+			"	.set    reorder\n"
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+			"1:	lb	%1, 0(%2)\n"
+			"	dinsu	%0, %1, 56, 8\n"
+			"	andi	%1, %2, 0x7\n"
+			"	beq	$0, %1, 9f\n"
+			"	daddiu	%2, %2, -1\n"
+			"2:	lb	%1, 0(%2)\n"
+			"	dinsu	%0, %1, 48, 8\n"
+			"	andi	%1, %2, 0x7\n"
+			"	beq	$0, %1, 9f\n"
+			"	daddiu	%2, %2, -1\n"
+			"3:	lb	%1, 0(%2)\n"
+			"	dinsu	%0, %1, 40, 8\n"
+			"	andi	%1, %2, 0x7\n"
+			"	beq	$0, %1, 9f\n"
+			"	daddiu	%2, %2, -1\n"
+			"4:	lb	%1, 0(%2)\n"
+			"	dinsu	%0, %1, 32, 8\n"
+			"	andi	%1, %2, 0x7\n"
+			"	beq	$0, %1, 9f\n"
+			"	daddiu	%2, %2, -1\n"
+			"5:	lb	%1, 0(%2)\n"
+			"	dins	%0, %1, 24, 8\n"
+			"	andi	%1, %2, 0x7\n"
+			"	beq	$0, %1, 9f\n"
+			"	daddiu	%2, %2, -1\n"
+			"6:	lb	%1, 0(%2)\n"
+			"	dins	%0, %1, 16, 8\n"
+			"	andi	%1, %2, 0x7\n"
+			"	beq	$0, %1, 9f\n"
+			"	daddiu	%2, %2, -1\n"
+			"7:	lb	%1, 0(%2)\n"
+			"	dins	%0, %1, 8, 8\n"
+			"	andi	%1, %2, 0x7\n"
+			"	beq	$0, %1, 9f\n"
+			"	daddiu	%2, %2, -1\n"
+			"0:	lb	%1, 0(%2)\n"
+			"	dins	%0, %1, 0, 8\n"
+#else /* !CONFIG_CPU_LITTLE_ENDIAN */
+			"1:	lb	%1, 0(%2)\n"
+			"	dinsu	%0, %1, 56, 8\n"
+			"	daddiu	%2, %2, 1\n"
+			"	andi	%1, %2, 0x7\n"
+			"	beq	$0, %1, 9f\n"
+			"2:	lb	%1, 0(%2)\n"
+			"	dinsu	%0, %1, 48, 8\n"
+			"	daddiu	%2, %2, 1\n"
+			"	andi	%1, %2, 0x7\n"
+			"	beq	$0, %1, 9f\n"
+			"3:	lb	%1, 0(%2)\n"
+			"	dinsu	%0, %1, 40, 8\n"
+			"	daddiu  %2, %2, 1\n"
+			"	andi	%1, %2, 0x7\n"
+			"	beq	$0, %1, 9f\n"
+			"4:	lb	%1, 0(%2)\n"
+			"	dinsu	%0, %1, 32, 8\n"
+			"	daddiu	%2, %2, 1\n"
+			"	andi	%1, %2, 0x7\n"
+			"	beq	$0, %1, 9f\n"
+			"5:	lb	%1, 0(%2)\n"
+			"	dins	%0, %1, 24, 8\n"
+			"	daddiu	%2, %2, 1\n"
+			"	andi	%1, %2, 0x7\n"
+			"	beq	$0, %1, 9f\n"
+			"6:	lb	%1, 0(%2)\n"
+			"	dins	%0, %1, 16, 8\n"
+			"	daddiu	%2, %2, 1\n"
+			"	andi	%1, %2, 0x7\n"
+			"	beq	$0, %1, 9f\n"
+			"7:	lb	%1, 0(%2)\n"
+			"	dins	%0, %1, 8, 8\n"
+			"	daddiu	%2, %2, 1\n"
+			"	andi	%1, %2, 0x7\n"
+			"	beq	$0, %1, 9f\n"
+			"0:	lb	%1, 0(%2)\n"
+			"	dins	%0, %1, 0, 8\n"
+#endif /* CONFIG_CPU_LITTLE_ENDIAN */
+			"9:\n"
+			"	.insn\n"
+			"	.section        .fixup,\"ax\"\n"
+			"8:	li	%3,%4\n"
+			"	j	9b\n"
+			"	.previous\n"
+			"	.section        __ex_table,\"a\"\n"
+			"	.word	1b,8b\n"
+			"	.word	2b,8b\n"
+			"	.word	3b,8b\n"
+			"	.word	4b,8b\n"
+			"	.word	5b,8b\n"
+			"	.word	6b,8b\n"
+			"	.word	7b,8b\n"
+			"	.word	0b,8b\n"
+			"	.previous\n"
+			"	.set	pop\n"
+			: "+&r"(rt), "=&r"(rs),
+			  "+&r"(vaddr), "+&r"(err)
+			: "i"(SIGSEGV));
+		if (MIPSInst_RT(inst) && !err)
+			regs->regs[MIPSInst_RT(inst)] = rt;
+
+		MIPS_R2_STATS(loads);
+		break;
+
+	case ldr_op:
+		if (config_enabled(CONFIG_32BIT)) {
+			err = SIGILL;
+			break;
+		}
+
+		rt = regs->regs[MIPSInst_RT(inst)];
+		vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
+		if (!access_ok(VERIFY_READ, vaddr, 8)) {
+			current->thread.cp0_baduaddr = vaddr;
+			err = SIGSEGV;
+			break;
+		}
+		__asm__ __volatile__(
+			"	.set    push\n"
+			"	.set    reorder\n"
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+			"1:	lb      %1, 0(%2)\n"
+			"	dins   %0, %1, 0, 8\n"
+			"	daddiu  %2, %2, 1\n"
+			"	andi    %1, %2, 0x7\n"
+			"	beq     $0, %1, 9f\n"
+			"2:	lb      %1, 0(%2)\n"
+			"	dins   %0, %1, 8, 8\n"
+			"	daddiu  %2, %2, 1\n"
+			"	andi    %1, %2, 0x7\n"
+			"	beq     $0, %1, 9f\n"
+			"3:	lb      %1, 0(%2)\n"
+			"	dins   %0, %1, 16, 8\n"
+			"	daddiu  %2, %2, 1\n"
+			"	andi    %1, %2, 0x7\n"
+			"	beq     $0, %1, 9f\n"
+			"4:	lb      %1, 0(%2)\n"
+			"	dins   %0, %1, 24, 8\n"
+			"	daddiu  %2, %2, 1\n"
+			"	andi    %1, %2, 0x7\n"
+			"	beq     $0, %1, 9f\n"
+			"5:	lb      %1, 0(%2)\n"
+			"	dinsu    %0, %1, 32, 8\n"
+			"	daddiu  %2, %2, 1\n"
+			"	andi    %1, %2, 0x7\n"
+			"	beq     $0, %1, 9f\n"
+			"6:	lb      %1, 0(%2)\n"
+			"	dinsu    %0, %1, 40, 8\n"
+			"	daddiu  %2, %2, 1\n"
+			"	andi    %1, %2, 0x7\n"
+			"	beq     $0, %1, 9f\n"
+			"7:	lb      %1, 0(%2)\n"
+			"	dinsu    %0, %1, 48, 8\n"
+			"	daddiu  %2, %2, 1\n"
+			"	andi    %1, %2, 0x7\n"
+			"	beq     $0, %1, 9f\n"
+			"0:	lb      %1, 0(%2)\n"
+			"	dinsu    %0, %1, 56, 8\n"
+#else /* !CONFIG_CPU_LITTLE_ENDIAN */
+			"1:	lb      %1, 0(%2)\n"
+			"	dins   %0, %1, 0, 8\n"
+			"	andi    %1, %2, 0x7\n"
+			"	beq     $0, %1, 9f\n"
+			"	daddiu  %2, %2, -1\n"
+			"2:	lb      %1, 0(%2)\n"
+			"	dins   %0, %1, 8, 8\n"
+			"	andi    %1, %2, 0x7\n"
+			"	beq     $0, %1, 9f\n"
+			"	daddiu  %2, %2, -1\n"
+			"3:	lb      %1, 0(%2)\n"
+			"	dins   %0, %1, 16, 8\n"
+			"	andi    %1, %2, 0x7\n"
+			"	beq     $0, %1, 9f\n"
+			"	daddiu  %2, %2, -1\n"
+			"4:	lb      %1, 0(%2)\n"
+			"	dins   %0, %1, 24, 8\n"
+			"	andi    %1, %2, 0x7\n"
+			"	beq     $0, %1, 9f\n"
+			"	daddiu  %2, %2, -1\n"
+			"5:	lb      %1, 0(%2)\n"
+			"	dinsu    %0, %1, 32, 8\n"
+			"	andi    %1, %2, 0x7\n"
+			"	beq     $0, %1, 9f\n"
+			"	daddiu  %2, %2, -1\n"
+			"6:	lb      %1, 0(%2)\n"
+			"	dinsu    %0, %1, 40, 8\n"
+			"	andi    %1, %2, 0x7\n"
+			"	beq     $0, %1, 9f\n"
+			"	daddiu  %2, %2, -1\n"
+			"7:	lb      %1, 0(%2)\n"
+			"	dinsu    %0, %1, 48, 8\n"
+			"	andi    %1, %2, 0x7\n"
+			"	beq     $0, %1, 9f\n"
+			"	daddiu  %2, %2, -1\n"
+			"0:	lb      %1, 0(%2)\n"
+			"	dinsu    %0, %1, 56, 8\n"
+#endif /* CONFIG_CPU_LITTLE_ENDIAN */
+			"9:\n"
+			"	.insn\n"
+			"	.section        .fixup,\"ax\"\n"
+			"8:	li     %3,%4\n"
+			"	j      9b\n"
+			"	.previous\n"
+			"	.section        __ex_table,\"a\"\n"
+			"	.word  1b,8b\n"
+			"	.word  2b,8b\n"
+			"	.word  3b,8b\n"
+			"	.word  4b,8b\n"
+			"	.word  5b,8b\n"
+			"	.word  6b,8b\n"
+			"	.word  7b,8b\n"
+			"	.word  0b,8b\n"
+			"	.previous\n"
+			"	.set    pop\n"
+			: "+&r"(rt), "=&r"(rs),
+			  "+&r"(vaddr), "+&r"(err)
+			: "i"(SIGSEGV));
+		if (MIPSInst_RT(inst) && !err)
+			regs->regs[MIPSInst_RT(inst)] = rt;
+
+		MIPS_R2_STATS(loads);
+		break;
+
+	case sdl_op:
+		if (config_enabled(CONFIG_32BIT)) {
+			err = SIGILL;
+			break;
+		}
+
+		rt = regs->regs[MIPSInst_RT(inst)];
+		vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
+		if (!access_ok(VERIFY_WRITE, vaddr, 8)) {
+			current->thread.cp0_baduaddr = vaddr;
+			err = SIGSEGV;
+			break;
+		}
+		__asm__ __volatile__(
+			"	.set	push\n"
+			"	.set	reorder\n"
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+			"	dextu	%1, %0, 56, 8\n"
+			"1:	sb	%1, 0(%2)\n"
+			"	andi	%1, %2, 0x7\n"
+			"	beq	$0, %1, 9f\n"
+			"	daddiu	%2, %2, -1\n"
+			"	dextu	%1, %0, 48, 8\n"
+			"2:	sb	%1, 0(%2)\n"
+			"	andi	%1, %2, 0x7\n"
+			"	beq	$0, %1, 9f\n"
+			"	daddiu	%2, %2, -1\n"
+			"	dextu	%1, %0, 40, 8\n"
+			"3:	sb	%1, 0(%2)\n"
+			"	andi	%1, %2, 0x7\n"
+			"	beq	$0, %1, 9f\n"
+			"	daddiu	%2, %2, -1\n"
+			"	dextu	%1, %0, 32, 8\n"
+			"4:	sb	%1, 0(%2)\n"
+			"	andi	%1, %2, 0x7\n"
+			"	beq	$0, %1, 9f\n"
+			"	daddiu	%2, %2, -1\n"
+			"	dext	%1, %0, 24, 8\n"
+			"5:	sb	%1, 0(%2)\n"
+			"	andi	%1, %2, 0x7\n"
+			"	beq	$0, %1, 9f\n"
+			"	daddiu	%2, %2, -1\n"
+			"	dext	%1, %0, 16, 8\n"
+			"6:	sb	%1, 0(%2)\n"
+			"	andi	%1, %2, 0x7\n"
+			"	beq	$0, %1, 9f\n"
+			"	daddiu	%2, %2, -1\n"
+			"	dext	%1, %0, 8, 8\n"
+			"7:	sb	%1, 0(%2)\n"
+			"	andi	%1, %2, 0x7\n"
+			"	beq	$0, %1, 9f\n"
+			"	daddiu	%2, %2, -1\n"
+			"	dext	%1, %0, 0, 8\n"
+			"0:	sb	%1, 0(%2)\n"
+#else /* !CONFIG_CPU_LITTLE_ENDIAN */
+			"	dextu	%1, %0, 56, 8\n"
+			"1:	sb	%1, 0(%2)\n"
+			"	daddiu	%2, %2, 1\n"
+			"	andi	%1, %2, 0x7\n"
+			"	beq	$0, %1, 9f\n"
+			"	dextu	%1, %0, 48, 8\n"
+			"2:	sb	%1, 0(%2)\n"
+			"	daddiu	%2, %2, 1\n"
+			"	andi	%1, %2, 0x7\n"
+			"	beq	$0, %1, 9f\n"
+			"	dextu	%1, %0, 40, 8\n"
+			"3:	sb	%1, 0(%2)\n"
+			"	daddiu	%2, %2, 1\n"
+			"	andi	%1, %2, 0x7\n"
+			"	beq	$0, %1, 9f\n"
+			"	dextu	%1, %0, 32, 8\n"
+			"4:	sb	%1, 0(%2)\n"
+			"	daddiu	%2, %2, 1\n"
+			"	andi	%1, %2, 0x7\n"
+			"	beq	$0, %1, 9f\n"
+			"	dext	%1, %0, 24, 8\n"
+			"5:	sb	%1, 0(%2)\n"
+			"	daddiu	%2, %2, 1\n"
+			"	andi	%1, %2, 0x7\n"
+			"	beq	$0, %1, 9f\n"
+			"	dext	%1, %0, 16, 8\n"
+			"6:	sb	%1, 0(%2)\n"
+			"	daddiu	%2, %2, 1\n"
+			"	andi	%1, %2, 0x7\n"
+			"	beq	$0, %1, 9f\n"
+			"	dext	%1, %0, 8, 8\n"
+			"7:	sb	%1, 0(%2)\n"
+			"	daddiu	%2, %2, 1\n"
+			"	andi	%1, %2, 0x7\n"
+			"	beq	$0, %1, 9f\n"
+			"	dext	%1, %0, 0, 8\n"
+			"0:	sb	%1, 0(%2)\n"
+#endif /* CONFIG_CPU_LITTLE_ENDIAN */
+			"9:\n"
+			"	.insn\n"
+			"	.section        .fixup,\"ax\"\n"
+			"8:	li	%3,%4\n"
+			"	j	9b\n"
+			"	.previous\n"
+			"	.section        __ex_table,\"a\"\n"
+			"	.word	1b,8b\n"
+			"	.word	2b,8b\n"
+			"	.word	3b,8b\n"
+			"	.word	4b,8b\n"
+			"	.word	5b,8b\n"
+			"	.word	6b,8b\n"
+			"	.word	7b,8b\n"
+			"	.word	0b,8b\n"
+			"	.previous\n"
+			"	.set	pop\n"
+			: "+&r"(rt), "=&r"(rs),
+			  "+&r"(vaddr), "+&r"(err)
+			: "i"(SIGSEGV)
+			: "memory");
+
+		MIPS_R2_STATS(stores);
+		break;
+
+	case sdr_op:
+		if (config_enabled(CONFIG_32BIT)) {
+			err = SIGILL;
+			break;
+		}
+
+		rt = regs->regs[MIPSInst_RT(inst)];
+		vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
+		if (!access_ok(VERIFY_WRITE, vaddr, 8)) {
+			current->thread.cp0_baduaddr = vaddr;
+			err = SIGSEGV;
+			break;
+		}
+		__asm__ __volatile__(
+			"       .set	push\n"
+			"       .set	reorder\n"
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+			"       dext	%1, %0, 0, 8\n"
+			"1:     sb	%1, 0(%2)\n"
+			"       daddiu	%2, %2, 1\n"
+			"       andi	%1, %2, 0x7\n"
+			"       beq	$0, %1, 9f\n"
+			"       dext	%1, %0, 8, 8\n"
+			"2:     sb	%1, 0(%2)\n"
+			"       daddiu	%2, %2, 1\n"
+			"       andi	%1, %2, 0x7\n"
+			"       beq	$0, %1, 9f\n"
+			"       dext	%1, %0, 16, 8\n"
+			"3:     sb	%1, 0(%2)\n"
+			"       daddiu	%2, %2, 1\n"
+			"       andi	%1, %2, 0x7\n"
+			"       beq	$0, %1, 9f\n"
+			"       dext	%1, %0, 24, 8\n"
+			"4:     sb	%1, 0(%2)\n"
+			"       daddiu	%2, %2, 1\n"
+			"       andi	%1, %2, 0x7\n"
+			"       beq	$0, %1, 9f\n"
+			"       dextu	%1, %0, 32, 8\n"
+			"5:     sb	%1, 0(%2)\n"
+			"       daddiu	%2, %2, 1\n"
+			"       andi	%1, %2, 0x7\n"
+			"       beq	$0, %1, 9f\n"
+			"       dextu	%1, %0, 40, 8\n"
+			"6:     sb	%1, 0(%2)\n"
+			"       daddiu	%2, %2, 1\n"
+			"       andi	%1, %2, 0x7\n"
+			"       beq	$0, %1, 9f\n"
+			"       dextu	%1, %0, 48, 8\n"
+			"7:     sb	%1, 0(%2)\n"
+			"       daddiu	%2, %2, 1\n"
+			"       andi	%1, %2, 0x7\n"
+			"       beq	$0, %1, 9f\n"
+			"       dextu	%1, %0, 56, 8\n"
+			"0:     sb	%1, 0(%2)\n"
+#else /* !CONFIG_CPU_LITTLE_ENDIAN */
+			"       dext	%1, %0, 0, 8\n"
+			"1:     sb	%1, 0(%2)\n"
+			"       andi	%1, %2, 0x7\n"
+			"       beq	$0, %1, 9f\n"
+			"       daddiu	%2, %2, -1\n"
+			"       dext	%1, %0, 8, 8\n"
+			"2:     sb	%1, 0(%2)\n"
+			"       andi	%1, %2, 0x7\n"
+			"       beq	$0, %1, 9f\n"
+			"       daddiu	%2, %2, -1\n"
+			"       dext	%1, %0, 16, 8\n"
+			"3:     sb	%1, 0(%2)\n"
+			"       andi	%1, %2, 0x7\n"
+			"       beq	$0, %1, 9f\n"
+			"       daddiu	%2, %2, -1\n"
+			"       dext	%1, %0, 24, 8\n"
+			"4:     sb	%1, 0(%2)\n"
+			"       andi	%1, %2, 0x7\n"
+			"       beq	$0, %1, 9f\n"
+			"       daddiu	%2, %2, -1\n"
+			"       dextu	%1, %0, 32, 8\n"
+			"5:     sb	%1, 0(%2)\n"
+			"       andi	%1, %2, 0x7\n"
+			"       beq	$0, %1, 9f\n"
+			"       daddiu	%2, %2, -1\n"
+			"       dextu	%1, %0, 40, 8\n"
+			"6:     sb	%1, 0(%2)\n"
+			"       andi	%1, %2, 0x7\n"
+			"       beq	$0, %1, 9f\n"
+			"       daddiu	%2, %2, -1\n"
+			"       dextu	%1, %0, 48, 8\n"
+			"7:     sb	%1, 0(%2)\n"
+			"       andi	%1, %2, 0x7\n"
+			"       beq	$0, %1, 9f\n"
+			"       daddiu	%2, %2, -1\n"
+			"       dextu	%1, %0, 56, 8\n"
+			"0:     sb	%1, 0(%2)\n"
+#endif /* CONFIG_CPU_LITTLE_ENDIAN */
+			"9:\n"
+			"       .insn\n"
+			"       .section        .fixup,\"ax\"\n"
+			"8:     li	%3,%4\n"
+			"       j	9b\n"
+			"       .previous\n"
+			"       .section        __ex_table,\"a\"\n"
+			"       .word	1b,8b\n"
+			"       .word	2b,8b\n"
+			"       .word	3b,8b\n"
+			"       .word	4b,8b\n"
+			"       .word	5b,8b\n"
+			"       .word	6b,8b\n"
+			"       .word	7b,8b\n"
+			"       .word	0b,8b\n"
+			"       .previous\n"
+			"       .set	pop\n"
+			: "+&r"(rt), "=&r"(rs),
+			  "+&r"(vaddr), "+&r"(err)
+			: "i"(SIGSEGV)
+			: "memory");
+
+		MIPS_R2_STATS(stores);
+
+		break;
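Editorial note: the sdl/sdr cases above replace the removed R2 instructions with plain byte stores. As a plain-C illustration of the little-endian sdl loop (a sketch only; the fault handling done by the __ex_table fixups is omitted):

	#include <stdint.h>

	/*
	 * Sketch: LE sdl stores the MSB-end bytes of rt downward from vaddr
	 * until the address reaches an 8-byte boundary, mirroring the
	 * dextu/sb/daddiu sequence above.
	 */
	static void sdl_le_sketch(uint8_t *vaddr, uint64_t rt)
	{
		int shift = 56;

		for (;;) {
			*vaddr = rt >> shift;		/* dextu/dext + sb */
			if (((uintptr_t)vaddr & 0x7) == 0)
				break;			/* beq $0, %1, 9f */
			vaddr--;			/* daddiu %2, %2, -1 */
			shift -= 8;
		}
	}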
+	case ll_op:
+		vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
+		if (vaddr & 0x3) {
+			current->thread.cp0_baduaddr = vaddr;
+			err = SIGBUS;
+			break;
+		}
+		if (!access_ok(VERIFY_READ, vaddr, 4)) {
+			current->thread.cp0_baduaddr = vaddr;
+			err = SIGBUS;
+			break;
+		}
+
+		if (!cpu_has_rw_llb) {
+			/*
+			 * An LL/SC block can't be safely emulated without
+			 * Config5/LLB being available. So it's probably time to
+			 * kill our process before things get any worse. This is
+			 * because Config5/LLB allows us to use ERETNC so that
+			 * the LLAddr/LLB bit is not cleared when we return from
+			 * an exception. MIPS R2 LL/SC instructions trap with an
+			 * RI exception, so once we emulate them here, we
+			 * return to userland with ERETNC. That preserves the
+			 * LLAddr/LLB so the subsequent SC instruction will
+			 * succeed preserving the atomic semantics of the LL/SC
+			 * block. Without that, there is no safe way to emulate
+			 * an LL/SC block in MIPSR2 userland.
+			 */
+			pr_err("Can't emulate MIPSR2 LL/SC without Config5/LLB\n");
+			err = SIGKILL;
+			break;
+		}
+
+		__asm__ __volatile__(
+			"1:\n"
+			"ll	%0, 0(%2)\n"
+			"2:\n"
+			".insn\n"
+			".section        .fixup,\"ax\"\n"
+			"3:\n"
+			"li	%1, %3\n"
+			"j	2b\n"
+			".previous\n"
+			".section        __ex_table,\"a\"\n"
+			".word  1b, 3b\n"
+			".previous\n"
+			: "=&r"(res), "+&r"(err)
+			: "r"(vaddr), "i"(SIGSEGV)
+			: "memory");
+
+		if (MIPSInst_RT(inst) && !err)
+			regs->regs[MIPSInst_RT(inst)] = res;
+		MIPS_R2_STATS(llsc);
+
+		break;
+
+	case sc_op:
+		vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
+		if (vaddr & 0x3) {
+			current->thread.cp0_baduaddr = vaddr;
+			err = SIGBUS;
+			break;
+		}
+		if (!access_ok(VERIFY_WRITE, vaddr, 4)) {
+			current->thread.cp0_baduaddr = vaddr;
+			err = SIGBUS;
+			break;
+		}
+
+		if (!cpu_has_rw_llb) {
+			/*
+			 * An LL/SC block can't be safely emulated without
+			 * Config5/LLB being available. So it's probably time to
+			 * kill our process before things get any worse. This is
+			 * because Config5/LLB allows us to use ERETNC so that
+			 * the LLAddr/LLB bit is not cleared when we return from
+			 * an exception. MIPS R2 LL/SC instructions trap with an
+			 * RI exception, so once we emulate them here, we
+			 * return to userland with ERETNC. That preserves the
+			 * LLAddr/LLB so the subsequent SC instruction will
+			 * succeed preserving the atomic semantics of the LL/SC
+			 * block. Without that, there is no safe way to emulate
+			 * an LL/SC block in MIPSR2 userland.
+			 */
+			pr_err("Can't emulate MIPSR2 LL/SC without Config5/LLB\n");
+			err = SIGKILL;
+			break;
+		}
+
+		res = regs->regs[MIPSInst_RT(inst)];
+
+		__asm__ __volatile__(
+			"1:\n"
+			"sc	%0, 0(%2)\n"
+			"2:\n"
+			".insn\n"
+			".section        .fixup,\"ax\"\n"
+			"3:\n"
+			"li	%1, %3\n"
+			"j	2b\n"
+			".previous\n"
+			".section        __ex_table,\"a\"\n"
+			".word	1b, 3b\n"
+			".previous\n"
+			: "+&r"(res), "+&r"(err)
+			: "r"(vaddr), "i"(SIGSEGV));
+
+		if (MIPSInst_RT(inst) && !err)
+			regs->regs[MIPSInst_RT(inst)] = res;
+
+		MIPS_R2_STATS(llsc);
+
+		break;
+
+	case lld_op:
+		if (config_enabled(CONFIG_32BIT)) {
+			err = SIGILL;
+			break;
+		}
+
+		vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
+		if (vaddr & 0x7) {
+			current->thread.cp0_baduaddr = vaddr;
+			err = SIGBUS;
+			break;
+		}
+		if (!access_ok(VERIFY_READ, vaddr, 8)) {
+			current->thread.cp0_baduaddr = vaddr;
+			err = SIGBUS;
+			break;
+		}
+
+		if (!cpu_has_rw_llb) {
+			/*
+			 * An LL/SC block can't be safely emulated without
+			 * Config5/LLB being available. So it's probably time to
+			 * kill our process before things get any worse. This is
+			 * because Config5/LLB allows us to use ERETNC so that
+			 * the LLAddr/LLB bit is not cleared when we return from
+			 * an exception. MIPS R2 LL/SC instructions trap with an
+			 * RI exception, so once we emulate them here, we
+			 * return to userland with ERETNC. That preserves the
+			 * LLAddr/LLB so the subsequent SC instruction will
+			 * succeed preserving the atomic semantics of the LL/SC
+			 * block. Without that, there is no safe way to emulate
+			 * an LL/SC block in MIPSR2 userland.
+			 */
+			pr_err("Can't emulate MIPSR2 LL/SC without Config5/LLB\n");
+			err = SIGKILL;
+			break;
+		}
+
+		__asm__ __volatile__(
+			"1:\n"
+			"lld	%0, 0(%2)\n"
+			"2:\n"
+			".insn\n"
+			".section        .fixup,\"ax\"\n"
+			"3:\n"
+			"li	%1, %3\n"
+			"j	2b\n"
+			".previous\n"
+			".section        __ex_table,\"a\"\n"
+			".word  1b, 3b\n"
+			".previous\n"
+			: "=&r"(res), "+&r"(err)
+			: "r"(vaddr), "i"(SIGSEGV)
+			: "memory");
+		if (MIPSInst_RT(inst) && !err)
+			regs->regs[MIPSInst_RT(inst)] = res;
+
+		MIPS_R2_STATS(llsc);
+
+		break;
+
+	case scd_op:
+		if (config_enabled(CONFIG_32BIT)) {
+			err = SIGILL;
+			break;
+		}
+
+		vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
+		if (vaddr & 0x7) {
+			current->thread.cp0_baduaddr = vaddr;
+			err = SIGBUS;
+			break;
+		}
+		if (!access_ok(VERIFY_WRITE, vaddr, 8)) {
+			current->thread.cp0_baduaddr = vaddr;
+			err = SIGBUS;
+			break;
+		}
+
+		if (!cpu_has_rw_llb) {
+			/*
+			 * An LL/SC block can't be safely emulated without
+			 * Config5/LLB being available. So it's probably time to
+			 * kill our process before things get any worse. This is
+			 * because Config5/LLB allows us to use ERETNC so that
+			 * the LLAddr/LLB bit is not cleared when we return from
+			 * an exception. MIPS R2 LL/SC instructions trap with an
+			 * RI exception, so once we emulate them here, we
+			 * return to userland with ERETNC. That preserves the
+			 * LLAddr/LLB so the subsequent SC instruction will
+			 * succeed preserving the atomic semantics of the LL/SC
+			 * block. Without that, there is no safe way to emulate
+			 * an LL/SC block in MIPSR2 userland.
+			 */
+			pr_err("Can't emulate MIPSR2 LL/SC without Config5/LLB\n");
+			err = SIGKILL;
+			break;
+		}
+
+		res = regs->regs[MIPSInst_RT(inst)];
+
+		__asm__ __volatile__(
+			"1:\n"
+			"scd	%0, 0(%2)\n"
+			"2:\n"
+			".insn\n"
+			".section        .fixup,\"ax\"\n"
+			"3:\n"
+			"li	%1, %3\n"
+			"j	2b\n"
+			".previous\n"
+			".section        __ex_table,\"a\"\n"
+			".word	1b, 3b\n"
+			".previous\n"
+			: "+&r"(res), "+&r"(err)
+			: "r"(vaddr), "i"(SIGSEGV));
+
+		if (MIPSInst_RT(inst) && !err)
+			regs->regs[MIPSInst_RT(inst)] = res;
+
+		MIPS_R2_STATS(llsc);
+
+		break;
+	case pref_op:
+		/* skip it */
+		break;
+	default:
+		err = SIGILL;
+	}
+
+	/*
+	 * Let's not return to userland just yet: it's costly and
+	 * it's likely that we have more R2 instructions to emulate.
+	 */
+	if (!err && (pass++ < MIPS_R2_EMUL_TOTAL_PASS)) {
+		regs->cp0_cause &= ~CAUSEF_BD;
+		err = get_user(inst, (u32 __user *)regs->cp0_epc);
+		if (!err)
+			goto repeat;
+
+		if (err < 0)
+			err = SIGSEGV;
+	}
+
+	if (err && (err != SIGEMT)) {
+		regs->regs[31] = r31;
+		regs->cp0_epc = epc;
+	}
+
+	/* Likely a MIPS R6 compatible instruction */
+	if (pass && (err == SIGILL))
+		err = 0;
+
+	return err;
+}
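Editorial note: to see why the Config5/LLB argument in the ll/sc/lld/scd cases matters, consider the kind of MIPSR2 userland sequence being emulated (a hypothetical example, not taken from the patch). On an R6 core both the ll and the sc below trap with RI; the sc can only succeed if the LLB bit set while emulating the ll survives each return to userland, which is exactly what ERETNC guarantees:

	/*
	 * Hypothetical R2 userland atomic increment, as an R2 toolchain
	 * might emit it; each ll/sc here ends up in mipsr2_decoder().
	 */
	static inline int atomic_inc_r2(volatile int *p)
	{
		int old, tmp;

		__asm__ __volatile__(
		"1:	ll	%0, %2\n"	/* traps with RI on R6 */
		"	addiu	%1, %0, 1\n"
		"	sc	%1, %2\n"	/* needs LLB still set */
		"	beqz	%1, 1b\n"
		: "=&r"(old), "=&r"(tmp), "+m"(*p));
		return old;
	}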
+
+#ifdef CONFIG_DEBUG_FS
+
+static int mipsr2_stats_show(struct seq_file *s, void *unused)
+{
+
+	seq_printf(s, "Instruction\tTotal\tBDslot\n------------------------------\n");
+	seq_printf(s, "movs\t\t%ld\t%ld\n",
+		   (unsigned long)__this_cpu_read(mipsr2emustats.movs),
+		   (unsigned long)__this_cpu_read(mipsr2bdemustats.movs));
+	seq_printf(s, "hilo\t\t%ld\t%ld\n",
+		   (unsigned long)__this_cpu_read(mipsr2emustats.hilo),
+		   (unsigned long)__this_cpu_read(mipsr2bdemustats.hilo));
+	seq_printf(s, "muls\t\t%ld\t%ld\n",
+		   (unsigned long)__this_cpu_read(mipsr2emustats.muls),
+		   (unsigned long)__this_cpu_read(mipsr2bdemustats.muls));
+	seq_printf(s, "divs\t\t%ld\t%ld\n",
+		   (unsigned long)__this_cpu_read(mipsr2emustats.divs),
+		   (unsigned long)__this_cpu_read(mipsr2bdemustats.divs));
+	seq_printf(s, "dsps\t\t%ld\t%ld\n",
+		   (unsigned long)__this_cpu_read(mipsr2emustats.dsps),
+		   (unsigned long)__this_cpu_read(mipsr2bdemustats.dsps));
+	seq_printf(s, "bops\t\t%ld\t%ld\n",
+		   (unsigned long)__this_cpu_read(mipsr2emustats.bops),
+		   (unsigned long)__this_cpu_read(mipsr2bdemustats.bops));
+	seq_printf(s, "traps\t\t%ld\t%ld\n",
+		   (unsigned long)__this_cpu_read(mipsr2emustats.traps),
+		   (unsigned long)__this_cpu_read(mipsr2bdemustats.traps));
+	seq_printf(s, "fpus\t\t%ld\t%ld\n",
+		   (unsigned long)__this_cpu_read(mipsr2emustats.fpus),
+		   (unsigned long)__this_cpu_read(mipsr2bdemustats.fpus));
+	seq_printf(s, "loads\t\t%ld\t%ld\n",
+		   (unsigned long)__this_cpu_read(mipsr2emustats.loads),
+		   (unsigned long)__this_cpu_read(mipsr2bdemustats.loads));
+	seq_printf(s, "stores\t\t%ld\t%ld\n",
+		   (unsigned long)__this_cpu_read(mipsr2emustats.stores),
+		   (unsigned long)__this_cpu_read(mipsr2bdemustats.stores));
+	seq_printf(s, "llsc\t\t%ld\t%ld\n",
+		   (unsigned long)__this_cpu_read(mipsr2emustats.llsc),
+		   (unsigned long)__this_cpu_read(mipsr2bdemustats.llsc));
+	seq_printf(s, "dsemul\t\t%ld\t%ld\n",
+		   (unsigned long)__this_cpu_read(mipsr2emustats.dsemul),
+		   (unsigned long)__this_cpu_read(mipsr2bdemustats.dsemul));
+	seq_printf(s, "jr\t\t%ld\n",
+		   (unsigned long)__this_cpu_read(mipsr2bremustats.jrs));
+	seq_printf(s, "bltzl\t\t%ld\n",
+		   (unsigned long)__this_cpu_read(mipsr2bremustats.bltzl));
+	seq_printf(s, "bgezl\t\t%ld\n",
+		   (unsigned long)__this_cpu_read(mipsr2bremustats.bgezl));
+	seq_printf(s, "bltzll\t\t%ld\n",
+		   (unsigned long)__this_cpu_read(mipsr2bremustats.bltzll));
+	seq_printf(s, "bgezll\t\t%ld\n",
+		   (unsigned long)__this_cpu_read(mipsr2bremustats.bgezll));
+	seq_printf(s, "bltzal\t\t%ld\n",
+		   (unsigned long)__this_cpu_read(mipsr2bremustats.bltzal));
+	seq_printf(s, "bgezal\t\t%ld\n",
+		   (unsigned long)__this_cpu_read(mipsr2bremustats.bgezal));
+	seq_printf(s, "beql\t\t%ld\n",
+		   (unsigned long)__this_cpu_read(mipsr2bremustats.beql));
+	seq_printf(s, "bnel\t\t%ld\n",
+		   (unsigned long)__this_cpu_read(mipsr2bremustats.bnel));
+	seq_printf(s, "blezl\t\t%ld\n",
+		   (unsigned long)__this_cpu_read(mipsr2bremustats.blezl));
+	seq_printf(s, "bgtzl\t\t%ld\n",
+		   (unsigned long)__this_cpu_read(mipsr2bremustats.bgtzl));
+
+	return 0;
+}
+
+static int mipsr2_stats_clear_show(struct seq_file *s, void *unused)
+{
+	mipsr2_stats_show(s, unused);
+
+	__this_cpu_write((mipsr2emustats).movs, 0);
+	__this_cpu_write((mipsr2bdemustats).movs, 0);
+	__this_cpu_write((mipsr2emustats).hilo, 0);
+	__this_cpu_write((mipsr2bdemustats).hilo, 0);
+	__this_cpu_write((mipsr2emustats).muls, 0);
+	__this_cpu_write((mipsr2bdemustats).muls, 0);
+	__this_cpu_write((mipsr2emustats).divs, 0);
+	__this_cpu_write((mipsr2bdemustats).divs, 0);
+	__this_cpu_write((mipsr2emustats).dsps, 0);
+	__this_cpu_write((mipsr2bdemustats).dsps, 0);
+	__this_cpu_write((mipsr2emustats).bops, 0);
+	__this_cpu_write((mipsr2bdemustats).bops, 0);
+	__this_cpu_write((mipsr2emustats).traps, 0);
+	__this_cpu_write((mipsr2bdemustats).traps, 0);
+	__this_cpu_write((mipsr2emustats).fpus, 0);
+	__this_cpu_write((mipsr2bdemustats).fpus, 0);
+	__this_cpu_write((mipsr2emustats).loads, 0);
+	__this_cpu_write((mipsr2bdemustats).loads, 0);
+	__this_cpu_write((mipsr2emustats).stores, 0);
+	__this_cpu_write((mipsr2bdemustats).stores, 0);
+	__this_cpu_write((mipsr2emustats).llsc, 0);
+	__this_cpu_write((mipsr2bdemustats).llsc, 0);
+	__this_cpu_write((mipsr2emustats).dsemul, 0);
+	__this_cpu_write((mipsr2bdemustats).dsemul, 0);
+	__this_cpu_write((mipsr2bremustats).jrs, 0);
+	__this_cpu_write((mipsr2bremustats).bltzl, 0);
+	__this_cpu_write((mipsr2bremustats).bgezl, 0);
+	__this_cpu_write((mipsr2bremustats).bltzll, 0);
+	__this_cpu_write((mipsr2bremustats).bgezll, 0);
+	__this_cpu_write((mipsr2bremustats).bltzal, 0);
+	__this_cpu_write((mipsr2bremustats).bgezal, 0);
+	__this_cpu_write((mipsr2bremustats).beql, 0);
+	__this_cpu_write((mipsr2bremustats).bnel, 0);
+	__this_cpu_write((mipsr2bremustats).blezl, 0);
+	__this_cpu_write((mipsr2bremustats).bgtzl, 0);
+
+	return 0;
+}
+
+static int mipsr2_stats_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, mipsr2_stats_show, inode->i_private);
+}
+
+static int mipsr2_stats_clear_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, mipsr2_stats_clear_show, inode->i_private);
+}
+
+static const struct file_operations mipsr2_emul_fops = {
+	.open                   = mipsr2_stats_open,
+	.read			= seq_read,
+	.llseek			= seq_lseek,
+	.release		= single_release,
+};
+
+static const struct file_operations mipsr2_clear_fops = {
+	.open                   = mipsr2_stats_clear_open,
+	.read			= seq_read,
+	.llseek			= seq_lseek,
+	.release		= single_release,
+};
+
+
+static int __init mipsr2_init_debugfs(void)
+{
+	extern struct dentry	*mips_debugfs_dir;
+	struct dentry		*mipsr2_emul;
+
+	if (!mips_debugfs_dir)
+		return -ENODEV;
+
+	mipsr2_emul = debugfs_create_file("r2_emul_stats", S_IRUGO,
+					  mips_debugfs_dir, NULL,
+					  &mipsr2_emul_fops);
+	if (!mipsr2_emul)
+		return -ENOMEM;
+
+	mipsr2_emul = debugfs_create_file("r2_emul_stats_clear", S_IRUGO,
+					  mips_debugfs_dir, NULL,
+					  &mipsr2_clear_fops);
+	if (!mipsr2_emul)
+		return -ENOMEM;
+
+	return 0;
+}
+
+device_initcall(mipsr2_init_debugfs);
+
+#endif /* CONFIG_DEBUG_FS */
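Editorial note: assuming debugfs is mounted in the usual place and mips_debugfs_dir corresponds to /sys/kernel/debug/mips (an assumption; the mount point is configuration dependent), the counters can be read from userland. A minimal sketch:

	#include <stdio.h>

	/*
	 * Hypothetical reader for the per-CPU R2 emulation statistics;
	 * reading r2_emul_stats_clear instead dumps and then zeroes them.
	 */
	int main(void)
	{
		char line[128];
		FILE *f = fopen("/sys/kernel/debug/mips/r2_emul_stats", "r");

		if (!f) {
			perror("r2_emul_stats");
			return 1;
		}
		while (fgets(line, sizeof(line), f))
			fputs(line, stdout);	/* Instruction/Total/BDslot table */
		fclose(f);
		return 0;
	}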

+ 2 - 0
arch/mips/kernel/mips_ksyms.c

@@ -77,11 +77,13 @@ EXPORT_SYMBOL(__strnlen_kernel_asm);
 EXPORT_SYMBOL(__strnlen_user_nocheck_asm);
 EXPORT_SYMBOL(__strnlen_user_asm);
 
+#ifndef CONFIG_CPU_MIPSR6
 EXPORT_SYMBOL(csum_partial);
 EXPORT_SYMBOL(csum_partial_copy_nocheck);
 EXPORT_SYMBOL(__csum_partial_copy_kernel);
 EXPORT_SYMBOL(__csum_partial_copy_to_user);
 EXPORT_SYMBOL(__csum_partial_copy_from_user);
+#endif
 
 EXPORT_SYMBOL(invalid_pte_table);
 #ifdef CONFIG_FUNCTION_TRACER

+ 7 - 1
arch/mips/kernel/proc.c

@@ -82,7 +82,9 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 		seq_printf(m, "]\n");
 	}
 
-	seq_printf(m, "isa\t\t\t: mips1");
+	seq_printf(m, "isa\t\t\t:");
+	if (cpu_has_mips_r1)
+		seq_printf(m, " mips1");
 	if (cpu_has_mips_2)
 		seq_printf(m, "%s", " mips2");
 	if (cpu_has_mips_3)
@@ -95,10 +97,14 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 		seq_printf(m, "%s", " mips32r1");
 	if (cpu_has_mips32r2)
 		seq_printf(m, "%s", " mips32r2");
+	if (cpu_has_mips32r6)
+		seq_printf(m, "%s", " mips32r6");
 	if (cpu_has_mips64r1)
 		seq_printf(m, "%s", " mips64r1");
 	if (cpu_has_mips64r2)
 		seq_printf(m, "%s", " mips64r2");
+	if (cpu_has_mips64r6)
+		seq_printf(m, "%s", " mips64r6");
 	seq_printf(m, "\n");
 
 	seq_printf(m, "ASEs implemented\t:");
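Editorial note: the user-visible effect is in /proc/cpuinfo. The isa line is no longer hardwired to start with mips1, since an R6 core does not implement the older ISAs; on a hypothetical MIPS32 R6 Malta it would now read along the lines of:

	isa			: mips32r6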

+ 4 - 0
arch/mips/kernel/process.c

@@ -581,6 +581,10 @@ int mips_set_process_fp_mode(struct task_struct *task, unsigned int value)
 	if ((value & PR_FP_MODE_FRE) && cpu_has_fpu && !cpu_has_fre)
 		return -EOPNOTSUPP;
 
+	/* FR = 0 not supported in MIPS R6 */
+	if (!(value & PR_FP_MODE_FR) && cpu_has_fpu && cpu_has_mips_r6)
+		return -EOPNOTSUPP;
+
 	/* Save FP & vector context, then disable FPU & MSA */
 	if (task->signal == current->signal)
 		lose_fpu(1);
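Editorial note: from userland this check surfaces through prctl(). A hypothetical probe (PR_SET_FP_MODE comes from the same series and may be missing from older libc headers, hence the fallback define):

	#include <stdio.h>
	#include <string.h>
	#include <errno.h>
	#include <sys/prctl.h>

	#ifndef PR_SET_FP_MODE
	#define PR_SET_FP_MODE	45
	#endif

	int main(void)
	{
		/*
		 * Request FR=0 by passing no PR_FP_MODE_FR bit: on an R6
		 * CPU with an FPU this now fails with EOPNOTSUPP.
		 */
		if (prctl(PR_SET_FP_MODE, 0) < 0)
			printf("FR=0 refused: %s\n", strerror(errno));
		return 0;
	}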

+ 9 - 3
arch/mips/kernel/r4k_fpu.S

@@ -34,7 +34,7 @@
 	.endm
 
 	.set	noreorder
-	.set	arch=r4000
+	.set	MIPS_ISA_ARCH_LEVEL_RAW
 
 LEAF(_save_fp_context)
 	.set	push
@@ -42,7 +42,8 @@ LEAF(_save_fp_context)
 	cfc1	t1, fcr31
 	.set	pop
 
-#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2)
+#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) || \
+		defined(CONFIG_CPU_MIPS32_R6)
 	.set	push
 	SET_HARDFLOAT
 #ifdef CONFIG_CPU_MIPS32_R2
@@ -105,10 +106,12 @@ LEAF(_save_fp_context32)
 	SET_HARDFLOAT
 	cfc1	t1, fcr31
 
+#ifndef CONFIG_CPU_MIPS64_R6
 	mfc0	t0, CP0_STATUS
 	sll	t0, t0, 5
 	bgez	t0, 1f			# skip storing odd if FR=0
 	 nop
+#endif
 
 	/* Store the 16 odd double precision registers */
 	EX      sdc1 $f1, SC32_FPREGS+8(a0)
@@ -163,7 +166,8 @@ LEAF(_save_fp_context32)
 LEAF(_restore_fp_context)
 	EX	lw t1, SC_FPC_CSR(a0)
 
-#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2)
+#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) || \
+		defined(CONFIG_CPU_MIPS32_R6)
 	.set	push
 	SET_HARDFLOAT
 #ifdef CONFIG_CPU_MIPS32_R2
@@ -223,10 +227,12 @@ LEAF(_restore_fp_context32)
 	SET_HARDFLOAT
 	EX	lw t1, SC32_FPC_CSR(a0)
 
+#ifndef CONFIG_CPU_MIPS64_R6
 	mfc0	t0, CP0_STATUS
 	sll	t0, t0, 5
 	bgez	t0, 1f			# skip loading odd if FR=0
 	 nop
+#endif
 
 	EX      ldc1 $f1, SC32_FPREGS+8(a0)
 	EX      ldc1 $f3, SC32_FPREGS+24(a0)

+ 8 - 6
arch/mips/kernel/r4k_switch.S

@@ -115,7 +115,8 @@
  * Save a thread's fp context.
  */
 LEAF(_save_fp)
-#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2)
+#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) || \
+		defined(CONFIG_CPU_MIPS32_R6)
 	mfc0	t0, CP0_STATUS
 #endif
 	fpu_save_double a0 t0 t1		# clobbers t1
@@ -126,7 +127,8 @@ LEAF(_save_fp)
  * Restore a thread's fp context.
  */
 LEAF(_restore_fp)
-#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2)
+#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) || \
+		defined(CONFIG_CPU_MIPS32_R6)
 	mfc0	t0, CP0_STATUS
 #endif
 	fpu_restore_double a0 t0 t1		# clobbers t1
@@ -240,9 +242,9 @@ LEAF(_init_fpu)
 	mtc1	t1, $f30
 	mtc1	t1, $f31
 
-#ifdef CONFIG_CPU_MIPS32_R2
+#if defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_CPU_MIPS32_R6)
 	.set    push
-	.set    mips32r2
+	.set    MIPS_ISA_LEVEL_RAW
 	.set	fp=64
 	sll     t0, t0, 5			# is Status.FR set?
 	bgez    t0, 1f				# no: skip setting upper 32b
@@ -280,9 +282,9 @@ LEAF(_init_fpu)
 	mthc1   t1, $f30
 	mthc1   t1, $f31
 1:	.set    pop
-#endif /* CONFIG_CPU_MIPS32_R2 */
+#endif /* CONFIG_CPU_MIPS32_R2 || CONFIG_CPU_MIPS32_R6 */
 #else
-	.set	arch=r4000
+	.set	MIPS_ISA_ARCH_LEVEL_RAW
 	dmtc1	t1, $f0
 	dmtc1	t1, $f2
 	dmtc1	t1, $f4

+ 1 - 0
arch/mips/kernel/spram.c

@@ -208,6 +208,7 @@ void spram_config(void)
 	case CPU_INTERAPTIV:
 	case CPU_PROAPTIV:
 	case CPU_P5600:
+	case CPU_QEMU_GENERIC:
 		config0 = read_c0_config();
 		/* FIXME: addresses are Malta specific */
 		if (config0 & (1<<24)) {

+ 1 - 1
arch/mips/kernel/syscall.c

@@ -136,7 +136,7 @@ static inline int mips_atomic_set(unsigned long addr, unsigned long new)
 		: "memory");
 	} else if (cpu_has_llsc) {
 		__asm__ __volatile__ (
-		"	.set	arch=r4000				\n"
+		"	.set	"MIPS_ISA_ARCH_LEVEL"			\n"
 		"	li	%[err], 0				\n"
 		"1:	ll	%[old], (%[addr])			\n"
 		"	move	%[tmp], %[new]				\n"

+ 35 - 6
arch/mips/kernel/traps.c

@@ -46,6 +46,7 @@
 #include <asm/fpu.h>
 #include <asm/fpu_emulator.h>
 #include <asm/idle.h>
+#include <asm/mips-r2-to-r6-emul.h>
 #include <asm/mipsregs.h>
 #include <asm/mipsmtregs.h>
 #include <asm/module.h>
@@ -837,7 +838,7 @@ out:
 	exception_exit(prev_state);
 }
 
-static void do_trap_or_bp(struct pt_regs *regs, unsigned int code,
+void do_trap_or_bp(struct pt_regs *regs, unsigned int code,
 	const char *str)
 {
 	siginfo_t info;
@@ -1027,7 +1028,34 @@ asmlinkage void do_ri(struct pt_regs *regs)
 	unsigned int opcode = 0;
 	int status = -1;
 
+	/*
+	 * Avoid any kernel code. Just emulate the R2 instruction
+	 * as quickly as possible.
+	 */
+	if (mipsr2_emulation && cpu_has_mips_r6 &&
+	    likely(user_mode(regs))) {
+		if (likely(get_user(opcode, epc) >= 0)) {
+			status = mipsr2_decoder(regs, opcode);
+			switch (status) {
+			case 0:
+			case SIGEMT:
+				task_thread_info(current)->r2_emul_return = 1;
+				return;
+			case SIGILL:
+				goto no_r2_instr;
+			default:
+				process_fpemu_return(status,
+						     &current->thread.cp0_baduaddr);
+				task_thread_info(current)->r2_emul_return = 1;
+				return;
+			}
+		}
+	}
+
+no_r2_instr:
+
 	prev_state = exception_enter();
+
 	if (notify_die(DIE_RI, "RI Fault", regs, 0, regs_to_trapnr(regs),
 		       SIGILL) == NOTIFY_STOP)
 		goto out;
@@ -1559,6 +1587,7 @@ static inline void parity_protection_init(void)
 	case CPU_INTERAPTIV:
 	case CPU_PROAPTIV:
 	case CPU_P5600:
+	case CPU_QEMU_GENERIC:
 		{
 #define ERRCTL_PE	0x80000000
 #define ERRCTL_L2P	0x00800000
@@ -1648,7 +1677,7 @@ asmlinkage void cache_parity_error(void)
 	printk("Decoded c0_cacheerr: %s cache fault in %s reference.\n",
 	       reg_val & (1<<30) ? "secondary" : "primary",
 	       reg_val & (1<<31) ? "data" : "insn");
-	if (cpu_has_mips_r2 &&
+	if ((cpu_has_mips_r2_r6) &&
 	    ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS)) {
 		pr_err("Error bits: %s%s%s%s%s%s%s%s\n",
 			reg_val & (1<<29) ? "ED " : "",
@@ -1688,7 +1717,7 @@ asmlinkage void do_ftlb(void)
 	unsigned int reg_val;
 
 	/* For the moment, report the problem and hang. */
-	if (cpu_has_mips_r2 &&
+	if ((cpu_has_mips_r2_r6) &&
 	    ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS)) {
 		pr_err("FTLB error exception, cp0_ecc=0x%08x:\n",
 		       read_c0_ecc());
@@ -1977,7 +2006,7 @@ static void configure_hwrena(void)
 {
 	unsigned int hwrena = cpu_hwrena_impl_bits;
 
-	if (cpu_has_mips_r2)
+	if (cpu_has_mips_r2_r6)
 		hwrena |= 0x0000000f;
 
 	if (!noulri && cpu_has_userlocal)
@@ -2021,7 +2050,7 @@ void per_cpu_trap_init(bool is_boot_cpu)
 	 *  o read IntCtl.IPTI to determine the timer interrupt
 	 *  o read IntCtl.IPPCI to determine the performance counter interrupt
 	 */
-	if (cpu_has_mips_r2) {
+	if (cpu_has_mips_r2_r6) {
 		cp0_compare_irq_shift = CAUSEB_TI - CAUSEB_IP;
 		cp0_compare_irq = (read_c0_intctl() >> INTCTLB_IPTI) & 7;
 		cp0_perfcount_irq = (read_c0_intctl() >> INTCTLB_IPPCI) & 7;
@@ -2112,7 +2141,7 @@ void __init trap_init(void)
 #else
         ebase = CKSEG0;
 #endif
-		if (cpu_has_mips_r2)
+		if (cpu_has_mips_r2_r6)
 			ebase += (read_c0_ebase() & 0x3ffff000);
 	}
 

+ 386 - 4
arch/mips/kernel/unaligned.c

@@ -129,6 +129,7 @@ extern void show_registers(struct pt_regs *regs);
 			: "=&r" (value), "=r" (res)         \
 			: "r" (addr), "i" (-EFAULT));
 
+#ifndef CONFIG_CPU_MIPSR6
 #define     LoadW(addr, value, res)   \
 		__asm__ __volatile__ (                      \
 			"1:\t"user_lwl("%0", "(%2)")"\n"    \
@@ -146,6 +147,39 @@ extern void show_registers(struct pt_regs *regs);
 			".previous"                         \
 			: "=&r" (value), "=r" (res)         \
 			: "r" (addr), "i" (-EFAULT));
+#else
+/* MIPSR6 has no lwl instruction */
+#define     LoadW(addr, value, res) \
+		__asm__ __volatile__ (			    \
+			".set\tpush\n"			    \
+			".set\tnoat\n\t"		    \
+			"1:"user_lb("%0", "0(%2)")"\n\t"    \
+			"2:"user_lbu("$1", "1(%2)")"\n\t"   \
+			"sll\t%0, 0x8\n\t"		    \
+			"or\t%0, $1\n\t"		    \
+			"3:"user_lbu("$1", "2(%2)")"\n\t"   \
+			"sll\t%0, 0x8\n\t"		    \
+			"or\t%0, $1\n\t"		    \
+			"4:"user_lbu("$1", "3(%2)")"\n\t"   \
+			"sll\t%0, 0x8\n\t"		    \
+			"or\t%0, $1\n\t"		    \
+			"li\t%1, 0\n"			    \
+			".set\tpop\n"			    \
+			"10:\n\t"			    \
+			".insn\n\t"			    \
+			".section\t.fixup,\"ax\"\n\t"	    \
+			"11:\tli\t%1, %3\n\t"		    \
+			"j\t10b\n\t"			    \
+			".previous\n\t"			    \
+			".section\t__ex_table,\"a\"\n\t"    \
+			STR(PTR)"\t1b, 11b\n\t"		    \
+			STR(PTR)"\t2b, 11b\n\t"		    \
+			STR(PTR)"\t3b, 11b\n\t"		    \
+			STR(PTR)"\t4b, 11b\n\t"		    \
+			".previous"			    \
+			: "=&r" (value), "=r" (res)	    \
+			: "r" (addr), "i" (-EFAULT));
+#endif /* CONFIG_CPU_MIPSR6 */
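Editorial note: what the replacement macro computes is easier to see in plain C. A sketch of the big-endian variant (the real macro routes faults through the __ex_table entries, which this omits):

	#include <stdint.h>

	/* Sketch: R6 LoadW fallback, big-endian byte-at-a-time load. */
	static int32_t loadw_be_sketch(const uint8_t *addr)
	{
		uint32_t v = addr[0];		/* 1: lb %0, 0(%2) */

		v = (v << 8) | addr[1];		/* lbu + sll + or */
		v = (v << 8) | addr[2];
		v = (v << 8) | addr[3];
		return (int32_t)v;	/* sign bits from lb fall out of bit 31 */
	}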
 
 #define     LoadHWU(addr, value, res) \
 		__asm__ __volatile__ (                      \
@@ -169,6 +203,7 @@ extern void show_registers(struct pt_regs *regs);
 			: "=&r" (value), "=r" (res)         \
 			: "r" (addr), "i" (-EFAULT));
 
+#ifndef CONFIG_CPU_MIPSR6
 #define     LoadWU(addr, value, res)  \
 		__asm__ __volatile__ (                      \
 			"1:\t"user_lwl("%0", "(%2)")"\n"    \
@@ -206,6 +241,87 @@ extern void show_registers(struct pt_regs *regs);
 			".previous"                         \
 			: "=&r" (value), "=r" (res)         \
 			: "r" (addr), "i" (-EFAULT));
+#else
+/* MIPSR6 has no lwl and ldl instructions */
+#define	    LoadWU(addr, value, res) \
+		__asm__ __volatile__ (			    \
+			".set\tpush\n\t"		    \
+			".set\tnoat\n\t"		    \
+			"1:"user_lbu("%0", "0(%2)")"\n\t"   \
+			"2:"user_lbu("$1", "1(%2)")"\n\t"   \
+			"sll\t%0, 0x8\n\t"		    \
+			"or\t%0, $1\n\t"		    \
+			"3:"user_lbu("$1", "2(%2)")"\n\t"   \
+			"sll\t%0, 0x8\n\t"		    \
+			"or\t%0, $1\n\t"		    \
+			"4:"user_lbu("$1", "3(%2)")"\n\t"   \
+			"sll\t%0, 0x8\n\t"		    \
+			"or\t%0, $1\n\t"		    \
+			"li\t%1, 0\n"			    \
+			".set\tpop\n"			    \
+			"10:\n\t"			    \
+			".insn\n\t"			    \
+			".section\t.fixup,\"ax\"\n\t"	    \
+			"11:\tli\t%1, %3\n\t"		    \
+			"j\t10b\n\t"			    \
+			".previous\n\t"			    \
+			".section\t__ex_table,\"a\"\n\t"    \
+			STR(PTR)"\t1b, 11b\n\t"		    \
+			STR(PTR)"\t2b, 11b\n\t"		    \
+			STR(PTR)"\t3b, 11b\n\t"		    \
+			STR(PTR)"\t4b, 11b\n\t"		    \
+			".previous"			    \
+			: "=&r" (value), "=r" (res)	    \
+			: "r" (addr), "i" (-EFAULT));
+
+#define     LoadDW(addr, value, res)  \
+		__asm__ __volatile__ (			    \
+			".set\tpush\n\t"		    \
+			".set\tnoat\n\t"		    \
+			"1:lb\t%0, 0(%2)\n\t"    	    \
+			"2:lbu\t $1, 1(%2)\n\t"   	    \
+			"dsll\t%0, 0x8\n\t"		    \
+			"or\t%0, $1\n\t"		    \
+			"3:lbu\t$1, 2(%2)\n\t"   	    \
+			"dsll\t%0, 0x8\n\t"		    \
+			"or\t%0, $1\n\t"		    \
+			"4:lbu\t$1, 3(%2)\n\t"   	    \
+			"dsll\t%0, 0x8\n\t"		    \
+			"or\t%0, $1\n\t"		    \
+			"5:lbu\t$1, 4(%2)\n\t"   	    \
+			"dsll\t%0, 0x8\n\t"		    \
+			"or\t%0, $1\n\t"		    \
+			"6:lbu\t$1, 5(%2)\n\t"   	    \
+			"dsll\t%0, 0x8\n\t"		    \
+			"or\t%0, $1\n\t"		    \
+			"7:lbu\t$1, 6(%2)\n\t"   	    \
+			"dsll\t%0, 0x8\n\t"		    \
+			"or\t%0, $1\n\t"		    \
+			"8:lbu\t$1, 7(%2)\n\t"   	    \
+			"dsll\t%0, 0x8\n\t"		    \
+			"or\t%0, $1\n\t"		    \
+			"li\t%1, 0\n"			    \
+			".set\tpop\n\t"			    \
+			"10:\n\t"			    \
+			".insn\n\t"			    \
+			".section\t.fixup,\"ax\"\n\t"	    \
+			"11:\tli\t%1, %3\n\t"		    \
+			"j\t10b\n\t"			    \
+			".previous\n\t"			    \
+			".section\t__ex_table,\"a\"\n\t"    \
+			STR(PTR)"\t1b, 11b\n\t"		    \
+			STR(PTR)"\t2b, 11b\n\t"		    \
+			STR(PTR)"\t3b, 11b\n\t"		    \
+			STR(PTR)"\t4b, 11b\n\t"		    \
+			STR(PTR)"\t5b, 11b\n\t"		    \
+			STR(PTR)"\t6b, 11b\n\t"		    \
+			STR(PTR)"\t7b, 11b\n\t"		    \
+			STR(PTR)"\t8b, 11b\n\t"		    \
+			".previous"			    \
+			: "=&r" (value), "=r" (res)	    \
+			: "r" (addr), "i" (-EFAULT));
+#endif /* CONFIG_CPU_MIPSR6 */
+
 
 #define     StoreHW(addr, value, res) \
 		__asm__ __volatile__ (                      \
@@ -228,6 +344,7 @@ extern void show_registers(struct pt_regs *regs);
 			: "=r" (res)                        \
 			: "r" (value), "r" (addr), "i" (-EFAULT));
 
+#ifndef CONFIG_CPU_MIPSR6
 #define     StoreW(addr, value, res)  \
 		__asm__ __volatile__ (                      \
 			"1:\t"user_swl("%1", "(%2)")"\n"    \
@@ -263,9 +380,82 @@ extern void show_registers(struct pt_regs *regs);
 			".previous"                         \
 		: "=r" (res)                                \
 		: "r" (value), "r" (addr), "i" (-EFAULT));
-#endif
+#else
+/* MIPSR6 has no swl and sdl instructions */
+#define     StoreW(addr, value, res)  \
+		__asm__ __volatile__ (                      \
+			".set\tpush\n\t"		    \
+			".set\tnoat\n\t"		    \
+			"1:"user_sb("%1", "3(%2)")"\n\t"    \
+			"srl\t$1, %1, 0x8\n\t"		    \
+			"2:"user_sb("$1", "2(%2)")"\n\t"    \
+			"srl\t$1, $1,  0x8\n\t"		    \
+			"3:"user_sb("$1", "1(%2)")"\n\t"    \
+			"srl\t$1, $1, 0x8\n\t"		    \
+			"4:"user_sb("$1", "0(%2)")"\n\t"    \
+			".set\tpop\n\t"			    \
+			"li\t%0, 0\n"			    \
+			"10:\n\t"			    \
+			".insn\n\t"			    \
+			".section\t.fixup,\"ax\"\n\t"	    \
+			"11:\tli\t%0, %3\n\t"		    \
+			"j\t10b\n\t"			    \
+			".previous\n\t"			    \
+			".section\t__ex_table,\"a\"\n\t"    \
+			STR(PTR)"\t1b, 11b\n\t"		    \
+			STR(PTR)"\t2b, 11b\n\t"		    \
+			STR(PTR)"\t3b, 11b\n\t"		    \
+			STR(PTR)"\t4b, 11b\n\t"		    \
+			".previous"			    \
+		: "=&r" (res)			    	    \
+		: "r" (value), "r" (addr), "i" (-EFAULT)    \
+		: "memory");
+
+#define     StoreDW(addr, value, res) \
+		__asm__ __volatile__ (                      \
+			".set\tpush\n\t"		    \
+			".set\tnoat\n\t"		    \
+			"1:sb\t%1, 7(%2)\n\t"    	    \
+			"dsrl\t$1, %1, 0x8\n\t"		    \
+			"2:sb\t$1, 6(%2)\n\t"    	    \
+			"dsrl\t$1, $1, 0x8\n\t"		    \
+			"3:sb\t$1, 5(%2)\n\t"    	    \
+			"dsrl\t$1, $1, 0x8\n\t"		    \
+			"4:sb\t$1, 4(%2)\n\t"    	    \
+			"dsrl\t$1, $1, 0x8\n\t"		    \
+			"5:sb\t$1, 3(%2)\n\t"    	    \
+			"dsrl\t$1, $1, 0x8\n\t"		    \
+			"6:sb\t$1, 2(%2)\n\t"    	    \
+			"dsrl\t$1, $1, 0x8\n\t"		    \
+			"7:sb\t$1, 1(%2)\n\t"    	    \
+			"dsrl\t$1, $1, 0x8\n\t"		    \
+			"8:sb\t$1, 0(%2)\n\t"    	    \
+			"dsrl\t$1, $1, 0x8\n\t"		    \
+			".set\tpop\n\t"			    \
+			"li\t%0, 0\n"			    \
+			"10:\n\t"			    \
+			".insn\n\t"			    \
+			".section\t.fixup,\"ax\"\n\t"	    \
+			"11:\tli\t%0, %3\n\t"		    \
+			"j\t10b\n\t"			    \
+			".previous\n\t"			    \
+			".section\t__ex_table,\"a\"\n\t"    \
+			STR(PTR)"\t1b, 11b\n\t"		    \
+			STR(PTR)"\t2b, 11b\n\t"		    \
+			STR(PTR)"\t3b, 11b\n\t"		    \
+			STR(PTR)"\t4b, 11b\n\t"		    \
+			STR(PTR)"\t5b, 11b\n\t"		    \
+			STR(PTR)"\t6b, 11b\n\t"		    \
+			STR(PTR)"\t7b, 11b\n\t"		    \
+			STR(PTR)"\t8b, 11b\n\t"		    \
+			".previous"			    \
+		: "=&r" (res)			    	    \
+		: "r" (value), "r" (addr), "i" (-EFAULT)    \
+		: "memory");
+#endif /* CONFIG_CPU_MIPSR6 */
+
+#else /* __BIG_ENDIAN */
 
-#ifdef __LITTLE_ENDIAN
 #define     LoadHW(addr, value, res)  \
 		__asm__ __volatile__ (".set\tnoat\n"        \
 			"1:\t"user_lb("%0", "1(%2)")"\n"    \
@@ -286,6 +476,7 @@ extern void show_registers(struct pt_regs *regs);
 			: "=&r" (value), "=r" (res)         \
 			: "r" (addr), "i" (-EFAULT));
 
+#ifndef CONFIG_CPU_MIPSR6
 #define     LoadW(addr, value, res)   \
 		__asm__ __volatile__ (                      \
 			"1:\t"user_lwl("%0", "3(%2)")"\n"   \
@@ -303,6 +494,40 @@ extern void show_registers(struct pt_regs *regs);
 			".previous"                         \
 			: "=&r" (value), "=r" (res)         \
 			: "r" (addr), "i" (-EFAULT));
+#else
+/* MIPSR6 has no lwl instruction */
+#define     LoadW(addr, value, res) \
+		__asm__ __volatile__ (			    \
+			".set\tpush\n"			    \
+			".set\tnoat\n\t"		    \
+			"1:"user_lb("%0", "3(%2)")"\n\t"    \
+			"2:"user_lbu("$1", "2(%2)")"\n\t"   \
+			"sll\t%0, 0x8\n\t"		    \
+			"or\t%0, $1\n\t"		    \
+			"3:"user_lbu("$1", "1(%2)")"\n\t"   \
+			"sll\t%0, 0x8\n\t"		    \
+			"or\t%0, $1\n\t"		    \
+			"4:"user_lbu("$1", "0(%2)")"\n\t"   \
+			"sll\t%0, 0x8\n\t"		    \
+			"or\t%0, $1\n\t"		    \
+			"li\t%1, 0\n"			    \
+			".set\tpop\n"			    \
+			"10:\n\t"			    \
+			".insn\n\t"			    \
+			".section\t.fixup,\"ax\"\n\t"	    \
+			"11:\tli\t%1, %3\n\t"		    \
+			"j\t10b\n\t"			    \
+			".previous\n\t"			    \
+			".section\t__ex_table,\"a\"\n\t"    \
+			STR(PTR)"\t1b, 11b\n\t"		    \
+			STR(PTR)"\t2b, 11b\n\t"		    \
+			STR(PTR)"\t3b, 11b\n\t"		    \
+			STR(PTR)"\t4b, 11b\n\t"		    \
+			".previous"			    \
+			: "=&r" (value), "=r" (res)	    \
+			: "r" (addr), "i" (-EFAULT));
+#endif /* CONFIG_CPU_MIPSR6 */
+
 
 #define     LoadHWU(addr, value, res) \
 		__asm__ __volatile__ (                      \
@@ -326,6 +551,7 @@ extern void show_registers(struct pt_regs *regs);
 			: "=&r" (value), "=r" (res)         \
 			: "r" (addr), "i" (-EFAULT));
 
+#ifndef CONFIG_CPU_MIPSR6
 #define     LoadWU(addr, value, res)  \
 		__asm__ __volatile__ (                      \
 			"1:\t"user_lwl("%0", "3(%2)")"\n"   \
@@ -363,6 +589,86 @@ extern void show_registers(struct pt_regs *regs);
 			".previous"                         \
 			: "=&r" (value), "=r" (res)         \
 			: "r" (addr), "i" (-EFAULT));
+#else
+/* MIPSR6 has no lwl and ldl instructions */
+#define	    LoadWU(addr, value, res) \
+		__asm__ __volatile__ (			    \
+			".set\tpush\n\t"		    \
+			".set\tnoat\n\t"		    \
+			"1:"user_lbu("%0", "3(%2)")"\n\t"   \
+			"2:"user_lbu("$1", "2(%2)")"\n\t"   \
+			"sll\t%0, 0x8\n\t"		    \
+			"or\t%0, $1\n\t"		    \
+			"3:"user_lbu("$1", "1(%2)")"\n\t"   \
+			"sll\t%0, 0x8\n\t"		    \
+			"or\t%0, $1\n\t"		    \
+			"4:"user_lbu("$1", "0(%2)")"\n\t"   \
+			"sll\t%0, 0x8\n\t"		    \
+			"or\t%0, $1\n\t"		    \
+			"li\t%1, 0\n"			    \
+			".set\tpop\n"			    \
+			"10:\n\t"			    \
+			".insn\n\t"			    \
+			".section\t.fixup,\"ax\"\n\t"	    \
+			"11:\tli\t%1, %3\n\t"		    \
+			"j\t10b\n\t"			    \
+			".previous\n\t"			    \
+			".section\t__ex_table,\"a\"\n\t"    \
+			STR(PTR)"\t1b, 11b\n\t"		    \
+			STR(PTR)"\t2b, 11b\n\t"		    \
+			STR(PTR)"\t3b, 11b\n\t"		    \
+			STR(PTR)"\t4b, 11b\n\t"		    \
+			".previous"			    \
+			: "=&r" (value), "=r" (res)	    \
+			: "r" (addr), "i" (-EFAULT));
+
+#define     LoadDW(addr, value, res)  \
+		__asm__ __volatile__ (			    \
+			".set\tpush\n\t"		    \
+			".set\tnoat\n\t"		    \
+			"1:lb\t%0, 7(%2)\n\t"    	    \
+			"2:lbu\t$1, 6(%2)\n\t"   	    \
+			"dsll\t%0, 0x8\n\t"		    \
+			"or\t%0, $1\n\t"		    \
+			"3:lbu\t$1, 5(%2)\n\t"   	    \
+			"dsll\t%0, 0x8\n\t"		    \
+			"or\t%0, $1\n\t"		    \
+			"4:lbu\t$1, 4(%2)\n\t"   	    \
+			"dsll\t%0, 0x8\n\t"		    \
+			"or\t%0, $1\n\t"		    \
+			"5:lbu\t$1, 3(%2)\n\t"   	    \
+			"dsll\t%0, 0x8\n\t"		    \
+			"or\t%0, $1\n\t"		    \
+			"6:lbu\t$1, 2(%2)\n\t"   	    \
+			"dsll\t%0, 0x8\n\t"		    \
+			"or\t%0, $1\n\t"		    \
+			"7:lbu\t$1, 1(%2)\n\t"   	    \
+			"dsll\t%0, 0x8\n\t"		    \
+			"or\t%0, $1\n\t"		    \
+			"8:lbu\t$1, 0(%2)\n\t"   	    \
+			"dsll\t%0, 0x8\n\t"		    \
+			"or\t%0, $1\n\t"		    \
+			"li\t%1, 0\n"			    \
+			".set\tpop\n\t"			    \
+			"10:\n\t"			    \
+			".insn\n\t"			    \
+			".section\t.fixup,\"ax\"\n\t"	    \
+			"11:\tli\t%1, %3\n\t"		    \
+			"j\t10b\n\t"			    \
+			".previous\n\t"			    \
+			".section\t__ex_table,\"a\"\n\t"    \
+			STR(PTR)"\t1b, 11b\n\t"		    \
+			STR(PTR)"\t2b, 11b\n\t"		    \
+			STR(PTR)"\t3b, 11b\n\t"		    \
+			STR(PTR)"\t4b, 11b\n\t"		    \
+			STR(PTR)"\t5b, 11b\n\t"		    \
+			STR(PTR)"\t6b, 11b\n\t"		    \
+			STR(PTR)"\t7b, 11b\n\t"		    \
+			STR(PTR)"\t8b, 11b\n\t"		    \
+			".previous"			    \
+			: "=&r" (value), "=r" (res)	    \
+			: "r" (addr), "i" (-EFAULT));
+#endif /* CONFIG_CPU_MIPSR6 */
 
 #define     StoreHW(addr, value, res) \
 		__asm__ __volatile__ (                      \
@@ -384,7 +690,7 @@ extern void show_registers(struct pt_regs *regs);
 			".previous"                         \
 			: "=r" (res)                        \
 			: "r" (value), "r" (addr), "i" (-EFAULT));
-
+#ifndef CONFIG_CPU_MIPSR6
 #define     StoreW(addr, value, res)  \
 		__asm__ __volatile__ (                      \
 			"1:\t"user_swl("%1", "3(%2)")"\n"   \
@@ -420,6 +726,79 @@ extern void show_registers(struct pt_regs *regs);
 			".previous"                         \
 		: "=r" (res)                                \
 		: "r" (value), "r" (addr), "i" (-EFAULT));
+#else
+/* MIPSR6 has no swl and sdl instructions */
+#define     StoreW(addr, value, res)  \
+		__asm__ __volatile__ (                      \
+			".set\tpush\n\t"		    \
+			".set\tnoat\n\t"		    \
+			"1:"user_sb("%1", "0(%2)")"\n\t"    \
+			"srl\t$1, %1, 0x8\n\t"		    \
+			"2:"user_sb("$1", "1(%2)")"\n\t"    \
+			"srl\t$1, $1,  0x8\n\t"		    \
+			"3:"user_sb("$1", "2(%2)")"\n\t"    \
+			"srl\t$1, $1, 0x8\n\t"		    \
+			"4:"user_sb("$1", "3(%2)")"\n\t"    \
+			".set\tpop\n\t"			    \
+			"li\t%0, 0\n"			    \
+			"10:\n\t"			    \
+			".insn\n\t"			    \
+			".section\t.fixup,\"ax\"\n\t"	    \
+			"11:\tli\t%0, %3\n\t"		    \
+			"j\t10b\n\t"			    \
+			".previous\n\t"			    \
+			".section\t__ex_table,\"a\"\n\t"    \
+			STR(PTR)"\t1b, 11b\n\t"		    \
+			STR(PTR)"\t2b, 11b\n\t"		    \
+			STR(PTR)"\t3b, 11b\n\t"		    \
+			STR(PTR)"\t4b, 11b\n\t"		    \
+			".previous"			    \
+		: "=&r" (res)			    	    \
+		: "r" (value), "r" (addr), "i" (-EFAULT)    \
+		: "memory");
+
+#define     StoreDW(addr, value, res) \
+		__asm__ __volatile__ (                      \
+			".set\tpush\n\t"		    \
+			".set\tnoat\n\t"		    \
+			"1:sb\t%1, 0(%2)\n\t"    	    \
+			"dsrl\t$1, %1, 0x8\n\t"		    \
+			"2:sb\t$1, 1(%2)\n\t"    	    \
+			"dsrl\t$1, $1, 0x8\n\t"		    \
+			"3:sb\t$1, 2(%2)\n\t"    	    \
+			"dsrl\t$1, $1, 0x8\n\t"		    \
+			"4:sb\t$1, 3(%2)\n\t"    	    \
+			"dsrl\t$1, $1, 0x8\n\t"		    \
+			"5:sb\t$1, 4(%2)\n\t"    	    \
+			"dsrl\t$1, $1, 0x8\n\t"		    \
+			"6:sb\t$1, 5(%2)\n\t"    	    \
+			"dsrl\t$1, $1, 0x8\n\t"		    \
+			"7:sb\t$1, 6(%2)\n\t"    	    \
+			"dsrl\t$1, $1, 0x8\n\t"		    \
+			"8:sb\t$1, 7(%2)\n\t"    	    \
+			"dsrl\t$1, $1, 0x8\n\t"		    \
+			".set\tpop\n\t"			    \
+			"li\t%0, 0\n"			    \
+			"10:\n\t"			    \
+			".insn\n\t"			    \
+			".section\t.fixup,\"ax\"\n\t"	    \
+			"11:\tli\t%0, %3\n\t"		    \
+			"j\t10b\n\t"			    \
+			".previous\n\t"			    \
+			".section\t__ex_table,\"a\"\n\t"    \
+			STR(PTR)"\t1b, 11b\n\t"		    \
+			STR(PTR)"\t2b, 11b\n\t"		    \
+			STR(PTR)"\t3b, 11b\n\t"		    \
+			STR(PTR)"\t4b, 11b\n\t"		    \
+			STR(PTR)"\t5b, 11b\n\t"		    \
+			STR(PTR)"\t6b, 11b\n\t"		    \
+			STR(PTR)"\t7b, 11b\n\t"		    \
+			STR(PTR)"\t8b, 11b\n\t"		    \
+			".previous"			    \
+		: "=&r" (res)			    	    \
+		: "r" (value), "r" (addr), "i" (-EFAULT)    \
+		: "memory");
+#endif /* CONFIG_CPU_MIPSR6 */
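Editorial note: the store side is symmetric. The little-endian StoreW above is, in C terms (a sketch; $1 plays the role of the shifted temporary, and the fixup tables are omitted):

	#include <stdint.h>

	/* Sketch: R6 StoreW fallback, little-endian byte-at-a-time store. */
	static void storew_le_sketch(uint8_t *addr, uint32_t v)
	{
		addr[0] = v;		/* 1: sb %1, 0(%2) */
		addr[1] = v >> 8;	/* srl + sb */
		addr[2] = v >> 16;
		addr[3] = v >> 24;
	}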
 #endif
 
 static void emulate_load_store_insn(struct pt_regs *regs,
@@ -703,10 +1082,13 @@ static void emulate_load_store_insn(struct pt_regs *regs,
 			break;
 		return;
 
+#ifndef CONFIG_CPU_MIPSR6
 	/*
 	 * COP2 is available to implementor for application specific use.
 	 * It's up to applications to register a notifier chain and do
 	 * whatever they have to do, including possible sending of signals.
+	 *
+	 * This instruction has been reallocated in Release 6
 	 */
 	case lwc2_op:
 		cu2_notifier_call_chain(CU2_LWC2_OP, regs);
@@ -723,7 +1105,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
 	case sdc2_op:
 		cu2_notifier_call_chain(CU2_SDC2_OP, regs);
 		break;
-
+#endif
 	default:
 		/*
 		 * Pheeee...  We encountered an yet unknown instruction or

+ 1 - 0
arch/mips/lib/Makefile

@@ -8,6 +8,7 @@ lib-y	+= bitops.o csum_partial.o delay.o memcpy.o memset.o \
 
 obj-y			+= iomap.o
 obj-$(CONFIG_PCI)	+= iomap-pci.o
+lib-$(CONFIG_GENERIC_CSUM)	:= $(filter-out csum_partial.o, $(lib-y))
 
 obj-$(CONFIG_CPU_GENERIC_DUMP_TLB) += dump_tlb.o
 obj-$(CONFIG_CPU_R3000)		+= r3k_dump_tlb.o

+ 23 - 0
arch/mips/lib/memcpy.S

@@ -293,9 +293,14 @@
 	 and	t0, src, ADDRMASK
 	PREFS(	0, 2*32(src) )
 	PREFD(	1, 2*32(dst) )
+#ifndef CONFIG_CPU_MIPSR6
 	bnez	t1, .Ldst_unaligned\@
 	 nop
 	bnez	t0, .Lsrc_unaligned_dst_aligned\@
+#else
+	or	t0, t0, t1
+	bnez	t0, .Lcopy_unaligned_bytes\@
+#endif
 	/*
 	 * use delay slot for fall-through
 	 * src and dst are aligned; need to compute rem
@@ -376,6 +381,7 @@
 	bne	rem, len, 1b
 	.set	noreorder
 
+#ifndef CONFIG_CPU_MIPSR6
 	/*
 	 * src and dst are aligned, need to copy rem bytes (rem < NBYTES)
 	 * A loop would do only a byte at a time with possible branch
@@ -477,6 +483,7 @@
 	bne	len, rem, 1b
 	.set	noreorder
 
+#endif /* !CONFIG_CPU_MIPSR6 */
 .Lcopy_bytes_checklen\@:
 	beqz	len, .Ldone\@
 	 nop
@@ -504,6 +511,22 @@
 .Ldone\@:
 	jr	ra
 	 nop
+
+#ifdef CONFIG_CPU_MIPSR6
+.Lcopy_unaligned_bytes\@:
+1:
+	COPY_BYTE(0)
+	COPY_BYTE(1)
+	COPY_BYTE(2)
+	COPY_BYTE(3)
+	COPY_BYTE(4)
+	COPY_BYTE(5)
+	COPY_BYTE(6)
+	COPY_BYTE(7)
+	ADD	src, src, 8
+	b	1b
+	 ADD	dst, dst, 8
+#endif /* CONFIG_CPU_MIPSR6 */
 	.if __memcpy == 1
 	END(memcpy)
 	.set __memcpy, 0
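Editorial note: the R6 path drops the lwl/lwr alignment tricks entirely and byte-copies. Functionally the new loop is just this (a sketch; the real COPY_BYTE() also decrements len and branches to .Ldone when it hits zero, which is why the unrolled loop above can branch back unconditionally):

	/* Sketch: .Lcopy_unaligned_bytes, unrolled 8 bytes per pass above. */
	static void copy_unaligned_bytes(unsigned char *dst,
					 const unsigned char *src,
					 unsigned long len)
	{
		while (len--)
			*dst++ = *src++;
	}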

+ 47 - 0
arch/mips/lib/memset.S

@@ -111,6 +111,7 @@
 	.set		at
 #endif
 
+#ifndef CONFIG_CPU_MIPSR6
 	R10KCBARRIER(0(ra))
 #ifdef __MIPSEB__
 	EX(LONG_S_L, a1, (a0), .Lfirst_fixup\@)	/* make word/dword aligned */
@@ -120,6 +121,30 @@
 	PTR_SUBU	a0, t0			/* long align ptr */
 	PTR_ADDU	a2, t0			/* correct size */
 
+#else /* CONFIG_CPU_MIPSR6 */
+#define STORE_BYTE(N)				\
+	EX(sb, a1, N(a0), .Lbyte_fixup\@);	\
+	beqz		t0, 0f;			\
+	PTR_ADDU	t0, 1;
+
+	PTR_ADDU	a2, t0			/* correct size */
+	PTR_ADDU	t0, 1
+	STORE_BYTE(0)
+	STORE_BYTE(1)
+#if LONGSIZE == 4
+	EX(sb, a1, 2(a0), .Lbyte_fixup\@)
+#else
+	STORE_BYTE(2)
+	STORE_BYTE(3)
+	STORE_BYTE(4)
+	STORE_BYTE(5)
+	EX(sb, a1, 6(a0), .Lbyte_fixup\@)
+#endif
+0:
+	ori		a0, STORMASK
+	xori		a0, STORMASK
+	PTR_ADDIU	a0, STORSIZE
+#endif /* CONFIG_CPU_MIPSR6 */
 1:	ori		t1, a2, 0x3f		/* # of full blocks */
 	xori		t1, 0x3f
 	beqz		t1, .Lmemset_partial\@	/* no block to fill */
@@ -159,6 +184,7 @@
 	andi		a2, STORMASK		/* At most one long to go */
 
 	beqz		a2, 1f
+#ifndef CONFIG_CPU_MIPSR6
 	PTR_ADDU	a0, a2			/* What's left */
 	R10KCBARRIER(0(ra))
 #ifdef __MIPSEB__
@@ -166,6 +192,22 @@
 #else
 	EX(LONG_S_L, a1, -1(a0), .Llast_fixup\@)
 #endif
+#else
+	PTR_SUBU	t0, $0, a2
+	PTR_ADDIU	t0, 1
+	STORE_BYTE(0)
+	STORE_BYTE(1)
+#if LONGSIZE == 4
+	EX(sb, a1, 2(a0), .Lbyte_fixup\@)
+#else
+	STORE_BYTE(2)
+	STORE_BYTE(3)
+	STORE_BYTE(4)
+	STORE_BYTE(5)
+	EX(sb, a1, 6(a0), .Lbyte_fixup\@)
+#endif
+0:
+#endif
 1:	jr		ra
 	move		a2, zero
 
@@ -186,6 +228,11 @@
 	.hidden __memset
 	.endif
 
+.Lbyte_fixup\@:
+	PTR_SUBU	a2, $0, t0
+	jr		ra
+	 PTR_ADDIU	a2, 1
+
 .Lfirst_fixup\@:
 	jr	ra
 	nop
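Editorial note: on R6 the STORE_BYTE() chains replace the single LONG_S_L/LONG_S_R partial store, so both the alignment prologue and the tail amount to a bounded byte loop, with .Lbyte_fixup recomputing the untouched remainder from the negated count in t0. A sketch of the fault-free behaviour:

	/* Sketch: what a STORE_BYTE() chain stores when nothing faults. */
	static unsigned char *memset_bytes_sketch(unsigned char *p,
						  unsigned char c,
						  unsigned int n)
	{
		while (n--)		/* each STORE_BYTE() is one step */
			*p++ = c;
		return p;
	}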

+ 1 - 1
arch/mips/lib/mips-atomic.c

@@ -15,7 +15,7 @@
 #include <linux/export.h>
 #include <linux/stringify.h>
 
-#ifndef CONFIG_CPU_MIPSR2
+#if !defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_CPU_MIPSR6)
 
 /*
  * For cli() we have to insert nops to make sure that the new value

+ 158 - 11
arch/mips/math-emu/cp1emu.c

@@ -48,6 +48,7 @@
 #include <asm/processor.h>
 #include <asm/fpu_emulator.h>
 #include <asm/fpu.h>
+#include <asm/mips-r2-to-r6-emul.h>
 
 #include "ieee754.h"
 
@@ -68,7 +69,7 @@ static int fpux_emu(struct pt_regs *,
 #define modeindex(v) ((v) & FPU_CSR_RM)
 
 /* convert condition code register number to csr bit */
-static const unsigned int fpucondbit[8] = {
+const unsigned int fpucondbit[8] = {
 	FPU_CSR_COND0,
 	FPU_CSR_COND1,
 	FPU_CSR_COND2,
@@ -448,6 +449,9 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
 				dec_insn.next_pc_inc;
 			/* Fall through */
 		case jr_op:
+			/* For R6, JR already emulated in jalr_op */
+			if (NO_R6EMU && insn.r_format.opcode == jr_op)
+				break;
 			*contpc = regs->regs[insn.r_format.rs];
 			return 1;
 		}
@@ -456,12 +460,18 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
 		switch (insn.i_format.rt) {
 		case bltzal_op:
 		case bltzall_op:
+			if (NO_R6EMU && (insn.i_format.rs ||
+			    insn.i_format.rt == bltzall_op))
+				break;
+
 			regs->regs[31] = regs->cp0_epc +
 				dec_insn.pc_inc +
 				dec_insn.next_pc_inc;
 			/* Fall through */
-		case bltz_op:
 		case bltzl_op:
+			if (NO_R6EMU)
+				break;
+		case bltz_op:
 			if ((long)regs->regs[insn.i_format.rs] < 0)
 				*contpc = regs->cp0_epc +
 					dec_insn.pc_inc +
@@ -473,12 +483,18 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
 			return 1;
 		case bgezal_op:
 		case bgezall_op:
+			if (NO_R6EMU && (insn.i_format.rs ||
+			    insn.i_format.rt == bgezall_op))
+				break;
+
 			regs->regs[31] = regs->cp0_epc +
 				dec_insn.pc_inc +
 				dec_insn.next_pc_inc;
 			/* Fall through */
-		case bgez_op:
 		case bgezl_op:
+			if (NO_R6EMU)
+				break;
+		case bgez_op:
 			if ((long)regs->regs[insn.i_format.rs] >= 0)
 				*contpc = regs->cp0_epc +
 					dec_insn.pc_inc +
@@ -505,8 +521,10 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
 		/* Set microMIPS mode bit: XOR for jalx. */
 		*contpc ^= bit;
 		return 1;
-	case beq_op:
 	case beql_op:
+		if (NO_R6EMU)
+			break;
+	case beq_op:
 		if (regs->regs[insn.i_format.rs] ==
 		    regs->regs[insn.i_format.rt])
 			*contpc = regs->cp0_epc +
@@ -517,8 +535,10 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
 				dec_insn.pc_inc +
 				dec_insn.next_pc_inc;
 		return 1;
-	case bne_op:
 	case bnel_op:
+		if (NO_R6EMU)
+			break;
+	case bne_op:
 		if (regs->regs[insn.i_format.rs] !=
 		    regs->regs[insn.i_format.rt])
 			*contpc = regs->cp0_epc +
@@ -529,8 +549,34 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
 				dec_insn.pc_inc +
 				dec_insn.next_pc_inc;
 		return 1;
-	case blez_op:
 	case blezl_op:
+		if (NO_R6EMU)
+			break;
+	case blez_op:
+
+		/*
+		 * Compact branches for R6 for the
+		 * blez and blezl opcodes.
+		 * BLEZ  | rs = 0 | rt != 0  == BLEZALC
+		 * BLEZ  | rs = rt != 0      == BGEZALC
+		 * BLEZ  | rs != 0 | rt != 0 == BGEUC
+		 * BLEZL | rs = 0 | rt != 0  == BLEZC
+		 * BLEZL | rs = rt != 0      == BGEZC
+		 * BLEZL | rs != 0 | rt != 0 == BGEC
+		 *
+		 * For real BLEZ{,L}, rt is always 0.
+		 */
+		if (cpu_has_mips_r6 && insn.i_format.rt) {
+			if ((insn.i_format.opcode == blez_op) &&
+			    ((!insn.i_format.rs && insn.i_format.rt) ||
+			     (insn.i_format.rs == insn.i_format.rt)))
+				regs->regs[31] = regs->cp0_epc +
+					dec_insn.pc_inc;
+			*contpc = regs->cp0_epc + dec_insn.pc_inc +
+				dec_insn.next_pc_inc;
+
+			return 1;
+		}
 		if ((long)regs->regs[insn.i_format.rs] <= 0)
 			*contpc = regs->cp0_epc +
 				dec_insn.pc_inc +
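The table in the comment above reduces to one rule: on R6, a nonzero rt field turns BLEZ/BLEZL into a compact branch, and only the BLEZ-encoded *ALC forms write the link register. The BGTZ/BGTZL table in the next hunk has exactly the same shape. A minimal stand-alone sketch of that decision, under the assumption that rs/rt are the usual i-format fields (helper names are invented for illustration):

    #include <stdbool.h>

    /* On R6, BLEZ/BLEZL with rt != 0 is a compact branch, never a
     * classic compare-against-zero branch with a delay slot. */
    static bool blez_family_is_compact(bool has_r6, unsigned int rt)
    {
    	return has_r6 && rt != 0;
    }

    /* Call only when rt != 0: of the compact forms, only the
     * BLEZ-encoded BLEZALC (rs == 0) and BGEZALC (rs == rt) link;
     * the BLEZL-encoded BLEZC/BGEZC/BGEC do not. */
    static bool blez_compact_links(bool is_blez, unsigned int rs, unsigned int rt)
    {
    	return is_blez && ((rs == 0 && rt != 0) || rs == rt);
    }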
@@ -540,8 +586,35 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
 				dec_insn.pc_inc +
 				dec_insn.next_pc_inc;
 		return 1;
-	case bgtz_op:
 	case bgtzl_op:
+		if (NO_R6EMU)
+			break;
+	case bgtz_op:
+		/*
+		 * Compact branches for R6 for the
+		 * bgtz and bgtzl opcodes.
+		 * BGTZ  | rs = 0 | rt != 0  == BGTZALC
+		 * BGTZ  | rs = rt != 0      == BLTZALC
+		 * BGTZ  | rs != 0 | rt != 0 == BLTUC
+		 * BGTZL | rs = 0 | rt != 0  == BGTZC
+		 * BGTZL | rs = rt != 0      == BLTZC
+		 * BGTZL | rs != 0 | rt != 0 == BLTC
+		 *
+		 * The *ZALC variants apply for BGTZ && rt != 0.
+		 * For real BGTZ{,L}, rt is always 0.
+		 */
+		if (cpu_has_mips_r6 && insn.i_format.rt) {
+			if ((insn.i_format.opcode == bgtz_op) &&
+			    ((!insn.i_format.rs && insn.i_format.rt) ||
+			     (insn.i_format.rs == insn.i_format.rt)))
+				regs->regs[31] = regs->cp0_epc +
+					dec_insn.pc_inc;
+			*contpc = regs->cp0_epc + dec_insn.pc_inc +
+				dec_insn.next_pc_inc;
+
+			return 1;
+		}
+
 		if ((long)regs->regs[insn.i_format.rs] > 0)
 			*contpc = regs->cp0_epc +
 				dec_insn.pc_inc +
@@ -551,6 +624,16 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
 				dec_insn.pc_inc +
 				dec_insn.next_pc_inc;
 		return 1;
+	case cbcond0_op:
+	case cbcond1_op:
+		if (!cpu_has_mips_r6)
+			break;
+		if (insn.i_format.rt && !insn.i_format.rs)
+			regs->regs[31] = regs->cp0_epc + 4;
+		*contpc = regs->cp0_epc + dec_insn.pc_inc +
+			dec_insn.next_pc_inc;
+
+		return 1;
 #ifdef CONFIG_CPU_CAVIUM_OCTEON
 	case lwc2_op: /* This is bbit0 on Octeon */
 		if ((regs->regs[insn.i_format.rs] & (1ull<<insn.i_format.rt)) == 0)
@@ -576,9 +659,73 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
 		else
 			*contpc = regs->cp0_epc + 8;
 		return 1;
+#else
+	case bc6_op:
+		/*
+		 * Only valid for MIPS R6 but we can still end up
+		 * here from a broken userland so just tell emulator
+		 * this is not a branch and let it break later on.
+		 */
+		if (!cpu_has_mips_r6)
+			break;
+		*contpc = regs->cp0_epc + dec_insn.pc_inc +
+			dec_insn.next_pc_inc;
+
+		return 1;
+	case balc6_op:
+		if (!cpu_has_mips_r6)
+			break;
+		regs->regs[31] = regs->cp0_epc + 4;
+		*contpc = regs->cp0_epc + dec_insn.pc_inc +
+			dec_insn.next_pc_inc;
+
+		return 1;
+	case beqzcjic_op:
+		if (!cpu_has_mips_r6)
+			break;
+		*contpc = regs->cp0_epc + dec_insn.pc_inc +
+			dec_insn.next_pc_inc;
+
+		return 1;
+	case bnezcjialc_op:
+		if (!cpu_has_mips_r6)
+			break;
+		if (!insn.i_format.rs)
+			regs->regs[31] = regs->cp0_epc + 4;
+		*contpc = regs->cp0_epc + dec_insn.pc_inc +
+			dec_insn.next_pc_inc;
+
+		return 1;
 #endif
 	case cop0_op:
 	case cop1_op:
+		/* Need to check for R6 bc1nez and bc1eqz branches */
+		if (cpu_has_mips_r6 &&
+		    ((insn.i_format.rs == bc1eqz_op) ||
+		     (insn.i_format.rs == bc1nez_op))) {
+			bit = 0;
+			switch (insn.i_format.rs) {
+			case bc1eqz_op:
+				if (!(get_fpr32(&current->thread.fpu.fpr[insn.i_format.rt], 0) & 0x1))
+					bit = 1;
+				break;
+			case bc1nez_op:
+				if (get_fpr32(&current->thread.fpu.fpr[insn.i_format.rt], 0) & 0x1)
+					bit = 1;
+				break;
+			}
+			if (bit)
+				*contpc = regs->cp0_epc +
+					dec_insn.pc_inc +
+					(insn.i_format.simmediate << 2);
+			else
+				*contpc = regs->cp0_epc +
+					dec_insn.pc_inc +
+					dec_insn.next_pc_inc;
+
+			return 1;
+		}
+		/* R2/R6 compatible cop1 instruction. Fall through */
 	case cop2_op:
 	case cop1x_op:
 		if (insn.i_format.rs == bc_op) {
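The BC1EQZ/BC1NEZ handling above keys off bit 0 of the selected FPR rather than a CSR condition flag: BC1EQZ branches when the bit is clear, BC1NEZ when it is set. A stand-alone sketch of the predicate, where fpr_word stands in for the get_fpr32(..., 0) read above:

    #include <stdbool.h>
    #include <stdint.h>

    /* Branch decision for the R6 FP compact branches. */
    static bool r6_fp_branch_taken(bool is_bc1eqz, uint32_t fpr_word)
    {
    	bool bit0 = fpr_word & 0x1;

    	/* BC1EQZ: taken when bit 0 == 0; BC1NEZ: taken when bit 0 == 1. */
    	return is_bc1eqz ? !bit0 : bit0;
    }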
@@ -1414,14 +1561,14 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
 		 * achieve full IEEE-754 accuracy - however this emulator does.
 		 */
 		case frsqrt_op:
-			if (!cpu_has_mips_4_5_r2)
+			if (!cpu_has_mips_4_5_r2_r6)
 				return SIGILL;
 
 			handler.u = fpemu_sp_rsqrt;
 			goto scopuop;
 
 		case frecip_op:
-			if (!cpu_has_mips_4_5_r2)
+			if (!cpu_has_mips_4_5_r2_r6)
 				return SIGILL;
 
 			handler.u = fpemu_sp_recip;
@@ -1616,13 +1763,13 @@ copcsr:
 		 * achieve full IEEE-754 accuracy - however this emulator does.
 		 */
 		case frsqrt_op:
-			if (!cpu_has_mips_4_5_r2)
+			if (!cpu_has_mips_4_5_r2_r6)
 				return SIGILL;
 
 			handler.u = fpemu_dp_rsqrt;
 			goto dcopuop;
 		case frecip_op:
-			if (!cpu_has_mips_4_5_r2)
+			if (!cpu_has_mips_4_5_r2_r6)
 				return SIGILL;
 
 			handler.u = fpemu_dp_recip;
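One constant pattern in the branch decoding above: the fall-through address is cp0_epc + pc_inc + next_pc_inc (the branch plus the delay or forbidden slot), while a taken branch substitutes the sign-extended 16-bit immediate shifted left by two for next_pc_inc. A worked example with invented values:

    #include <stdint.h>

    /* pc_inc is the size of the branch itself, next_pc_inc the size of
     * the instruction in the delay slot (both 4 for classic MIPS code). */
    static uint32_t taken_target(uint32_t epc, uint32_t pc_inc, int16_t simm)
    {
    	return epc + pc_inc + ((int32_t)simm << 2);
    }

    /* A branch at 0x80001000 with simm == -4 targets
     * 0x80001000 + 4 + (-4 << 2) = 0x80000ff4. */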

+ 4 - 2
arch/mips/mm/c-r4k.c

@@ -794,7 +794,7 @@ static void local_r4k_flush_cache_sigtramp(void * arg)
 		__asm__ __volatile__ (
 			".set push\n\t"
 			".set noat\n\t"
-			".set mips3\n\t"
+			".set "MIPS_ISA_LEVEL"\n\t"
 #ifdef CONFIG_32BIT
 			"la	$at,1f\n\t"
 #endif
@@ -1255,6 +1255,7 @@ static void probe_pcache(void)
 	case CPU_P5600:
 	case CPU_PROAPTIV:
 	case CPU_M5150:
+	case CPU_QEMU_GENERIC:
 		if (!(read_c0_config7() & MIPS_CONF7_IAR) &&
 		    (c->icache.waysize > PAGE_SIZE))
 			c->icache.flags |= MIPS_CACHE_ALIASES;
@@ -1472,7 +1473,8 @@ static void setup_scache(void)
 
 	default:
 		if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 |
-				    MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2)) {
+				    MIPS_CPU_ISA_M32R6 | MIPS_CPU_ISA_M64R1 |
+				    MIPS_CPU_ISA_M64R2 | MIPS_CPU_ISA_M64R6)) {
 #ifdef CONFIG_MIPS_CPU_SCACHE
 			if (mips_sc_init ()) {
 				scache_size = c->scache.ways * c->scache.sets * c->scache.linesz;
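The ".set "MIPS_ISA_LEVEL"\n\t" change works by C string-literal concatenation: the macro expands to an ISA name string, so the same flush code assembles for whatever ISA the kernel targets instead of being pinned to mips3. A minimal sketch of the pattern; the "mips64r6" value here is an assumption for illustration, the kernel defines the real macro elsewhere in this series:

    /* Hypothetical stand-in for the kernel's MIPS_ISA_LEVEL macro. */
    #define MIPS_ISA_LEVEL "mips64r6"

    static inline void isa_scoped_nop(void)
    {
    	/* Adjacent literals merge into ".set push\n\t.set mips64r6\n\t..." */
    	__asm__ __volatile__(
    		".set push\n\t"
    		".set " MIPS_ISA_LEVEL "\n\t"
    		"nop\n\t"
    		".set pop");
    }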

+ 26 - 4
arch/mips/mm/page.c

@@ -72,6 +72,20 @@ static struct uasm_reloc relocs[5];
 #define cpu_is_r4600_v1_x()	((read_c0_prid() & 0xfffffff0) == 0x00002010)
 #define cpu_is_r4600_v2_x()	((read_c0_prid() & 0xfffffff0) == 0x00002020)
 
+/*
+ * R6 limits the pref instruction to a signed 9-bit offset.
+ * Skip emitting the prefetch if the offset does not fit.
+ */
+#define _uasm_i_pref(a, b, c, d)		\
+do {						\
+	if (cpu_has_mips_r6) {			\
+		if (c <= 0xff && c >= -0x100)	\
+			uasm_i_pref(a, b, c, d);\
+	} else {				\
+		uasm_i_pref(a, b, c, d);	\
+	}					\
+} while (0)
+
 static int pref_bias_clear_store;
 static int pref_bias_copy_load;
 static int pref_bias_copy_store;
@@ -178,7 +192,15 @@ static void set_prefetch_parameters(void)
 			pref_bias_copy_load = 256;
 			pref_bias_copy_store = 128;
 			pref_src_mode = Pref_LoadStreamed;
-			pref_dst_mode = Pref_PrepareForStore;
+			if (cpu_has_mips_r6)
+				/*
+				 * Bit 30 (Pref_PrepareForStore) has been
+				 * removed from MIPS R6. Use bit 5
+				 * (Pref_StoreStreamed).
+				 */
+				pref_dst_mode = Pref_StoreStreamed;
+			else
+				pref_dst_mode = Pref_PrepareForStore;
 			break;
 		}
 	} else {
@@ -214,7 +236,7 @@ static inline void build_clear_pref(u32 **buf, int off)
 		return;
 
 	if (pref_bias_clear_store) {
-		uasm_i_pref(buf, pref_dst_mode, pref_bias_clear_store + off,
+		_uasm_i_pref(buf, pref_dst_mode, pref_bias_clear_store + off,
 			    A0);
 	} else if (cache_line_size == (half_clear_loop_size << 1)) {
 		if (cpu_has_cache_cdex_s) {
@@ -357,7 +379,7 @@ static inline void build_copy_load_pref(u32 **buf, int off)
 		return;
 
 	if (pref_bias_copy_load)
-		uasm_i_pref(buf, pref_src_mode, pref_bias_copy_load + off, A1);
+		_uasm_i_pref(buf, pref_src_mode, pref_bias_copy_load + off, A1);
 }
 
 static inline void build_copy_store_pref(u32 **buf, int off)
@@ -366,7 +388,7 @@ static inline void build_copy_store_pref(u32 **buf, int off)
 		return;
 
 	if (pref_bias_copy_store) {
-		uasm_i_pref(buf, pref_dst_mode, pref_bias_copy_store + off,
+		_uasm_i_pref(buf, pref_dst_mode, pref_bias_copy_store + off,
 			    A0);
 	} else if (cache_line_size == (half_copy_loop_size << 1)) {
 		if (cpu_has_cache_cdex_s) {
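The _uasm_i_pref() wrapper above encodes the R6 restriction that PREF takes a signed 9-bit offset, i.e. -256..255; anything outside that range is simply not emitted on R6. The check in isolation, plus the arithmetic that makes it bite:

    #include <stdbool.h>

    /* Signed 9-bit range used by the macro: -0x100 (-256) .. 0xff (255). */
    static bool fits_simm9(long off)
    {
    	return off >= -0x100 && off <= 0xff;
    }

    /* E.g. with pref_bias_copy_load == 256, every 256 + off (off >= 0)
     * overflows the field, so those prefetches are dropped on R6. */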

+ 3 - 1
arch/mips/mm/sc-mips.c

@@ -81,6 +81,7 @@ static inline int mips_sc_is_activated(struct cpuinfo_mips *c)
 	case CPU_PROAPTIV:
 	case CPU_P5600:
 	case CPU_BMIPS5000:
+	case CPU_QEMU_GENERIC:
 		if (config2 & (1 << 12))
 			return 0;
 	}
@@ -104,7 +105,8 @@ static inline int __init mips_sc_probe(void)
 
 	/* Ignore anything but MIPSxx processors */
 	if (!(c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 |
-			      MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2)))
+			      MIPS_CPU_ISA_M32R6 | MIPS_CPU_ISA_M64R1 |
+			      MIPS_CPU_ISA_M64R2 | MIPS_CPU_ISA_M64R6)))
 		return 0;
 
 	/* Does this MIPS32/MIPS64 CPU have a config2 register? */
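isa_level is a bitmask with one bit per architecture release, so admitting R6 parts to the probe is just two more bits OR-ed into the accepted set, here and in setup_scache() above. A sketch of the membership test with invented bit values (the kernel defines its own MIPS_CPU_ISA_* constants):

    #include <stdbool.h>
    #include <stdint.h>

    /* Illustrative bit assignments only. */
    #define ISA_M32R1 0x01
    #define ISA_M32R2 0x02
    #define ISA_M32R6 0x04
    #define ISA_M64R1 0x08
    #define ISA_M64R2 0x10
    #define ISA_M64R6 0x20

    static bool is_mips32_or_mips64(uint32_t isa_level)
    {
    	return isa_level & (ISA_M32R1 | ISA_M32R2 | ISA_M32R6 |
    			    ISA_M64R1 | ISA_M64R2 | ISA_M64R6);
    }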

+ 4 - 3
arch/mips/mm/tlbex.c

@@ -501,7 +501,7 @@ static void build_tlb_write_entry(u32 **p, struct uasm_label **l,
 	case tlb_indexed: tlbw = uasm_i_tlbwi; break;
 	}
 
-	if (cpu_has_mips_r2) {
+	if (cpu_has_mips_r2_exec_hazard) {
 		/*
 		 * The architecture spec says an ehb is required here,
 		 * but a number of cores do not have the hazard and
@@ -514,6 +514,7 @@ static void build_tlb_write_entry(u32 **p, struct uasm_label **l,
 		case CPU_PROAPTIV:
 		case CPU_P5600:
 		case CPU_M5150:
+		case CPU_QEMU_GENERIC:
 			break;
 
 		default:
@@ -1952,7 +1953,7 @@ static void build_r4000_tlb_load_handler(void)
 
 		switch (current_cpu_type()) {
 		default:
-			if (cpu_has_mips_r2) {
+			if (cpu_has_mips_r2_exec_hazard) {
 				uasm_i_ehb(&p);
 
 		case CPU_CAVIUM_OCTEON:
@@ -2019,7 +2020,7 @@ static void build_r4000_tlb_load_handler(void)
 
 		switch (current_cpu_type()) {
 		default:
-			if (cpu_has_mips_r2) {
+			if (cpu_has_mips_r2_exec_hazard) {
 				uasm_i_ehb(&p);
 
 		case CPU_CAVIUM_OCTEON:
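All three tlbex.c hunks follow the same shape: emit an EHB only when the core actually has the execution hazard, which is what narrowing cpu_has_mips_r2 to cpu_has_mips_r2_exec_hazard buys. A sketch of the emission pattern, assuming the kernel's <asm/uasm.h> environment and passing the feature test in as a flag:

    /* Sketch only: u32 and uasm_i_ehb() come from kernel uasm headers. */
    static void emit_hazard_barrier(u32 **p, int has_exec_hazard)
    {
    	/*
    	 * The spec requires an ehb between the c0 write and the TLB
    	 * write, but cores without the hazard can skip it.
    	 */
    	if (has_exec_hazard)
    		uasm_i_ehb(p);
    }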

+ 32 - 0
arch/mips/mm/uasm-mips.c

@@ -38,6 +38,14 @@
 	 | (e) << RE_SH						\
 	 | (f) << FUNC_SH)
 
+/* This macro sets the non-variable bits of an R6 instruction. */
+#define M6(a, b, c, d, e)					\
+	((a) << OP_SH						\
+	 | (b) << RS_SH						\
+	 | (c) << RT_SH						\
+	 | (d) << SIMM9_SH					\
+	 | (e) << FUNC_SH)
+
 /* Define these when we are not the ISA the kernel is being compiled with. */
 #ifdef CONFIG_CPU_MICROMIPS
 #define CL_uasm_i_b(buf, off) ISAOPC(_beq)(buf, 0, 0, off)
@@ -62,7 +70,11 @@ static struct insn insn_table[] = {
 	{ insn_bltzl, M(bcond_op, 0, bltzl_op, 0, 0, 0), RS | BIMM },
 	{ insn_bltz, M(bcond_op, 0, bltz_op, 0, 0, 0), RS | BIMM },
 	{ insn_bne, M(bne_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
+#ifndef CONFIG_CPU_MIPSR6
 	{ insn_cache,  M(cache_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
+#else
+	{ insn_cache,  M6(cache_op, 0, 0, 0, cache6_op),  RS | RT | SIMM9 },
+#endif
 	{ insn_daddiu, M(daddiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
 	{ insn_daddu, M(spec_op, 0, 0, 0, 0, daddu_op), RS | RT | RD },
 	{ insn_dinsm, M(spec3_op, 0, 0, 0, 0, dinsm_op), RS | RT | RD | RE },
@@ -85,13 +97,22 @@ static struct insn insn_table[] = {
 	{ insn_jal,  M(jal_op, 0, 0, 0, 0, 0),	JIMM },
 	{ insn_jalr,  M(spec_op, 0, 0, 0, 0, jalr_op), RS | RD },
 	{ insn_j,  M(j_op, 0, 0, 0, 0, 0),  JIMM },
+#ifndef CONFIG_CPU_MIPSR6
 	{ insn_jr,  M(spec_op, 0, 0, 0, 0, jr_op),  RS },
+#else
+	{ insn_jr,  M(spec_op, 0, 0, 0, 0, jalr_op),  RS },
+#endif
 	{ insn_lb, M(lb_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
 	{ insn_ld,  M(ld_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
 	{ insn_ldx, M(spec3_op, 0, 0, 0, ldx_op, lx_op), RS | RT | RD },
 	{ insn_lh,  M(lh_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
+#ifndef CONFIG_CPU_MIPSR6
 	{ insn_lld,  M(lld_op, 0, 0, 0, 0, 0),	RS | RT | SIMM },
 	{ insn_ll,  M(ll_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
+#else
+	{ insn_lld,  M6(spec3_op, 0, 0, 0, lld6_op),  RS | RT | SIMM9 },
+	{ insn_ll,  M6(spec3_op, 0, 0, 0, ll6_op),  RS | RT | SIMM9 },
+#endif
 	{ insn_lui,  M(lui_op, 0, 0, 0, 0, 0),	RT | SIMM },
 	{ insn_lw,  M(lw_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
 	{ insn_lwx, M(spec3_op, 0, 0, 0, lwx_op, lx_op), RS | RT | RD },
@@ -104,11 +125,20 @@ static struct insn insn_table[] = {
 	{ insn_mul, M(spec2_op, 0, 0, 0, 0, mul_op), RS | RT | RD},
 	{ insn_ori,  M(ori_op, 0, 0, 0, 0, 0),	RS | RT | UIMM },
 	{ insn_or,  M(spec_op, 0, 0, 0, 0, or_op),  RS | RT | RD },
+#ifndef CONFIG_CPU_MIPSR6
 	{ insn_pref,  M(pref_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
+#else
+	{ insn_pref,  M6(spec3_op, 0, 0, 0, pref6_op),  RS | RT | SIMM9 },
+#endif
 	{ insn_rfe,  M(cop0_op, cop_op, 0, 0, 0, rfe_op),  0 },
 	{ insn_rotr,  M(spec_op, 1, 0, 0, 0, srl_op),  RT | RD | RE },
+#ifndef CONFIG_CPU_MIPSR6
 	{ insn_scd,  M(scd_op, 0, 0, 0, 0, 0),	RS | RT | SIMM },
 	{ insn_sc,  M(sc_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
+#else
+	{ insn_scd,  M6(spec3_op, 0, 0, 0, scd6_op),  RS | RT | SIMM9 },
+	{ insn_sc,  M6(spec3_op, 0, 0, 0, sc6_op),  RS | RT | SIMM9 },
+#endif
 	{ insn_sd,  M(sd_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
 	{ insn_sll,  M(spec_op, 0, 0, 0, 0, sll_op),  RT | RD | RE },
 	{ insn_sllv,  M(spec_op, 0, 0, 0, 0, sllv_op),  RS | RT | RD },
@@ -198,6 +228,8 @@ static void build_insn(u32 **buf, enum opcode opc, ...)
 		op |= build_set(va_arg(ap, u32));
 	if (ip->fields & SCIMM)
 		op |= build_scimm(va_arg(ap, u32));
+	if (ip->fields & SIMM9)
+		op |= build_scimm9(va_arg(ap, u32));
 	va_end(ap);
 
 	**buf = op;
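M6() mirrors the classic M() encoder but drops the rd/re fields: the signed 9-bit offset sits at bits 15:7 (SIMM9_SH = 7 in uasm.c below) and the R6 minor opcode goes in the function field. A stand-alone sketch of the same packing, with the shift values taken from the standard MIPS field positions and the masking that M6() leaves to build_scimm9() done inline:

    #include <stdint.h>

    #define OP_SH	26
    #define RS_SH	21
    #define RT_SH	16
    #define SIMM9_SH	7
    #define FUNC_SH	0

    /* Same field layout as the M6() macro above. */
    static uint32_t m6(uint32_t op, uint32_t rs, uint32_t rt,
    		   int32_t simm9, uint32_t func)
    {
    	return (op << OP_SH) | (rs << RS_SH) | (rt << RT_SH) |
    	       (((uint32_t)simm9 & 0x1ff) << SIMM9_SH) | (func << FUNC_SH);
    }

    /* An R6 LL of register 8 from offset 4(register 5) would be roughly
     * m6(spec3_op, 5, 8, 4, ll6_op), with the opcode values taken from
     * the kernel's opcode tables. */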

+ 12 - 1
arch/mips/mm/uasm.c

@@ -24,7 +24,8 @@ enum fields {
 	JIMM = 0x080,
 	FUNC = 0x100,
 	SET = 0x200,
-	SCIMM = 0x400
+	SCIMM = 0x400,
+	SIMM9 = 0x800,
 };
 
 #define OP_MASK		0x3f
@@ -41,6 +42,8 @@ enum fields {
 #define FUNC_SH		0
 #define SET_MASK	0x7
 #define SET_SH		0
+#define SIMM9_SH	7
+#define SIMM9_MASK	0x1ff
 
 enum opcode {
 	insn_invalid,
@@ -116,6 +119,14 @@ static inline u32 build_scimm(u32 arg)
 	return (arg & SCIMM_MASK) << SCIMM_SH;
 }
 
+static inline u32 build_scimm9(s32 arg)
+{
+	WARN((arg > 0xff || arg < -0x100),
+	       KERN_WARNING "Micro-assembler field overflow\n");
+
+	return (arg & SIMM9_MASK) << SIMM9_SH;
+}
+
 static inline u32 build_func(u32 arg)
 {
 	WARN(arg & ~FUNC_MASK, KERN_WARNING "Micro-assembler field overflow\n");
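build_scimm9() warns when the value exceeds the signed 9-bit range and then relies on two's-complement masking to place negative offsets correctly. A worked check of that packing, with the constants as defined above:

    #include <assert.h>
    #include <stdint.h>

    #define SIMM9_MASK	0x1ff
    #define SIMM9_SH	7

    static uint32_t pack_simm9(int32_t arg)
    {
    	return ((uint32_t)arg & SIMM9_MASK) << SIMM9_SH;
    }

    int main(void)
    {
    	/* -4 wraps to 0x1fc in 9 bits, then lands at bits 15:7. */
    	assert(pack_simm9(-4) == 0x1fcu << 7);
    	assert(pack_simm9(0xff) == 0xffu << 7);
    	return 0;
    }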