
Merge tag 'xtensa-next-20140224' of git://github.com/czankel/xtensa-linux

Pull xtensa fixes from Chris Zankel:
 "This series includes fixes for potentially serious bugs in the
  routines spilling processor registers to stack, as well as other
  issues and compiler errors and warnings.

   - allow booting xtfpga on boards with new uBoot and >128MBytes memory
   - drop nonexistent GPIO32 support from fsf variant
   - don't select USE_GENERIC_SMP_HELPERS
   - enable common clock framework support, set up ethoc clock on xtfpga
   - wire up sched_setattr and sched_getattr syscalls
   - fix system call to spill the processor registers to stack
   - improve kernel macro to spill the processor registers
   - export ccount_freq symbol
   - fix undefined symbol warning"

* tag 'xtensa-next-20140224' of git://github.com/czankel/xtensa-linux:
  xtensa: wire up sched_setattr and sched_getattr syscalls
  xtensa: xtfpga: set ethoc clock frequency
  xtensa: xtfpga: use common clock framework
  xtensa: support common clock framework
  xtensa: no need to select USE_GENERIC_SMP_HELPERS
  xtensa: fsf: drop nonexistent GPIO32 support
  xtensa: don't pass high memory to bootmem allocator
  xtensa: fix fast_syscall_spill_registers
  xtensa: fix fast_syscall_spill_registers
  xtensa: save current register frame in fast_syscall_spill_registers_fixup
  xtensa: introduce spill_registers_kernel macro
  xtensa: export ccount_freq
  xtensa: fix warning '"CONFIG_OF" is not defined'
Linus Torvalds, commit bafb81927e

arch/xtensa/Kconfig: +1 -2

@@ -20,6 +20,7 @@ config XTENSA
 	select HAVE_FUNCTION_TRACER
 	select HAVE_IRQ_TIME_ACCOUNTING
 	select HAVE_PERF_EVENTS
+	select COMMON_CLK
 	help
 	  Xtensa processors are 32-bit RISC machines designed by Tensilica
 	  primarily for embedded systems.  These processors are both
@@ -80,7 +81,6 @@ choice
 config XTENSA_VARIANT_FSF
 	bool "fsf - default (not generic) configuration"
 	select MMU
-	select HAVE_XTENSA_GPIO32
 
 config XTENSA_VARIANT_DC232B
 	bool "dc232b - Diamond 232L Standard Core Rev.B (LE)"
@@ -135,7 +135,6 @@ config HAVE_SMP
 config SMP
 	bool "Enable Symmetric multi-processing support"
 	depends on HAVE_SMP
-	select USE_GENERIC_SMP_HELPERS
 	select GENERIC_SMP_IDLE_THREAD
 	help
 	  Enabled SMP Software; allows more than one CPU/CORE

arch/xtensa/boot/dts/xtfpga.dtsi: +9 -3

@@ -35,6 +35,13 @@
 		interrupt-controller;
 	};
 
+	clocks {
+		osc: main-oscillator {
+			#clock-cells = <0>;
+			compatible = "fixed-clock";
+		};
+	};
+
 	serial0: serial@fd050020 {
 		device_type = "serial";
 		compatible = "ns16550a";
@@ -42,9 +49,7 @@
 		reg = <0xfd050020 0x20>;
 		reg-shift = <2>;
 		interrupts = <0 1>; /* external irq 0 */
-		/* Filled in by platform_setup from FPGA register
-		 * clock-frequency = <100000000>;
-		 */
+		clocks = <&osc>;
 	};
 
 	enet0: ethoc@fd030000 {
@@ -52,5 +57,6 @@
 		reg = <0xfd030000 0x4000 0xfd800000 0x4000>;
 		interrupts = <1 1>; /* external irq 1 */
 		local-mac-address = [00 50 c2 13 6f 00];
+		clocks = <&osc>;
 	};
 };
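For context: the new fixed-clock node intentionally has no clock-frequency property in the .dtsi. The xtfpga platform code (see the setup.c hunk further down) fills the rate in at boot from an FPGA register, and the serial and ethoc nodes now pick that clock up through their clocks = <&osc> references. A minimal consumer sketch (not part of this patch; my_get_osc_rate and my_dev are hypothetical names) of how a driver reads that rate through the common clock framework:

    #include <linux/clk.h>
    #include <linux/device.h>
    #include <linux/err.h>

    static unsigned long my_get_osc_rate(struct device *my_dev)
    {
            struct clk *clk;

            /* A NULL connection ID matches the first phandle in the node's
             * "clocks" property, i.e. <&osc> above. */
            clk = devm_clk_get(my_dev, NULL);
            if (IS_ERR(clk))
                    return 0;

            /* prepare/enable is a no-op for a fixed clock but keeps the API happy */
            if (clk_prepare_enable(clk))
                    return 0;

            return clk_get_rate(clk);    /* the frequency set up by the platform code */
    }

Because the lookup is by index, neither the serial nor the ethoc node needs a clock-names property for this to resolve.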

arch/xtensa/include/asm/io.h: +1 -1

@@ -25,7 +25,7 @@
 
 #ifdef CONFIG_MMU
 
-#if XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY && CONFIG_OF
+#if XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY && defined(CONFIG_OF)
 extern unsigned long xtensa_kio_paddr;
 
 static inline unsigned long xtensa_get_kio_paddr(void)
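The one-line changes to io.h, vectors.h, and mmu.c are all the same fix for the '"CONFIG_OF" is not defined' warning: Kconfig only emits a CONFIG_* define for options that are enabled, so in a non-OF build the bare identifier in #if trips -Wundef (otherwise it silently evaluates to 0). A standalone illustration, not taken from the kernel:

    /* foo.c: build with gcc -Wundef -c foo.c to see the difference. */

    #if FEATURE_X                  /* warning: "FEATURE_X" is not defined */
    int with_feature;
    #endif

    #if defined(FEATURE_X)         /* no warning: absence is tested explicitly */
    int with_feature_checked;
    #endif

IS_ENABLED(CONFIG_OF) would be the other idiomatic spelling; defined() keeps the diff minimal here.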

arch/xtensa/include/asm/traps.h: +28 -16

@@ -23,25 +23,37 @@ void secondary_trap_init(void);
 
 static inline void spill_registers(void)
 {
-
+#if XCHAL_NUM_AREGS > 16
 	__asm__ __volatile__ (
-		"movi	a14, "__stringify((1 << PS_EXCM_BIT) | LOCKLEVEL)"\n\t"
-		"mov	a12, a0\n\t"
-		"rsr	a13, sar\n\t"
-		"xsr	a14, ps\n\t"
-		"movi	a0, _spill_registers\n\t"
-		"rsync\n\t"
-		"callx0 a0\n\t"
-		"mov	a0, a12\n\t"
-		"wsr	a13, sar\n\t"
-		"wsr	a14, ps\n\t"
-		: :
-#if defined(CONFIG_FRAME_POINTER)
-		: "a2", "a3", "a4",       "a11", "a12", "a13", "a14", "a15",
+		"	call12	1f\n"
+		"	_j	2f\n"
+		"	retw\n"
+		"	.align	4\n"
+		"1:\n"
+		"	_entry	a1, 48\n"
+		"	addi	a12, a0, 3\n"
+#if XCHAL_NUM_AREGS > 32
+		"	.rept	(" __stringify(XCHAL_NUM_AREGS) " - 32) / 12\n"
+		"	_entry	a1, 48\n"
+		"	mov	a12, a0\n"
+		"	.endr\n"
+#endif
+		"	_entry	a1, 48\n"
+#if XCHAL_NUM_AREGS % 12 == 0
+		"	mov	a8, a8\n"
+#elif XCHAL_NUM_AREGS % 12 == 4
+		"	mov	a12, a12\n"
+#elif XCHAL_NUM_AREGS % 12 == 8
+		"	mov	a4, a4\n"
+#endif
+		"	retw\n"
+		"2:\n"
+		: : : "a12", "a13", "memory");
 #else
-		: "a2", "a3", "a4", "a7", "a11", "a12", "a13", "a14", "a15",
+	__asm__ __volatile__ (
+		"	mov	a12, a12\n"
+		: : : "memory");
 #endif
-		  "memory");
 }
 
 #endif /* _XTENSA_TRAPS_H */
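The unrolling in the rewritten spill_registers() above (and in the matching spill_registers_kernel macro added to entry.S below) is driven entirely by XCHAL_NUM_AREGS, the number of physical address registers in the configured core. Worked through for the arithmetic only: a 64-register core emits the .rept body (64 - 32) / 12 = 2 times and, since 64 % 12 == 4, ends with "mov a12, a12", while a 32-register core skips the .rept block and, since 32 % 12 == 8, ends with "mov a4, a4". A throwaway C check of those counts, purely illustrative:

    #include <stdio.h>

    /* Mirrors the preprocessor arithmetic in spill_registers(). */
    static void spill_unroll(int num_aregs)
    {
            int rept = num_aregs > 32 ? (num_aregs - 32) / 12 : 0;
            const char *tail = num_aregs % 12 == 0 ? "mov a8, a8" :
                               num_aregs % 12 == 4 ? "mov a12, a12" :
                                                     "mov a4, a4";

            printf("XCHAL_NUM_AREGS=%d: %d extra .rept frames, tail '%s'\n",
                   num_aregs, rept, tail);
    }

    int main(void)
    {
            spill_unroll(32);    /* 0 extra .rept frames, tail 'mov a4, a4'   */
            spill_unroll(64);    /* 2 extra .rept frames, tail 'mov a12, a12' */
            return 0;
    }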

arch/xtensa/include/asm/vectors.h: +1 -1

@@ -25,7 +25,7 @@
 #define XCHAL_KIO_DEFAULT_PADDR		0xf0000000
 #define XCHAL_KIO_SIZE			0x10000000
 
-#if XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY && CONFIG_OF
+#if XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY && defined(CONFIG_OF)
 #define XCHAL_KIO_PADDR			xtensa_get_kio_paddr()
 #else
 #define XCHAL_KIO_PADDR			XCHAL_KIO_DEFAULT_PADDR

arch/xtensa/include/uapi/asm/unistd.h: +6 -1

@@ -734,7 +734,12 @@ __SYSCALL(332, sys_finit_module, 3)
 #define __NR_accept4				333
 __SYSCALL(333, sys_accept4, 4)
 
-#define __NR_syscall_count			334
+#define __NR_sched_setattr			334
+__SYSCALL(334, sys_sched_setattr, 2)
+#define __NR_sched_getattr			335
+__SYSCALL(335, sys_sched_getattr, 3)
+
+#define __NR_syscall_count			336
 
 /*
  * sysxtensa syscall handler
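Neither syscall has a glibc wrapper, so a userspace test goes through syscall(2) and declares struct sched_attr itself. A hypothetical test program, not part of the patch; note that the argument counts registered above (2 and 3) match the original prototypes, and a flags argument was added to both syscalls shortly afterwards, so passing a trailing 0 is harmless either way:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/syscall.h>

    struct sched_attr {             /* declared locally; glibc provides no wrapper */
            uint32_t size;
            uint32_t sched_policy;
            uint64_t sched_flags;
            int32_t  sched_nice;
            uint32_t sched_priority;
            uint64_t sched_runtime; /* SCHED_DEADLINE parameters, in ns */
            uint64_t sched_deadline;
            uint64_t sched_period;
    };

    int main(void)
    {
            struct sched_attr attr;

            memset(&attr, 0, sizeof(attr));
            /* pid 0 means the calling task */
            if (syscall(__NR_sched_getattr, 0, &attr, sizeof(attr), 0) == 0)
                    printf("policy=%u nice=%d\n", attr.sched_policy, attr.sched_nice);
            return 0;
    }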

arch/xtensa/kernel/entry.S: +231 -218

@@ -1081,196 +1081,53 @@ ENTRY(fast_syscall_spill_registers)
 
 	rsr	a0, sar
 	s32i	a3, a2, PT_AREG3
-	s32i	a4, a2, PT_AREG4
-	s32i	a0, a2, PT_AREG5	# store SAR to PT_AREG5
+	s32i	a0, a2, PT_SAR
 
-	/* The spill routine might clobber a7, a11, and a15. */
+	/* The spill routine might clobber a4, a7, a8, a11, a12, and a15. */
 
+	s32i	a4, a2, PT_AREG4
 	s32i	a7, a2, PT_AREG7
+	s32i	a8, a2, PT_AREG8
 	s32i	a11, a2, PT_AREG11
+	s32i	a12, a2, PT_AREG12
 	s32i	a15, a2, PT_AREG15
 
-	call0	_spill_registers	# destroys a3, a4, and SAR
-
-	/* Advance PC, restore registers and SAR, and return from exception. */
-
-	l32i	a3, a2, PT_AREG5
-	l32i	a4, a2, PT_AREG4
-	l32i	a0, a2, PT_AREG0
-	wsr	a3, sar
-	l32i	a3, a2, PT_AREG3
-
-	/* Restore clobbered registers. */
-
-	l32i	a7, a2, PT_AREG7
-	l32i	a11, a2, PT_AREG11
-	l32i	a15, a2, PT_AREG15
-
-	movi	a2, 0
-	rfe
-
-ENDPROC(fast_syscall_spill_registers)
-
-/* Fixup handler.
- *
- * We get here if the spill routine causes an exception, e.g. tlb miss.
- * We basically restore WINDOWBASE and WINDOWSTART to the condition when
- * we entered the spill routine and jump to the user exception handler.
- *
- * a0: value of depc, original value in depc
- * a2: trashed, original value in EXC_TABLE_DOUBLE_SAVE
- * a3: exctable, original value in excsave1
- */
-
-ENTRY(fast_syscall_spill_registers_fixup)
-
-	rsr	a2, windowbase	# get current windowbase (a2 is saved)
-	xsr	a0, depc	# restore depc and a0
-	ssl	a2		# set shift (32 - WB)
-
-	/* We need to make sure the current registers (a0-a3) are preserved.
-	 * To do this, we simply set the bit for the current window frame
-	 * in WS, so that the exception handlers save them to the task stack.
-	 */
-
-	xsr	a3, excsave1	# get spill-mask
-	slli	a3, a3, 1	# shift left by one
-
-	slli	a2, a3, 32-WSBITS
-	src	a2, a3, a2	# a2 = xxwww1yyxxxwww1yy......
-	wsr	a2, windowstart	# set corrected windowstart
-
-	srli	a3, a3, 1
-	rsr	a2, excsave1
-	l32i	a2, a2, EXC_TABLE_DOUBLE_SAVE	# restore a2
-	xsr	a2, excsave1
-	s32i	a3, a2, EXC_TABLE_DOUBLE_SAVE	# save a3
-	l32i	a3, a2, EXC_TABLE_PARAM	# original WB (in user task)
-	xsr	a2, excsave1
-
-	/* Return to the original (user task) WINDOWBASE.
-	 * We leave the following frame behind:
-	 * a0, a1, a2	same
-	 * a3:		trashed (saved in EXC_TABLE_DOUBLE_SAVE)
-	 * depc:	depc (we have to return to that address)
-	 * excsave_1:	exctable
-	 */
-
-	wsr	a3, windowbase
-	rsync
-
-	/* We are now in the original frame when we entered _spill_registers:
-	 *  a0: return address
-	 *  a1: used, stack pointer
-	 *  a2: kernel stack pointer
-	 *  a3: available
-	 *  depc: exception address
-	 *  excsave: exctable
-	 * Note: This frame might be the same as above.
-	 */
-
-	/* Setup stack pointer. */
-
-	addi	a2, a2, -PT_USER_SIZE
-	s32i	a0, a2, PT_AREG0
-
-	/* Make sure we return to this fixup handler. */
-
-	movi	a3, fast_syscall_spill_registers_fixup_return
-	s32i	a3, a2, PT_DEPC		# setup depc
-
-	/* Jump to the exception handler. */
-
-	rsr	a3, excsave1
-	rsr	a0, exccause
-	addx4	a0, a0, a3              	# find entry in table
-	l32i	a0, a0, EXC_TABLE_FAST_USER     # load handler
-	l32i	a3, a3, EXC_TABLE_DOUBLE_SAVE
-	jx	a0
-
-ENDPROC(fast_syscall_spill_registers_fixup)
-
-ENTRY(fast_syscall_spill_registers_fixup_return)
-
-	/* When we return here, all registers have been restored (a2: DEPC) */
-
-	wsr	a2, depc		# exception address
-
-	/* Restore fixup handler. */
-
-	rsr	a2, excsave1
-	s32i	a3, a2, EXC_TABLE_DOUBLE_SAVE
-	movi	a3, fast_syscall_spill_registers_fixup
-	s32i	a3, a2, EXC_TABLE_FIXUP
-	rsr	a3, windowbase
-	s32i	a3, a2, EXC_TABLE_PARAM
-	l32i	a2, a2, EXC_TABLE_KSTK
-
-	/* Load WB at the time the exception occurred. */
-
-	rsr	a3, sar			# WB is still in SAR
-	neg	a3, a3
-	wsr	a3, windowbase
-	rsync
-
-	rsr	a3, excsave1
-	l32i	a3, a3, EXC_TABLE_DOUBLE_SAVE
-
-	rfde
-
-ENDPROC(fast_syscall_spill_registers_fixup_return)
-
-/*
- * spill all registers.
- *
- * This is not a real function. The following conditions must be met:
- *
- *  - must be called with call0.
- *  - uses a3, a4 and SAR.
- *  - the last 'valid' register of each frame are clobbered.
- *  - the caller must have registered a fixup handler
- *    (or be inside a critical section)
- *  - PS_EXCM must be set (PS_WOE cleared?)
- */
-
-ENTRY(_spill_registers)
-
 	/*
 	 * Rotate ws so that the current windowbase is at bit 0.
 	 * Assume ws = xxxwww1yy (www1 current window frame).
 	 * Rotate ws right so that a4 = yyxxxwww1.
 	 */
 
-	rsr	a4, windowbase
+	rsr	a0, windowbase
 	rsr	a3, windowstart		# a3 = xxxwww1yy
-	ssr	a4			# holds WB
-	slli	a4, a3, WSBITS
-	or	a3, a3, a4		# a3 = xxxwww1yyxxxwww1yy
+	ssr	a0			# holds WB
+	slli	a0, a3, WSBITS
+	or	a3, a3, a0		# a3 = xxxwww1yyxxxwww1yy
 	srl	a3, a3			# a3 = 00xxxwww1yyxxxwww1
 
 	/* We are done if there are no more than the current register frame. */
 
 	extui	a3, a3, 1, WSBITS-1	# a3 = 0yyxxxwww
-	movi	a4, (1 << (WSBITS-1))
+	movi	a0, (1 << (WSBITS-1))
 	_beqz	a3, .Lnospill		# only one active frame? jump
 
 	/* We want 1 at the top, so that we return to the current windowbase */
 
-	or	a3, a3, a4		# 1yyxxxwww
+	or	a3, a3, a0		# 1yyxxxwww
 
 	/* Skip empty frames - get 'oldest' WINDOWSTART-bit. */
 
 	wsr	a3, windowstart		# save shifted windowstart
-	neg	a4, a3
-	and	a3, a4, a3		# first bit set from right: 000010000
+	neg	a0, a3
+	and	a3, a0, a3		# first bit set from right: 000010000
 
-	ffs_ws	a4, a3			# a4: shifts to skip empty frames
+	ffs_ws	a0, a3			# a0: shifts to skip empty frames
 	movi	a3, WSBITS
-	sub	a4, a3, a4		# WSBITS-a4:number of 0-bits from right
-	ssr	a4			# save in SAR for later.
+	sub	a0, a3, a0		# WSBITS-a0:number of 0-bits from right
+	ssr	a0			# save in SAR for later.
 
 	rsr	a3, windowbase
-	add	a3, a3, a4
+	add	a3, a3, a0
 	wsr	a3, windowbase
 	rsync
 
@@ -1285,22 +1142,6 @@ ENTRY(_spill_registers)
 	 * we have to save 4,8. or 12 registers.
 	 */
 
-	_bbsi.l	a3, 1, .Lc4
-	_bbsi.l	a3, 2, .Lc8
-
-	/* Special case: we have a call12-frame starting at a4. */
-
-	_bbci.l	a3, 3, .Lc12	# bit 3 shouldn't be zero! (Jump to Lc12 first)
-
-	s32e	a4, a1, -16	# a1 is valid with an empty spill area
-	l32e	a4, a5, -12
-	s32e	a8, a4, -48
-	mov	a8, a4
-	l32e	a4, a1, -16
-	j	.Lc12c
-
-.Lnospill:
-	ret
 
 .Lloop: _bbsi.l	a3, 1, .Lc4
 	_bbci.l	a3, 2, .Lc12
@@ -1314,20 +1155,10 @@ ENTRY(_spill_registers)
 	s32e	a9, a4, -28
 	s32e	a10, a4, -24
 	s32e	a11, a4, -20
-
 	srli	a11, a3, 2		# shift windowbase by 2
 	rotw	2
 	_bnei	a3, 1, .Lloop
-
-.Lexit: /* Done. Do the final rotation, set WS, and return. */
-
-	rotw	1
-	rsr	a3, windowbase
-	ssl	a3
-	movi	a3, 1
-	sll	a3, a3
-	wsr	a3, windowstart
-	ret
+	j	.Lexit
 
 .Lc4:	s32e	a4, a9, -16
 	s32e	a5, a9, -12
@@ -1343,11 +1174,11 @@ ENTRY(_spill_registers)
 
 	/* 12-register frame (call12) */
 
-	l32e	a2, a5, -12
-	s32e	a8, a2, -48
-	mov	a8, a2
+	l32e	a0, a5, -12
+	s32e	a8, a0, -48
+	mov	a8, a0
 
-.Lc12c: s32e	a9, a8, -44
+	s32e	a9, a8, -44
 	s32e	a10, a8, -40
 	s32e	a11, a8, -36
 	s32e	a12, a8, -32
@@ -1367,30 +1198,54 @@ ENTRY(_spill_registers)
 	 */
 
 	rotw	1
-	mov	a5, a13
+	mov	a4, a13
 	rotw	-1
 
-	s32e	a4, a9, -16
-	s32e	a5, a9, -12
-	s32e	a6, a9, -8
-	s32e	a7, a9, -4
+	s32e	a4, a8, -16
+	s32e	a5, a8, -12
+	s32e	a6, a8, -8
+	s32e	a7, a8, -4
 
 	rotw	3
 
 	_beqi	a3, 1, .Lexit
 	j	.Lloop
 
-.Linvalid_mask:
+.Lexit:
 
-	/* We get here because of an unrecoverable error in the window
-	 * registers. If we are in user space, we kill the application,
-	 * however, this condition is unrecoverable in kernel space.
-	 */
+	/* Done. Do the final rotation and set WS */
+
+	rotw	1
+	rsr	a3, windowbase
+	ssl	a3
+	movi	a3, 1
+	sll	a3, a3
+	wsr	a3, windowstart
+.Lnospill:
+
+	/* Advance PC, restore registers and SAR, and return from exception. */
+
+	l32i	a3, a2, PT_SAR
+	l32i	a0, a2, PT_AREG0
+	wsr	a3, sar
+	l32i	a3, a2, PT_AREG3
 
-	rsr	a0, ps
-	_bbci.l	a0, PS_UM_BIT, 1f
+	/* Restore clobbered registers. */
 
-	/* User space: Setup a dummy frame and kill application.
+	l32i	a4, a2, PT_AREG4
+	l32i	a7, a2, PT_AREG7
+	l32i	a8, a2, PT_AREG8
+	l32i	a11, a2, PT_AREG11
+	l32i	a12, a2, PT_AREG12
+	l32i	a15, a2, PT_AREG15
+
+	movi	a2, 0
+	rfe
+
+.Linvalid_mask:
+
+	/* We get here because of an unrecoverable error in the window
+	 * registers, so set up a dummy frame and kill the user application.
 	 * Note: We assume EXC_TABLE_KSTK contains a valid stack pointer.
 	 */
 
@@ -1414,14 +1269,136 @@ ENTRY(_spill_registers)
 	movi	a4, do_exit
 	callx4	a4
 
-1:	/* Kernel space: PANIC! */
+	/* shouldn't return, so panic */
 
 	wsr	a0, excsave1
 	movi	a0, unrecoverable_exception
 	callx0	a0		# should not return
 1:	j	1b
 
-ENDPROC(_spill_registers)
+
+ENDPROC(fast_syscall_spill_registers)
+
+/* Fixup handler.
+ *
+ * We get here if the spill routine causes an exception, e.g. tlb miss.
+ * We basically restore WINDOWBASE and WINDOWSTART to the condition when
+ * we entered the spill routine and jump to the user exception handler.
+ *
+ * Note that we only need to restore the bits in windowstart that have not
+ * been spilled yet by the _spill_register routine. Luckily, a3 contains a
+ * rotated windowstart with only those bits set for frames that haven't been
+ * spilled yet. Because a3 is rotated such that bit 0 represents the register
+ * frame for the current windowbase - 1, we need to rotate a3 left by the
+ * value of the current windowbase + 1 and move it to windowstart.
+ *
+ * a0: value of depc, original value in depc
+ * a2: trashed, original value in EXC_TABLE_DOUBLE_SAVE
+ * a3: exctable, original value in excsave1
+ */
+
+ENTRY(fast_syscall_spill_registers_fixup)
+
+	rsr	a2, windowbase	# get current windowbase (a2 is saved)
+	xsr	a0, depc	# restore depc and a0
+	ssl	a2		# set shift (32 - WB)
+
+	/* We need to make sure the current registers (a0-a3) are preserved.
+	 * To do this, we simply set the bit for the current window frame
+	 * in WS, so that the exception handlers save them to the task stack.
+	 *
+	 * Note: we use a3 to set the windowbase, so we take a special care
+	 * of it, saving it in the original _spill_registers frame across
+	 * the exception handler call.
+	 */
+
+	xsr	a3, excsave1	# get spill-mask
+	slli	a3, a3, 1	# shift left by one
+	addi	a3, a3, 1	# set the bit for the current window frame
+
+	slli	a2, a3, 32-WSBITS
+	src	a2, a3, a2	# a2 = xxwww1yyxxxwww1yy......
+	wsr	a2, windowstart	# set corrected windowstart
+
+	srli	a3, a3, 1
+	rsr	a2, excsave1
+	l32i	a2, a2, EXC_TABLE_DOUBLE_SAVE	# restore a2
+	xsr	a2, excsave1
+	s32i	a3, a2, EXC_TABLE_DOUBLE_SAVE	# save a3
+	l32i	a3, a2, EXC_TABLE_PARAM	# original WB (in user task)
+	xsr	a2, excsave1
+
+	/* Return to the original (user task) WINDOWBASE.
+	 * We leave the following frame behind:
+	 * a0, a1, a2	same
+	 * a3:		trashed (saved in EXC_TABLE_DOUBLE_SAVE)
+	 * depc:	depc (we have to return to that address)
+	 * excsave_1:	exctable
+	 */
+
+	wsr	a3, windowbase
+	rsync
+
+	/* We are now in the original frame when we entered _spill_registers:
+	 *  a0: return address
+	 *  a1: used, stack pointer
+	 *  a2: kernel stack pointer
+	 *  a3: available
+	 *  depc: exception address
+	 *  excsave: exctable
+	 * Note: This frame might be the same as above.
+	 */
+
+	/* Setup stack pointer. */
+
+	addi	a2, a2, -PT_USER_SIZE
+	s32i	a0, a2, PT_AREG0
+
+	/* Make sure we return to this fixup handler. */
+
+	movi	a3, fast_syscall_spill_registers_fixup_return
+	s32i	a3, a2, PT_DEPC		# setup depc
+
+	/* Jump to the exception handler. */
+
+	rsr	a3, excsave1
+	rsr	a0, exccause
+	addx4	a0, a0, a3              	# find entry in table
+	l32i	a0, a0, EXC_TABLE_FAST_USER     # load handler
+	l32i	a3, a3, EXC_TABLE_DOUBLE_SAVE
+	jx	a0
+
+ENDPROC(fast_syscall_spill_registers_fixup)
+
+ENTRY(fast_syscall_spill_registers_fixup_return)
+
+	/* When we return here, all registers have been restored (a2: DEPC) */
+
+	wsr	a2, depc		# exception address
+
+	/* Restore fixup handler. */
+
+	rsr	a2, excsave1
+	s32i	a3, a2, EXC_TABLE_DOUBLE_SAVE
+	movi	a3, fast_syscall_spill_registers_fixup
+	s32i	a3, a2, EXC_TABLE_FIXUP
+	rsr	a3, windowbase
+	s32i	a3, a2, EXC_TABLE_PARAM
+	l32i	a2, a2, EXC_TABLE_KSTK
+
+	/* Load WB at the time the exception occurred. */
+
+	rsr	a3, sar			# WB is still in SAR
+	neg	a3, a3
+	wsr	a3, windowbase
+	rsync
+
+	rsr	a3, excsave1
+	l32i	a3, a3, EXC_TABLE_DOUBLE_SAVE
+
+	rfde
+
+ENDPROC(fast_syscall_spill_registers_fixup_return)
 
 #ifdef CONFIG_MMU
 /*
@@ -1794,6 +1771,43 @@ ENTRY(system_call)
 
 ENDPROC(system_call)
 
+/*
+ * Spill live registers on the kernel stack macro.
+ *
+ * Entry condition: ps.woe is set, ps.excm is cleared
+ * Exit condition: windowstart has single bit set
+ * May clobber: a12, a13
+ */
+	.macro	spill_registers_kernel
+
+#if XCHAL_NUM_AREGS > 16
+	call12	1f
+	_j	2f
+	retw
+	.align	4
+1:
+	_entry	a1, 48
+	addi	a12, a0, 3
+#if XCHAL_NUM_AREGS > 32
+	.rept	(XCHAL_NUM_AREGS - 32) / 12
+	_entry	a1, 48
+	mov	a12, a0
+	.endr
+#endif
+	_entry	a1, 48
+#if XCHAL_NUM_AREGS % 12 == 0
+	mov	a8, a8
+#elif XCHAL_NUM_AREGS % 12 == 4
+	mov	a12, a12
+#elif XCHAL_NUM_AREGS % 12 == 8
+	mov	a4, a4
+#endif
+	retw
+2:
+#else
+	mov	a12, a12
+#endif
+	.endm
 
 /*
  * Task switch.
@@ -1806,21 +1820,20 @@ ENTRY(_switch_to)
 
 	entry	a1, 16
 
-	mov	a12, a2			# preserve 'prev' (a2)
-	mov	a13, a3			# and 'next' (a3)
+	mov	a10, a2			# preserve 'prev' (a2)
+	mov	a11, a3			# and 'next' (a3)
 
 	l32i	a4, a2, TASK_THREAD_INFO
 	l32i	a5, a3, TASK_THREAD_INFO
 
-	save_xtregs_user a4 a6 a8 a9 a10 a11 THREAD_XTREGS_USER
+	save_xtregs_user a4 a6 a8 a9 a12 a13 THREAD_XTREGS_USER
 
-	s32i	a0, a12, THREAD_RA	# save return address
-	s32i	a1, a12, THREAD_SP	# save stack pointer
+	s32i	a0, a10, THREAD_RA	# save return address
+	s32i	a1, a10, THREAD_SP	# save stack pointer
 
 	/* Disable ints while we manipulate the stack pointer. */
 
-	movi	a14, (1 << PS_EXCM_BIT) | LOCKLEVEL
-	xsr	a14, ps
+	rsil	a14, LOCKLEVEL
 	rsr	a3, excsave1
 	rsync
 	s32i	a3, a3, EXC_TABLE_FIXUP	/* enter critical section */
@@ -1835,7 +1848,7 @@ ENTRY(_switch_to)
 
 	/* Flush register file. */
 
-	call0	_spill_registers	# destroys a3, a4, and SAR
+	spill_registers_kernel
 
 	/* Set kernel stack (and leave critical section)
 	 * Note: It's save to set it here. The stack will not be overwritten
@@ -1851,13 +1864,13 @@ ENTRY(_switch_to)
 
 	/* restore context of the task 'next' */
 
-	l32i	a0, a13, THREAD_RA	# restore return address
-	l32i	a1, a13, THREAD_SP	# restore stack pointer
+	l32i	a0, a11, THREAD_RA	# restore return address
+	l32i	a1, a11, THREAD_SP	# restore stack pointer
 
-	load_xtregs_user a5 a6 a8 a9 a10 a11 THREAD_XTREGS_USER
+	load_xtregs_user a5 a6 a8 a9 a12 a13 THREAD_XTREGS_USER
 
 	wsr	a14, ps
-	mov	a2, a12			# return 'prev'
+	mov	a2, a10			# return 'prev'
 	rsync
 
 	retw

arch/xtensa/kernel/setup.c: +2 -0

@@ -22,6 +22,7 @@
 #include <linux/bootmem.h>
 #include <linux/kernel.h>
 #include <linux/percpu.h>
+#include <linux/clk-provider.h>
 #include <linux/cpu.h>
 #include <linux/of_fdt.h>
 #include <linux/of_platform.h>
@@ -276,6 +277,7 @@ void __init early_init_devtree(void *params)
 
 static int __init xtensa_device_probe(void)
 {
+	of_clk_init(NULL);
 	of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
 	return 0;
 }
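of_clk_init(NULL) walks the device tree and calls the init hook of every clock provider registered with CLK_OF_DECLARE; the generic "fixed-clock" binding used by the xtfpga osc node is one of them. It has to run before of_platform_populate() so the clocks exist by the time drivers probe. A sketch of what such a provider looks like, using a made-up compatible string and the clock API of this era (CLK_IS_ROOT was removed later):

    #include <linux/clk-provider.h>
    #include <linux/err.h>
    #include <linux/init.h>
    #include <linux/of.h>

    static void __init myboard_osc_setup(struct device_node *np)
    {
            struct clk *clk;
            u32 rate = 0;

            of_property_read_u32(np, "clock-frequency", &rate);
            clk = clk_register_fixed_rate(NULL, np->name, NULL, CLK_IS_ROOT, rate);
            if (!IS_ERR(clk))
                    of_clk_add_provider(np, of_clk_src_simple_get, clk);
    }
    CLK_OF_DECLARE(myboard_osc, "myboard,main-oscillator", myboard_osc_setup);

Registering through of_clk_add_provider() is what lets the clocks = <&osc> phandles in the .dtsi resolve to a struct clk when consumers ask for them.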

arch/xtensa/kernel/time.c: +1 -0

@@ -30,6 +30,7 @@
 #include <asm/platform.h>
 
 unsigned long ccount_freq;		/* ccount Hz */
+EXPORT_SYMBOL(ccount_freq);
 
 static cycle_t ccount_read(struct clocksource *cs)
 {

arch/xtensa/kernel/vectors.S: +1 -1

@@ -235,7 +235,7 @@ ENTRY(_DoubleExceptionVector)
 
 	/* Check for overflow/underflow exception, jump if overflow. */
 
-	_bbci.l	a0, 6, _DoubleExceptionVector_WindowOverflow
+	bbci.l	a0, 6, _DoubleExceptionVector_WindowOverflow
 
 	/*
 	 * Restart window underflow exception.

arch/xtensa/kernel/xtensa_ksyms.c: +0 -2

@@ -122,9 +122,7 @@ EXPORT_SYMBOL(insw);
 EXPORT_SYMBOL(insl);
 
 extern long common_exception_return;
-extern long _spill_registers;
 EXPORT_SYMBOL(common_exception_return);
-EXPORT_SYMBOL(_spill_registers);
 
 #ifdef CONFIG_FUNCTION_TRACER
 EXPORT_SYMBOL(_mcount);

arch/xtensa/mm/init.c: +9 -4

@@ -90,7 +90,7 @@ int __init mem_reserve(unsigned long start, unsigned long end, int must_exist)
 
 
 /*
- * Initialize the bootmem system and give it all the memory we have available.
+ * Initialize the bootmem system and give it all low memory we have available.
  */
 
 void __init bootmem_init(void)
@@ -142,9 +142,14 @@ void __init bootmem_init(void)
 
 	/* Add all remaining memory pieces into the bootmem map */
 
-	for (i=0; i<sysmem.nr_banks; i++)
-		free_bootmem(sysmem.bank[i].start,
-			     sysmem.bank[i].end - sysmem.bank[i].start);
+	for (i = 0; i < sysmem.nr_banks; i++) {
+		if (sysmem.bank[i].start >> PAGE_SHIFT < max_low_pfn) {
+			unsigned long end = min(max_low_pfn << PAGE_SHIFT,
+						sysmem.bank[i].end);
+			free_bootmem(sysmem.bank[i].start,
+				     end - sysmem.bank[i].start);
+		}
+	}
 
 }
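This hunk is what allows booting xtfpga images on boards where a newer uBoot passes more than 128 MB: memory banks may now extend past the low-memory window, and free_bootmem() must only be handed lowmem. The loop skips banks that start above max_low_pfn and clamps banks that straddle it. A worked example with made-up but typical numbers (256 MB of RAM at physical address 0, a 128 MB lowmem window, 4 KB pages):

    #include <stdio.h>

    int main(void)
    {
            unsigned long max_low_pfn = 0x08000000UL >> 12;   /* 128 MB of lowmem */
            unsigned long start = 0x00000000UL;               /* one 256 MB bank  */
            unsigned long bank_end = 0x10000000UL;

            if ((start >> 12) < max_low_pfn) {
                    unsigned long end = bank_end;

                    if ((max_low_pfn << 12) < end)
                            end = max_low_pfn << 12;          /* the min() in the patch */
                    /* prints free_bootmem(0, 0x8000000): only lowmem is handed over */
                    printf("free_bootmem(%#lx, %#lx)\n", start, end - start);
            }
            return 0;
    }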
 

arch/xtensa/mm/mmu.c: +1 -1

@@ -39,7 +39,7 @@ void init_mmu(void)
 	set_itlbcfg_register(0);
 	set_dtlbcfg_register(0);
 #endif
-#if XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY && CONFIG_OF
+#if XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY && defined(CONFIG_OF)
 	/*
 	 * Update the IO area mapping in case xtensa_kio_paddr has changed
 	 */

arch/xtensa/platforms/xtfpga/setup.c: +4 -3

@@ -135,11 +135,11 @@ static void __init update_local_mac(struct device_node *node)
 
 static int __init machine_setup(void)
 {
-	struct device_node *serial;
+	struct device_node *clock;
 	struct device_node *eth = NULL;
 
-	for_each_compatible_node(serial, NULL, "ns16550a")
-		update_clock_frequency(serial);
+	for_each_node_by_name(clock, "main-oscillator")
+		update_clock_frequency(clock);
 
 	if ((eth = of_find_compatible_node(eth, NULL, "opencores,ethoc")))
 		update_local_mac(eth);
@@ -290,6 +290,7 @@ static int __init xtavnet_init(void)
 	 * knows whether they set it correctly on the DIP switches.
 	 */
 	pr_info("XTFPGA: Ethernet MAC %pM\n", ethoc_pdata.hwaddr);
+	ethoc_pdata.eth_clkfreq = *(long *)XTFPGA_CLKFRQ_VADDR;
 
 	return 0;
 }

arch/xtensa/variants/fsf/include/variant/tie.h: +2 -7

@@ -18,13 +18,6 @@
 #define XCHAL_CP_MASK			0x00	/* bitmask of all CPs by ID */
 #define XCHAL_CP_PORT_MASK		0x00	/* bitmask of only port CPs */
 
-/*  Basic parameters of each coprocessor:  */
-#define XCHAL_CP7_NAME			"XTIOP"
-#define XCHAL_CP7_IDENT			XTIOP
-#define XCHAL_CP7_SA_SIZE		0	/* size of state save area */
-#define XCHAL_CP7_SA_ALIGN		1	/* min alignment of save area */
-#define XCHAL_CP_ID_XTIOP		7	/* coprocessor ID (0..7) */
-
 /*  Filler info for unassigned coprocessors, to simplify arrays etc:  */
 #define XCHAL_NCP_SA_SIZE		0
 #define XCHAL_NCP_SA_ALIGN		1
@@ -42,6 +35,8 @@
 #define XCHAL_CP5_SA_ALIGN		1
 #define XCHAL_CP6_SA_SIZE		0
 #define XCHAL_CP6_SA_ALIGN		1
+#define XCHAL_CP7_SA_SIZE		0
+#define XCHAL_CP7_SA_ALIGN		1
 
 /*  Save area for non-coprocessor optional and custom (TIE) state:  */
 #define XCHAL_NCP_SA_SIZE		0