
Merge tag 'arc-4.4-rc1-part2' of git://git.kernel.org/pub/scm/linux/kernel/git/vgupta/arc

Pull ARC fixes from Vineet Gupta:
 "Found a couple of brown paper bag bugs with the prev pull request
  (including a SMP build breakage report from Guenter).  Since these are
  urgent I also decided to send over a bunch of other pending fixes
  which could have otherwise waited an rc or two.

  Summary:

   - A bunch of brown paper bag bugs (MAINTAINERS list email, SMP build
     failure)
   - cpu_relax() is now a compiler barrier for UP as well
   - handling of userspace Bus Errors for ARCompact builds"

* tag 'arc-4.4-rc1-part2' of git://git.kernel.org/pub/scm/linux/kernel/git/vgupta/arc:
  ARC: Fix silly typo in MAINTAINERS file
  ARC: cpu_relax() to be compiler barrier even for UP
  ARC: use ASL assembler mnemonic
  ARC: [arcompact] Handle bus error from userspace as Interrupt not exception
  ARC: remove extraneous header include
  ARCv2: lib: memcpy: use local symbols
Linus Torvalds
commit b3a0d9a232

+ 1 - 1
MAINTAINERS

@@ -10300,7 +10300,7 @@ F:	include/net/switchdev.h
 
 SYNOPSYS ARC ARCHITECTURE
 M:	Vineet Gupta <vgupta@synopsys.com>
-L:	linux-snps-arc@lists.infraded.org
+L:	linux-snps-arc@lists.infradead.org
 S:	Supported
 F:	arch/arc/
 F:	Documentation/devicetree/bindings/arc/*

+ 0 - 4
arch/arc/include/asm/processor.h

@@ -57,11 +57,7 @@ struct task_struct;
  * A lot of busy-wait loops in SMP are based on non-volatile data which
  * would otherwise get optimised away by gcc
  */
-#ifdef CONFIG_SMP
 #define cpu_relax()	__asm__ __volatile__ ("" : : : "memory")
-#else
-#define cpu_relax()	do { } while (0)
-#endif
 
 #define cpu_relax_lowlatency() cpu_relax()
 

+ 19 - 0
arch/arc/kernel/entry-arcv2.S

@@ -91,6 +91,25 @@ ENTRY(EV_DCError)
 	flag 1
 END(EV_DCError)
 
+; ---------------------------------------------
+; Memory Error Exception Handler
+;   - Unlike ARCompact, handles Bus errors for both User/Kernel mode,
+;     Instruction fetch or Data access, under a single Exception Vector
+; ---------------------------------------------
+
+ENTRY(mem_service)
+
+	EXCEPTION_PROLOGUE
+
+	lr  r0, [efa]
+	mov r1, sp
+
+	FAKE_RET_FROM_EXCPN
+
+	bl  do_memory_error
+	b   ret_from_exception
+END(mem_service)
+
 ENTRY(EV_Misaligned)
 
 	EXCEPTION_PROLOGUE
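
The stub hands the faulting address (read from the EFA aux register) to
the C handler in r0 and the saved register file in r1. A rough sketch of
the C side, assuming the usual arch/arc pattern of SIGBUS for user mode
and die() for kernel mode; the body is illustrative rather than the exact
traps.c code, and it uses the modern force_sig_fault() helper where a
4.4-era kernel spelled this with force_sig_info():

	#include <linux/sched/signal.h>	/* force_sig_fault() */
	#include <asm/ptrace.h>		/* user_mode() */

	void do_memory_error(unsigned long address, struct pt_regs *regs)
	{
		if (user_mode(regs))
			/* bus error from user space: queue SIGBUS */
			force_sig_fault(SIGBUS, BUS_ADRERR,
					(void __user *)address);
		else
			die("Memory Error", regs, address);
	}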

+ 25 - 4
arch/arc/kernel/entry-compact.S

@@ -142,16 +142,12 @@ int1_saved_reg:
 	.zero 4
 
 /* Each Interrupt level needs its own scratch */
-#ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS
-
 ARCFP_DATA int2_saved_reg
 	.type   int2_saved_reg, @object
 	.size   int2_saved_reg, 4
 int2_saved_reg:
 	.zero 4
 
-#endif
-
 ; ---------------------------------------------
 	.section .text, "ax",@progbits
 
@@ -215,6 +211,31 @@ END(handle_interrupt_level2)
 
 #endif
 
+; ---------------------------------------------
+; User Mode Memory Bus Error Interrupt Handler
+; (Kernel mode memory errors handled via separate exception vectors)
+; ---------------------------------------------
+ENTRY(mem_service)
+
+	INTERRUPT_PROLOGUE 2
+
+	mov r0, ilink2
+	mov r1, sp
+
+	; User process needs to be killed with SIGBUS, but we first need to
+	; get out of the L2 interrupt context (drop to pure kernel mode) and
+	; jump off to "C" code where the SIGBUS is enqueued
+	lr  r3, [status32]
+	bclr r3, r3, STATUS_A2_BIT
+	or  r3, r3, (STATUS_E1_MASK|STATUS_E2_MASK)
+	sr  r3, [status32_l2]
+	mov ilink2, 1f
+	rtie
+1:
+	bl  do_memory_error
+	b   ret_from_exception
+END(mem_service)
+
 ; ---------------------------------------------
 ;  Level 1 ISR
 ; ---------------------------------------------
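
The status32 surgery is the interesting part: SIGBUS cannot be delivered
from level-2 interrupt context, so the handler rewrites the saved status
word before the RTIE. Rendered as C for readability (STATUS_A2_BIT,
STATUS_E1_MASK and STATUS_E2_MASK are the real arch/arc register
definitions; the helper itself is hypothetical):

	/* Compute the status32 the RTIE will restore: clear the "active
	 * at level 2" flag and re-enable both interrupt levels, so
	 * execution resumes at label 1: in pure kernel mode. */
	static inline unsigned int drop_to_kernel_mode(unsigned int status32)
	{
		status32 &= ~(1U << STATUS_A2_BIT);		/* bclr */
		status32 |= STATUS_E1_MASK | STATUS_E2_MASK;	/* or   */
		return status32;
	}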

+ 0 - 17
arch/arc/kernel/entry.S

@@ -92,23 +92,6 @@ ENTRY(instr_service)
 	b   ret_from_exception
 END(instr_service)
 
-; ---------------------------------------------
-; Memory Error Exception Handler
-; ---------------------------------------------
-
-ENTRY(mem_service)
-
-	EXCEPTION_PROLOGUE
-
-	lr  r0, [efa]
-	mov r1, sp
-
-	FAKE_RET_FROM_EXCPN
-
-	bl  do_memory_error
-	b   ret_from_exception
-END(mem_service)
-
 ; ---------------------------------------------
 ; Machine Check Exception Handler
 ; ---------------------------------------------

+ 26 - 26
arch/arc/lib/memcpy-archs.S

@@ -50,26 +50,26 @@ ENTRY(memcpy)
 
 ;;; if size <= 8
 	cmp	r2, 8
-	bls.d	@smallchunk
+	bls.d	@.Lsmallchunk
 	mov.f	lp_count, r2
 
 	and.f	r4, r0, 0x03
 	rsub	lp_count, r4, 4
-	lpnz	@aligndestination
+	lpnz	@.Laligndestination
 	;; LOOP BEGIN
 	ldb.ab	r5, [r1,1]
 	sub	r2, r2, 1
 	stb.ab	r5, [r3,1]
-aligndestination:
+.Laligndestination:
 
 ;;; Check the alignment of the source
 	and.f	r4, r1, 0x03
-	bnz.d	@sourceunaligned
+	bnz.d	@.Lsourceunaligned
 
 ;;; CASE 0: Both source and destination are 32bit aligned
 ;;; Convert len to Dwords, unfold x4
 	lsr.f	lp_count, r2, ZOLSHFT
-	lpnz	@copy32_64bytes
+	lpnz	@.Lcopy32_64bytes
 	;; LOOP START
 	LOADX (r6, r1)
 	PREFETCH_READ (r1)
@@ -81,25 +81,25 @@ aligndestination:
 	STOREX (r8, r3)
 	STOREX (r10, r3)
 	STOREX (r4, r3)
-copy32_64bytes:
+.Lcopy32_64bytes:
 
 	and.f	lp_count, r2, ZOLAND ;Last remaining 31 bytes
-smallchunk:
-	lpnz	@copyremainingbytes
+.Lsmallchunk:
+	lpnz	@.Lcopyremainingbytes
 	;; LOOP START
 	ldb.ab	r5, [r1,1]
 	stb.ab	r5, [r3,1]
-copyremainingbytes:
+.Lcopyremainingbytes:
 
 	j	[blink]
 ;;; END CASE 0
 
-sourceunaligned:
+.Lsourceunaligned:
 	cmp	r4, 2
-	beq.d	@unalignedOffby2
+	beq.d	@.LunalignedOffby2
 	sub	r2, r2, 1
 
-	bhi.d	@unalignedOffby3
+	bhi.d	@.LunalignedOffby3
 	ldb.ab	r5, [r1, 1]
 
 ;;; CASE 1: The source is unaligned, off by 1
@@ -114,7 +114,7 @@ sourceunaligned:
 	or	r5, r5, r6
 
 	;; Both src and dst are aligned
-	lpnz	@copy8bytes_1
+	lpnz	@.Lcopy8bytes_1
 	;; LOOP START
 	ld.ab	r6, [r1, 4]
 	prefetch [r1, 28]	;Prefetch the next read location
@@ -131,7 +131,7 @@ sourceunaligned:
 
 	st.ab	r7, [r3, 4]
 	st.ab	r9, [r3, 4]
-copy8bytes_1:
+.Lcopy8bytes_1:
 
 	;; Write back the remaining 16bits
 	EXTRACT_1 (r6, r5, 16)
@@ -141,14 +141,14 @@ copy8bytes_1:
 	stb.ab	r5, [r3, 1]
 
 	and.f	lp_count, r2, 0x07 ;Last 8bytes
-	lpnz	@copybytewise_1
+	lpnz	@.Lcopybytewise_1
 	;; LOOP START
 	ldb.ab	r6, [r1,1]
 	stb.ab	r6, [r3,1]
-copybytewise_1:
+.Lcopybytewise_1:
 	j	[blink]
 
-unalignedOffby2:
+.LunalignedOffby2:
 ;;; CASE 2: The source is unaligned, off by 2
 	ldh.ab	r5, [r1, 2]
 	sub	r2, r2, 1
@@ -159,7 +159,7 @@ unalignedOffby2:
 #ifdef __BIG_ENDIAN__
 	asl.nz	r5, r5, 16
 #endif
-	lpnz	@copy8bytes_2
+	lpnz	@.Lcopy8bytes_2
 	;; LOOP START
 	ld.ab	r6, [r1, 4]
 	prefetch [r1, 28]	;Prefetch the next read location
@@ -176,7 +176,7 @@ unalignedOffby2:
 
 	st.ab	r7, [r3, 4]
 	st.ab	r9, [r3, 4]
-copy8bytes_2:
+.Lcopy8bytes_2:
 
 #ifdef __BIG_ENDIAN__
 	lsr.nz	r5, r5, 16
@@ -184,14 +184,14 @@ copy8bytes_2:
 	sth.ab	r5, [r3, 2]
 
 	and.f	lp_count, r2, 0x07 ;Last 8bytes
-	lpnz	@copybytewise_2
+	lpnz	@.Lcopybytewise_2
 	;; LOOP START
 	ldb.ab	r6, [r1,1]
 	stb.ab	r6, [r3,1]
-copybytewise_2:
+.Lcopybytewise_2:
 	j	[blink]
 
-unalignedOffby3:
+.LunalignedOffby3:
 ;;; CASE 3: The source is unaligned, off by 3
;;; Hence, we need to read 1 byte to achieve the 32-bit alignment
 
@@ -201,7 +201,7 @@ unalignedOffby3:
 #ifdef __BIG_ENDIAN__
 	asl.ne	r5, r5, 24
 #endif
-	lpnz	@copy8bytes_3
+	lpnz	@.Lcopy8bytes_3
 	;; LOOP START
 	ld.ab	r6, [r1, 4]
 	prefetch [r1, 28]	;Prefetch the next read location
@@ -218,7 +218,7 @@ unalignedOffby3:
 
 	st.ab	r7, [r3, 4]
 	st.ab	r9, [r3, 4]
-copy8bytes_3:
+.Lcopy8bytes_3:
 
 #ifdef __BIG_ENDIAN__
 	lsr.nz	r5, r5, 24
@@ -226,11 +226,11 @@ copy8bytes_3:
 	stb.ab	r5, [r3, 1]
 
 	and.f	lp_count, r2, 0x07 ;Last 8bytes
-	lpnz	@copybytewise_3
+	lpnz	@.Lcopybytewise_3
 	;; LOOP START
 	ldb.ab	r6, [r1,1]
 	stb.ab	r6, [r3,1]
-copybytewise_3:
+.Lcopybytewise_3:
 	j	[blink]
 
 END(memcpy)
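
The rename is purely about symbol visibility: GNU as treats labels that
start with .L as local and keeps them out of the object file's symbol
table, so backtraces and profilers attribute the whole routine to memcpy
rather than to an internal loop label. One hedged way to observe the
effect from a built-in initcall (the check itself is hypothetical and
requires CONFIG_KALLSYMS):

	#include <linux/init.h>
	#include <linux/kallsyms.h>
	#include <linux/printk.h>

	static int __init memcpy_label_check(void)
	{
		/* Expected to print 0 once the loop labels carry the .L
		 * prefix: local symbols never reach the kallsyms table. */
		pr_info("copy32_64bytes resolves to %lx\n",
			kallsyms_lookup_name("copy32_64bytes"));
		return 0;
	}
	late_initcall(memcpy_label_check);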

+ 3 - 3
arch/arc/mm/tlbex.S

@@ -88,7 +88,7 @@ ex_saved_reg1:
 #ifdef CONFIG_SMP
 	sr  r0, [ARC_REG_SCRATCH_DATA0]	; free up r0 to code with
 	GET_CPU_ID  r0			; get to per cpu scratch mem,
-	lsl r0, r0, L1_CACHE_SHIFT	; cache line wide per cpu
+	asl r0, r0, L1_CACHE_SHIFT	; cache line wide per cpu
 	add r0, @ex_saved_reg1, r0
 #else
 	st    r0, [@ex_saved_reg1]
@@ -107,7 +107,7 @@ ex_saved_reg1:
 .macro TLBMISS_RESTORE_REGS
 #ifdef CONFIG_SMP
 	GET_CPU_ID  r0			; get to per cpu scratch mem
-	lsl r0, r0, L1_CACHE_SHIFT	; each is cache line wide
+	asl r0, r0, L1_CACHE_SHIFT	; each is cache line wide
 	add r0, @ex_saved_reg1, r0
 	ld_s  r3, [r0,12]
 	ld_s  r2, [r0, 8]
@@ -256,7 +256,7 @@ ex_saved_reg1:
 
 .macro CONV_PTE_TO_TLB
 	and    r3, r0, PTE_BITS_RWX	;          r  w  x
-	lsl    r2, r3, 3		; Kr Kw Kx 0  0  0 (GLOBAL, kernel only)
+	asl    r2, r3, 3		; Kr Kw Kx 0  0  0 (GLOBAL, kernel only)
 	and.f  0,  r0, _PAGE_GLOBAL
 	or.z   r2, r2, r3		; Kr Kw Kx Ur Uw Ux (!GLOBAL, user page)
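
The lsl -> asl rename is mechanical: an ARC left shift has no
arithmetic/logical distinction and ASL is the ISA's canonical mnemonic.
The CONV_PTE_TO_TLB fragment itself is worth unpacking; in C terms
(conv_pte_perms() is a hypothetical rendering, not kernel code, while
PTE_BITS_RWX and _PAGE_GLOBAL are the real arch/arc definitions):

	/* Fold PTE permission bits into TLB format, mirroring the asm:
	 * the r/w/x bits shifted left by 3 become the kernel Kr/Kw/Kx
	 * bits; non-global (user) pages keep the user copy as well. */
	static unsigned int conv_pte_perms(unsigned int pte)
	{
		unsigned int rwx = pte & PTE_BITS_RWX;	/*          r  w  x */
		unsigned int tlb = rwx << 3;		/* Kr Kw Kx 0  0  0 */

		if (!(pte & _PAGE_GLOBAL))		/* and.f / or.z */
			tlb |= rwx;			/* Kr Kw Kx Ur Uw Ux */
		return tlb;
	}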
 

+ 0 - 1
arch/arc/plat-sim/platform.c

@@ -10,7 +10,6 @@
 
 #include <linux/init.h>
 #include <asm/mach_desc.h>
-#include <asm/mcip.h>
 
 /*----------------------- Machine Descriptions ------------------------------
  *