
arm64: cmpxchg: avoid "cc" clobber in ll/sc routines

We can perform the cmpxchg comparison using eor and cbnz, which avoids
the "cc" clobber for the ll/sc case and consequently for the LSE case,
where we may have to fall back on the ll/sc code at runtime.
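
The change relies on the fact that eor and cbnz never write the NZCV flags, so the comparison can be expressed without declaring a condition-code clobber. Below is a minimal standalone sketch of the pattern (illustrative only: the function name my_cmpxchg32 and the simplified "r" constraints are not from the patch, and the memory-ordering barriers used by the kernel are omitted).

/*
 * Illustrative 32-bit compare-and-swap using LL/SC, with the comparison
 * done via eor + cbnz.  Because neither instruction writes the condition
 * flags, the asm block needs no "cc" clobber.  AArch64 with GCC-style
 * inline asm only; ordering barriers are deliberately left out.
 */
static inline int my_cmpxchg32(int *ptr, int old, int new)
{
	int oldval;
	unsigned long tmp;

	asm volatile(
	"1:	ldxr	%w1, %2\n"		/* oldval = *ptr (exclusive load) */
	"	eor	%w0, %w1, %w3\n"	/* tmp = oldval ^ old             */
	"	cbnz	%w0, 2f\n"		/* mismatch: give up              */
	"	stxr	%w0, %w4, %2\n"		/* try to store new               */
	"	cbnz	%w0, 1b\n"		/* exclusive store failed: retry  */
	"2:"
	: "=&r" (tmp), "=&r" (oldval), "+Q" (*ptr)
	: "r" (old), "r" (new)
	: "memory");				/* note: no "cc" in the clobbers  */

	return oldval;
}

With cmp + b.ne the same loop would have to list "cc", forcing the compiler to assume the flags are dead across the asm; dropping that clobber from the ll/sc routines is what also lets the LSE wrappers below shed it.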

Reviewed-by: Steve Capper <steve.capper@arm.com>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Will Deacon, 10 years ago
parent
commit
0bc671d3f4
2 changed files with 8 additions and 10 deletions
  1. arch/arm64/include/asm/atomic_ll_sc.h (+6 -8)
  2. arch/arm64/include/asm/atomic_lse.h (+2 -2)

arch/arm64/include/asm/atomic_ll_sc.h (+6 -8)

@@ -101,14 +101,13 @@ __LL_SC_PREFIX(atomic_cmpxchg(atomic_t *ptr, int old, int new))
 
 	asm volatile("// atomic_cmpxchg\n"
 "1:	ldxr	%w1, %2\n"
-"	cmp	%w1, %w3\n"
-"	b.ne	2f\n"
+"	eor	%w0, %w1, %w3\n"
+"	cbnz	%w0, 2f\n"
 "	stxr	%w0, %w4, %2\n"
 "	cbnz	%w0, 1b\n"
 "2:"
 	: "=&r" (tmp), "=&r" (oldval), "+Q" (ptr->counter)
-	: "Ir" (old), "r" (new)
-	: "cc");
+	: "Lr" (old), "r" (new));
 
 	smp_mb();
 	return oldval;
@@ -179,14 +178,13 @@ __LL_SC_PREFIX(atomic64_cmpxchg(atomic64_t *ptr, long old, long new))
 
 	asm volatile("// atomic64_cmpxchg\n"
 "1:	ldxr	%1, %2\n"
-"	cmp	%1, %3\n"
-"	b.ne	2f\n"
+"	eor	%0, %1, %3\n"
+"	cbnz	%w0, 2f\n"
 "	stxr	%w0, %4, %2\n"
 "	cbnz	%w0, 1b\n"
 "2:"
 	: "=&r" (res), "=&r" (oldval), "+Q" (ptr->counter)
-	: "Ir" (old), "r" (new)
-	: "cc");
+	: "Lr" (old), "r" (new));
 
 	smp_mb();
 	return oldval;

arch/arm64/include/asm/atomic_lse.h (+2 -2)

@@ -166,7 +166,7 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
 	"	mov	%w[ret], w30")
 	: [ret] "+r" (x0), [v] "+Q" (ptr->counter)
 	: [old] "r" (w1), [new] "r" (w2)
-	: "x30", "cc", "memory");
+	: "x30", "memory");
 
 	return x0;
 }
@@ -313,7 +313,7 @@ static inline long atomic64_cmpxchg(atomic64_t *ptr, long old, long new)
 	"	mov	%[ret], x30")
 	: [ret] "+r" (x0), [v] "+Q" (ptr->counter)
 	: [old] "r" (x1), [new] "r" (x2)
-	: "x30", "cc", "memory");
+	: "x30", "memory");
 
 	return x0;
 }
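
For the 64-bit variant the same pattern applies, with one subtlety: the eor result is a full 64-bit value, so the mismatch test must use cbnz on the whole register (%0, not %w0), otherwise a difference confined to the upper 32 bits would go undetected and the store would wrongly proceed; only the stxr status result is tested as a w register. Another illustrative standalone sketch, under the same assumptions as above (made-up name, simplified constraints, barriers omitted):

/*
 * Illustrative 64-bit counterpart of the pattern above (not the kernel
 * code).  The eor result lives in an x register, so the comparison is a
 * full-width cbnz; the stxr status is still a 32-bit value in %w0.
 */
static inline long my_cmpxchg64(long *ptr, long old, long new)
{
	long oldval;
	unsigned long tmp;

	asm volatile(
	"1:	ldxr	%1, %2\n"	/* oldval = *ptr (exclusive load) */
	"	eor	%0, %1, %3\n"	/* tmp = oldval ^ old             */
	"	cbnz	%0, 2f\n"	/* full-width test of the xor     */
	"	stxr	%w0, %4, %2\n"	/* try to store new               */
	"	cbnz	%w0, 1b\n"	/* exclusive store failed: retry  */
	"2:"
	: "=&r" (tmp), "=&r" (oldval), "+Q" (*ptr)
	: "r" (old), "r" (new)
	: "memory");			/* again, no "cc" clobber         */

	return oldval;
}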