
s390/atomic: refactor atomic primitives

Rework atomic.h to make the low level functions available for use
in other headers without using atomic_t, e.g. in bitops.h.
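
For illustration only (not part of the patch): with the helpers split out into asm/atomic_ops.h, another header can operate on a plain int or long word directly, without wrapping it in atomic_t. A minimal sketch using the __atomic64_or() helper introduced below; the function and variable names here are hypothetical:

/* Hypothetical bitops-style helper built on the new low-level primitive. */
static inline void example_set_bits(long mask, long *word)
{
	__atomic64_or(mask, word);	/* interlocked OR on *word; old value ignored */
}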

Reviewed-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Martin Schwidefsky, 8 years ago
commit 126b30c3cb
3 changed files with 170 additions and 169 deletions
  1. arch/s390/include/asm/atomic.h (+39 -168)
  2. arch/s390/include/asm/atomic_ops.h (+130 -0)
  3. arch/s390/pci/pci_debug.c (+1 -1)

+ 39 - 168
arch/s390/include/asm/atomic.h

@@ -1,13 +1,8 @@
 /*
- * Copyright IBM Corp. 1999, 2009
+ * Copyright IBM Corp. 1999, 2016
  * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
  *	      Denis Joseph Barrow,
- *	      Arnd Bergmann <arndb@de.ibm.com>,
- *
- * Atomic operations that C can't guarantee us.
- * Useful for resource counting etc.
- * s390 uses 'Compare And Swap' for atomicity in SMP environment.
- *
+ *	      Arnd Bergmann,
  */
 
 #ifndef __ARCH_S390_ATOMIC__
@@ -15,62 +10,12 @@
 
 #include <linux/compiler.h>
 #include <linux/types.h>
+#include <asm/atomic_ops.h>
 #include <asm/barrier.h>
 #include <asm/cmpxchg.h>
 
 #define ATOMIC_INIT(i)  { (i) }
 
-#define __ATOMIC_NO_BARRIER	"\n"
-
-#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
-
-#define __ATOMIC_OR	"lao"
-#define __ATOMIC_AND	"lan"
-#define __ATOMIC_ADD	"laa"
-#define __ATOMIC_XOR	"lax"
-#define __ATOMIC_BARRIER "bcr	14,0\n"
-
-#define __ATOMIC_LOOP(ptr, op_val, op_string, __barrier)		\
-({									\
-	int old_val;							\
-									\
-	typecheck(atomic_t *, ptr);					\
-	asm volatile(							\
-		op_string "	%0,%2,%1\n"				\
-		__barrier						\
-		: "=d" (old_val), "+Q" ((ptr)->counter)			\
-		: "d" (op_val)						\
-		: "cc", "memory");					\
-	old_val;							\
-})
-
-#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
-
-#define __ATOMIC_OR	"or"
-#define __ATOMIC_AND	"nr"
-#define __ATOMIC_ADD	"ar"
-#define __ATOMIC_XOR	"xr"
-#define __ATOMIC_BARRIER "\n"
-
-#define __ATOMIC_LOOP(ptr, op_val, op_string, __barrier)		\
-({									\
-	int old_val, new_val;						\
-									\
-	typecheck(atomic_t *, ptr);					\
-	asm volatile(							\
-		"	l	%0,%2\n"				\
-		"0:	lr	%1,%0\n"				\
-		op_string "	%1,%3\n"				\
-		"	cs	%0,%1,%2\n"				\
-		"	jl	0b"					\
-		: "=&d" (old_val), "=&d" (new_val), "+Q" ((ptr)->counter)\
-		: "d" (op_val)						\
-		: "cc", "memory");					\
-	old_val;							\
-})
-
-#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
-
 static inline int atomic_read(const atomic_t *v)
 {
 	int c;
@@ -90,27 +35,23 @@ static inline void atomic_set(atomic_t *v, int i)
 
 static inline int atomic_add_return(int i, atomic_t *v)
 {
-	return __ATOMIC_LOOP(v, i, __ATOMIC_ADD, __ATOMIC_BARRIER) + i;
+	return __atomic_add_barrier(i, &v->counter) + i;
 }
 
 static inline int atomic_fetch_add(int i, atomic_t *v)
 {
-	return __ATOMIC_LOOP(v, i, __ATOMIC_ADD, __ATOMIC_BARRIER);
+	return __atomic_add_barrier(i, &v->counter);
 }
 
 static inline void atomic_add(int i, atomic_t *v)
 {
 #ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
 	if (__builtin_constant_p(i) && (i > -129) && (i < 128)) {
-		asm volatile(
-			"asi	%0,%1\n"
-			: "+Q" (v->counter)
-			: "i" (i)
-			: "cc", "memory");
+		__atomic_add_const(i, &v->counter);
 		return;
 	}
 #endif
-	__ATOMIC_LOOP(v, i, __ATOMIC_ADD, __ATOMIC_NO_BARRIER);
+	__atomic_add(i, &v->counter);
 }
 
 #define atomic_add_negative(_i, _v)	(atomic_add_return(_i, _v) < 0)
@@ -125,19 +66,19 @@ static inline void atomic_add(int i, atomic_t *v)
 #define atomic_dec_return(_v)		atomic_sub_return(1, _v)
 #define atomic_dec_and_test(_v)		(atomic_sub_return(1, _v) == 0)
 
-#define ATOMIC_OPS(op, OP)						\
+#define ATOMIC_OPS(op)							\
 static inline void atomic_##op(int i, atomic_t *v)			\
 {									\
-	__ATOMIC_LOOP(v, i, __ATOMIC_##OP, __ATOMIC_NO_BARRIER);	\
+	__atomic_##op(i, &v->counter);					\
 }									\
 static inline int atomic_fetch_##op(int i, atomic_t *v)			\
 {									\
-	return __ATOMIC_LOOP(v, i, __ATOMIC_##OP, __ATOMIC_BARRIER);	\
+	return __atomic_##op##_barrier(i, &v->counter);			\
 }
 
-ATOMIC_OPS(and, AND)
-ATOMIC_OPS(or, OR)
-ATOMIC_OPS(xor, XOR)
+ATOMIC_OPS(and)
+ATOMIC_OPS(or)
+ATOMIC_OPS(xor)
 
 #undef ATOMIC_OPS
 
@@ -145,12 +86,7 @@ ATOMIC_OPS(xor, XOR)
 
 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 {
-	asm volatile(
-		"	cs	%0,%2,%1"
-		: "+d" (old), "+Q" (v->counter)
-		: "d" (new)
-		: "cc", "memory");
-	return old;
+	return __atomic_cmpxchg(&v->counter, old, new);
 }
 
 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
@@ -168,65 +104,11 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 	return c;
 }
 
-
-#undef __ATOMIC_LOOP
-
 #define ATOMIC64_INIT(i)  { (i) }
 
-#define __ATOMIC64_NO_BARRIER	"\n"
-
-#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
-
-#define __ATOMIC64_OR	"laog"
-#define __ATOMIC64_AND	"lang"
-#define __ATOMIC64_ADD	"laag"
-#define __ATOMIC64_XOR	"laxg"
-#define __ATOMIC64_BARRIER "bcr	14,0\n"
-
-#define __ATOMIC64_LOOP(ptr, op_val, op_string, __barrier)		\
-({									\
-	long long old_val;						\
-									\
-	typecheck(atomic64_t *, ptr);					\
-	asm volatile(							\
-		op_string "	%0,%2,%1\n"				\
-		__barrier						\
-		: "=d" (old_val), "+Q" ((ptr)->counter)			\
-		: "d" (op_val)						\
-		: "cc", "memory");					\
-	old_val;							\
-})
-
-#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
-
-#define __ATOMIC64_OR	"ogr"
-#define __ATOMIC64_AND	"ngr"
-#define __ATOMIC64_ADD	"agr"
-#define __ATOMIC64_XOR	"xgr"
-#define __ATOMIC64_BARRIER "\n"
-
-#define __ATOMIC64_LOOP(ptr, op_val, op_string, __barrier)		\
-({									\
-	long long old_val, new_val;					\
-									\
-	typecheck(atomic64_t *, ptr);					\
-	asm volatile(							\
-		"	lg	%0,%2\n"				\
-		"0:	lgr	%1,%0\n"				\
-		op_string "	%1,%3\n"				\
-		"	csg	%0,%1,%2\n"				\
-		"	jl	0b"					\
-		: "=&d" (old_val), "=&d" (new_val), "+Q" ((ptr)->counter)\
-		: "d" (op_val)						\
-		: "cc", "memory");					\
-	old_val;							\
-})
-
-#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
-
-static inline long long atomic64_read(const atomic64_t *v)
+static inline long atomic64_read(const atomic64_t *v)
 {
-	long long c;
+	long c;
 
 	asm volatile(
 		"	lg	%0,%1\n"
@@ -234,71 +116,60 @@ static inline long long atomic64_read(const atomic64_t *v)
 	return c;
 }
 
-static inline void atomic64_set(atomic64_t *v, long long i)
+static inline void atomic64_set(atomic64_t *v, long i)
 {
 	asm volatile(
 		"	stg	%1,%0\n"
 		: "=Q" (v->counter) : "d" (i));
 }
 
-static inline long long atomic64_add_return(long long i, atomic64_t *v)
+static inline long atomic64_add_return(long i, atomic64_t *v)
 {
-	return __ATOMIC64_LOOP(v, i, __ATOMIC64_ADD, __ATOMIC64_BARRIER) + i;
+	return __atomic64_add_barrier(i, &v->counter) + i;
 }
 
-static inline long long atomic64_fetch_add(long long i, atomic64_t *v)
+static inline long atomic64_fetch_add(long i, atomic64_t *v)
 {
-	return __ATOMIC64_LOOP(v, i, __ATOMIC64_ADD, __ATOMIC64_BARRIER);
+	return __atomic64_add_barrier(i, &v->counter);
 }
 
-static inline void atomic64_add(long long i, atomic64_t *v)
+static inline void atomic64_add(long i, atomic64_t *v)
 {
 #ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
 	if (__builtin_constant_p(i) && (i > -129) && (i < 128)) {
-		asm volatile(
-			"agsi	%0,%1\n"
-			: "+Q" (v->counter)
-			: "i" (i)
-			: "cc", "memory");
+		__atomic64_add_const(i, &v->counter);
 		return;
 	}
 #endif
-	__ATOMIC64_LOOP(v, i, __ATOMIC64_ADD, __ATOMIC64_NO_BARRIER);
+	__atomic64_add(i, &v->counter);
 }
 
 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
 
-static inline long long atomic64_cmpxchg(atomic64_t *v,
-					     long long old, long long new)
+static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
 {
-	asm volatile(
-		"	csg	%0,%2,%1"
-		: "+d" (old), "+Q" (v->counter)
-		: "d" (new)
-		: "cc", "memory");
-	return old;
+	return __atomic64_cmpxchg(&v->counter, old, new);
 }
 
-#define ATOMIC64_OPS(op, OP)						\
+#define ATOMIC64_OPS(op)						\
 static inline void atomic64_##op(long i, atomic64_t *v)			\
 {									\
-	__ATOMIC64_LOOP(v, i, __ATOMIC64_##OP, __ATOMIC64_NO_BARRIER);	\
+	__atomic64_##op(i, &v->counter);				\
 }									\
 static inline long atomic64_fetch_##op(long i, atomic64_t *v)		\
 {									\
-	return __ATOMIC64_LOOP(v, i, __ATOMIC64_##OP, __ATOMIC64_BARRIER); \
+	return __atomic64_##op##_barrier(i, &v->counter);		\
 }
 
-ATOMIC64_OPS(and, AND)
-ATOMIC64_OPS(or, OR)
-ATOMIC64_OPS(xor, XOR)
+ATOMIC64_OPS(and)
+ATOMIC64_OPS(or)
+ATOMIC64_OPS(xor)
 
 #undef ATOMIC64_OPS
-#undef __ATOMIC64_LOOP
 
-static inline int atomic64_add_unless(atomic64_t *v, long long i, long long u)
+static inline int atomic64_add_unless(atomic64_t *v, long i, long u)
 {
-	long long c, old;
+	long c, old;
 
 	c = atomic64_read(v);
 	for (;;) {
@@ -312,9 +183,9 @@ static inline int atomic64_add_unless(atomic64_t *v, long long i, long long u)
 	return c != u;
 }
 
-static inline long long atomic64_dec_if_positive(atomic64_t *v)
+static inline long atomic64_dec_if_positive(atomic64_t *v)
 {
-	long long c, old, dec;
+	long c, old, dec;
 
 	c = atomic64_read(v);
 	for (;;) {
@@ -333,9 +204,9 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
 #define atomic64_inc(_v)		atomic64_add(1, _v)
 #define atomic64_inc_return(_v)		atomic64_add_return(1, _v)
 #define atomic64_inc_and_test(_v)	(atomic64_add_return(1, _v) == 0)
-#define atomic64_sub_return(_i, _v)	atomic64_add_return(-(long long)(_i), _v)
-#define atomic64_fetch_sub(_i, _v)	atomic64_fetch_add(-(long long)(_i), _v)
-#define atomic64_sub(_i, _v)		atomic64_add(-(long long)(_i), _v)
+#define atomic64_sub_return(_i, _v)	atomic64_add_return(-(long)(_i), _v)
+#define atomic64_fetch_sub(_i, _v)	atomic64_fetch_add(-(long)(_i), _v)
+#define atomic64_sub(_i, _v)		atomic64_add(-(long)(_i), _v)
 #define atomic64_sub_and_test(_i, _v)	(atomic64_sub_return(_i, _v) == 0)
 #define atomic64_dec(_v)		atomic64_sub(1, _v)
 #define atomic64_dec_return(_v)		atomic64_sub_return(1, _v)

+ 130 - 0
arch/s390/include/asm/atomic_ops.h

@@ -0,0 +1,130 @@
+/*
+ * Low level function for atomic operations
+ *
+ * Copyright IBM Corp. 1999, 2016
+ */
+
+#ifndef __ARCH_S390_ATOMIC_OPS__
+#define __ARCH_S390_ATOMIC_OPS__
+
+#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
+
+#define __ATOMIC_OP(op_name, op_type, op_string, op_barrier)		\
+static inline op_type op_name(op_type val, op_type *ptr)		\
+{									\
+	op_type old;							\
+									\
+	asm volatile(							\
+		op_string "	%[old],%[val],%[ptr]\n"			\
+		op_barrier						\
+		: [old] "=d" (old), [ptr] "+Q" (*ptr)			\
+		: [val] "d" (val) : "cc", "memory");			\
+	return old;							\
+}									\
+
+#define __ATOMIC_OPS(op_name, op_type, op_string)			\
+	__ATOMIC_OP(op_name, op_type, op_string, "\n")			\
+	__ATOMIC_OP(op_name##_barrier, op_type, op_string, "bcr 14,0\n")
+
+__ATOMIC_OPS(__atomic_add, int, "laa")
+__ATOMIC_OPS(__atomic_and, int, "lan")
+__ATOMIC_OPS(__atomic_or,  int, "lao")
+__ATOMIC_OPS(__atomic_xor, int, "lax")
+
+__ATOMIC_OPS(__atomic64_add, long, "laag")
+__ATOMIC_OPS(__atomic64_and, long, "lang")
+__ATOMIC_OPS(__atomic64_or,  long, "laog")
+__ATOMIC_OPS(__atomic64_xor, long, "laxg")
+
+#undef __ATOMIC_OPS
+#undef __ATOMIC_OP
+
+static inline void __atomic_add_const(int val, int *ptr)
+{
+	asm volatile(
+		"	asi	%[ptr],%[val]\n"
+		: [ptr] "+Q" (*ptr) : [val] "i" (val) : "cc");
+}
+
+static inline void __atomic64_add_const(long val, long *ptr)
+{
+	asm volatile(
+		"	agsi	%[ptr],%[val]\n"
+		: [ptr] "+Q" (*ptr) : [val] "i" (val) : "cc");
+}
+
+#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+
+#define __ATOMIC_OP(op_name, op_string)					\
+static inline int op_name(int val, int *ptr)				\
+{									\
+	int old, new;							\
+									\
+	asm volatile(							\
+		"0:	lr	%[new],%[old]\n"			\
+		op_string "	%[new],%[val]\n"			\
+		"	cs	%[old],%[new],%[ptr]\n"			\
+		"	jl	0b"					\
+		: [old] "=d" (old), [new] "=&d" (new), [ptr] "+Q" (*ptr)\
+		: [val] "d" (val), "0" (*ptr) : "cc", "memory");	\
+	return old;							\
+}
+
+#define __ATOMIC_OPS(op_name, op_string)				\
+	__ATOMIC_OP(op_name, op_string)					\
+	__ATOMIC_OP(op_name##_barrier, op_string)
+
+__ATOMIC_OPS(__atomic_add, "ar")
+__ATOMIC_OPS(__atomic_and, "nr")
+__ATOMIC_OPS(__atomic_or,  "or")
+__ATOMIC_OPS(__atomic_xor, "xr")
+
+#undef __ATOMIC_OPS
+
+#define __ATOMIC64_OP(op_name, op_string)				\
+static inline long op_name(long val, long *ptr)				\
+{									\
+	long old, new;							\
+									\
+	asm volatile(							\
+		"0:	lgr	%[new],%[old]\n"			\
+		op_string "	%[new],%[val]\n"			\
+		"	csg	%[old],%[new],%[ptr]\n"			\
+		"	jl	0b"					\
+		: [old] "=d" (old), [new] "=&d" (new), [ptr] "+Q" (*ptr)\
+		: [val] "d" (val), "0" (*ptr) : "cc", "memory");	\
+	return old;							\
+}
+
+#define __ATOMIC64_OPS(op_name, op_string)				\
+	__ATOMIC64_OP(op_name, op_string)				\
+	__ATOMIC64_OP(op_name##_barrier, op_string)
+
+__ATOMIC64_OPS(__atomic64_add, "agr")
+__ATOMIC64_OPS(__atomic64_and, "ngr")
+__ATOMIC64_OPS(__atomic64_or,  "ogr")
+__ATOMIC64_OPS(__atomic64_xor, "xgr")
+
+#undef __ATOMIC64_OPS
+
+#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+
+static inline int __atomic_cmpxchg(int *ptr, int old, int new)
+{
+	asm volatile(
+		"	cs	%[old],%[new],%[ptr]"
+		: [old] "+d" (old), [ptr] "+Q" (*ptr)
+		: [new] "d" (new) : "cc", "memory");
+	return old;
+}
+
+static inline long __atomic64_cmpxchg(long *ptr, long old, long new)
+{
+	asm volatile(
+		"	csg	%[old],%[new],%[ptr]"
+		: [old] "+d" (old), [ptr] "+Q" (*ptr)
+		: [new] "d" (new) : "cc", "memory");
+	return old;
+}
+
+#endif /* __ARCH_S390_ATOMIC_OPS__  */
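
For reference, a minimal usage sketch of the helpers defined in the new header (the counter and function below are hypothetical): each primitive returns the value the word held before the operation, and the _barrier variants additionally serialize after the interlocked update on z196 and newer machines.

static int hits;			/* plain int word, no atomic_t wrapper */

static inline int count_hit(void)
{
	/* add 1 and return the new value, mirroring atomic_add_return() */
	return __atomic_add_barrier(1, &hits) + 1;
}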

+ 1 - 1
arch/s390/pci/pci_debug.c

@@ -69,7 +69,7 @@ static void pci_sw_counter_show(struct seq_file *m)
 	int i;
 
 	for (i = 0; i < ARRAY_SIZE(pci_sw_names); i++, counter++)
-		seq_printf(m, "%26s:\t%llu\n", pci_sw_names[i],
+		seq_printf(m, "%26s:\t%lu\n", pci_sw_names[i],
 			   atomic64_read(counter));
 }