
s390/rwlock: use the interlocked-access facility 1 instructions

Make use of the load-and-add, load-and-or and load-and-and instructions
to atomically update the read-write lock without a compare-and-swap loop.

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Martin Schwidefsky 11 years ago
parent
commit
bbae71bf9c
2 changed files with 108 additions and 2 deletions
  1. arch/s390/include/asm/spinlock.h: 74 additions, 2 deletions
  2. arch/s390/lib/spinlock.c: 34 additions, 0 deletions
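
To see what the commit message describes, here is a minimal portable sketch. It is illustration only, not code from the patch: the function names are hypothetical, and the C11 <stdatomic.h> operations stand in for the z196 interlocked-access instruction (laa here).

    #include <stdatomic.h>

    /* Old scheme: retry a compare-and-swap until no writer holds the lock. */
    static int read_lock_cas(atomic_uint *lock)
    {
    	unsigned int old = atomic_load(lock);

    	while ((int) old >= 0) {	/* top bit clear: no writer */
    		if (atomic_compare_exchange_weak(lock, &old, old + 1))
    			return 1;	/* reader count bumped */
    	}
    	return 0;			/* writer active: caller must wait */
    }

    /* New scheme: one unconditional interlocked add (the role "laa" plays),
     * then inspect the value the lock word had before the add. */
    static int read_lock_laa(atomic_uint *lock)
    {
    	unsigned int old = atomic_fetch_add(lock, 1);

    	return (int) old >= 0;	/* negative: a writer held the lock */
    }

The single fetch-and-add always succeeds, so the fast path has no retry loop at all; only a reader that observes the writer bit falls back to the wait routine.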

+ 74 - 2
arch/s390/include/asm/spinlock.h

@@ -130,8 +130,6 @@ static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
  */
 #define arch_write_can_lock(x) ((x)->lock == 0)
 
-extern void _raw_read_lock_wait(arch_rwlock_t *lp);
-extern void _raw_write_lock_wait(arch_rwlock_t *lp);
 extern int _raw_read_trylock_retry(arch_rwlock_t *lp);
 extern int _raw_write_trylock_retry(arch_rwlock_t *lp);
 
@@ -152,6 +150,78 @@ static inline int arch_write_trylock_once(arch_rwlock_t *rw)
 		      _raw_compare_and_swap(&rw->lock, 0, 0x80000000));
 }
 
+#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
+
+#define __RAW_OP_OR	"lao"
+#define __RAW_OP_AND	"lan"
+#define __RAW_OP_ADD	"laa"
+
+#define __RAW_LOCK(ptr, op_val, op_string)		\
+({							\
+	unsigned int old_val;				\
+							\
+	typecheck(unsigned int *, ptr);			\
+	asm volatile(					\
+		op_string "	%0,%2,%1\n"		\
+		"bcr	14,0\n"				\
+		: "=d" (old_val), "+Q" (*ptr)		\
+		: "d" (op_val)				\
+		: "cc", "memory");			\
+	old_val;					\
+})
+
+#define __RAW_UNLOCK(ptr, op_val, op_string)		\
+({							\
+	unsigned int old_val;				\
+							\
+	typecheck(unsigned int *, ptr);			\
+	asm volatile(					\
+		"bcr	14,0\n"				\
+		op_string "	%0,%2,%1\n"		\
+		: "=d" (old_val), "+Q" (*ptr)		\
+		: "d" (op_val)				\
+		: "cc", "memory");			\
+	old_val;					\
+})
+
+extern void _raw_read_lock_wait(arch_rwlock_t *lp);
+extern void _raw_write_lock_wait(arch_rwlock_t *lp, unsigned int prev);
+
+static inline void arch_read_lock(arch_rwlock_t *rw)
+{
+	unsigned int old;
+
+	old = __RAW_LOCK(&rw->lock, 1, __RAW_OP_ADD);
+	if ((int) old < 0)
+		_raw_read_lock_wait(rw);
+}
+
+static inline void arch_read_unlock(arch_rwlock_t *rw)
+{
+	__RAW_UNLOCK(&rw->lock, -1, __RAW_OP_ADD);
+}
+
+static inline void arch_write_lock(arch_rwlock_t *rw)
+{
+	unsigned int old;
+
+	old = __RAW_LOCK(&rw->lock, 0x80000000, __RAW_OP_OR);
+	if (old != 0)
+		_raw_write_lock_wait(rw, old);
+	rw->owner = SPINLOCK_LOCKVAL;
+}
+
+static inline void arch_write_unlock(arch_rwlock_t *rw)
+{
+	rw->owner = 0;
+	__RAW_UNLOCK(&rw->lock, 0x7fffffff, __RAW_OP_AND);
+}
+
+#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+
+extern void _raw_read_lock_wait(arch_rwlock_t *lp);
+extern void _raw_write_lock_wait(arch_rwlock_t *lp);
+
 static inline void arch_read_lock(arch_rwlock_t *rw)
 {
 	if (!arch_read_trylock_once(rw))
@@ -187,6 +257,8 @@ static inline void arch_write_unlock(arch_rwlock_t *rw)
 		: "cc", "memory");
 		: "cc", "memory");
 }
 }
 
 
+#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+
 static inline int arch_read_trylock(arch_rwlock_t *rw)
 {
 	if (!arch_read_trylock_once(rw))

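
As a rough C11 analogue of the arch_write_lock()/arch_write_unlock() fast paths added above (again illustration only, with hypothetical names; atomic_fetch_or/atomic_fetch_and stand in for lao/lan):

    #include <stdatomic.h>

    /* "lao" analogue: set the writer bit, learn the previous value. */
    static int write_trylock_lao(atomic_uint *lock)
    {
    	unsigned int old = atomic_fetch_or(lock, 0x80000000u);

    	return old == 0;	/* lock was completely free: we own it */
    }

    /* "lan" analogue: clear the writer bit again on unlock. */
    static void write_unlock_lan(atomic_uint *lock)
    {
    	atomic_fetch_and(lock, 0x7fffffffu);
    }

Unlike this trylock-style sketch, the patch lets a writer that loses the race keep its bit set and passes the previous lock value to _raw_write_lock_wait(), which resolves ownership in the slow path below.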
+ 34 - 0
arch/s390/lib/spinlock.c

@@ -114,6 +114,9 @@ void _raw_read_lock_wait(arch_rwlock_t *rw)
 	unsigned int owner, old;
 	int count = spin_retry;
 
+#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
+	__RAW_LOCK(&rw->lock, -1, __RAW_OP_ADD);
+#endif
 	owner = 0;
 	while (1) {
 		if (count-- <= 0) {
@@ -147,6 +150,35 @@ int _raw_read_trylock_retry(arch_rwlock_t *rw)
 }
 EXPORT_SYMBOL(_raw_read_trylock_retry);
 
+#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
+
+void _raw_write_lock_wait(arch_rwlock_t *rw, unsigned int prev)
+{
+	unsigned int owner, old;
+	int count = spin_retry;
+
+	owner = 0;
+	while (1) {
+		if (count-- <= 0) {
+			if (owner && !smp_vcpu_scheduled(~owner))
+				smp_yield_cpu(~owner);
+			count = spin_retry;
+		}
+		old = ACCESS_ONCE(rw->lock);
+		owner = ACCESS_ONCE(rw->owner);
+		smp_rmb();
+		if ((int) old >= 0) {
+			prev = __RAW_LOCK(&rw->lock, 0x80000000, __RAW_OP_OR);
+			old = prev;
+		}
+		if ((old & 0x7fffffff) == 0 && (int) prev >= 0)
+			break;
+	}
+}
+EXPORT_SYMBOL(_raw_write_lock_wait);
+
+#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+
 void _raw_write_lock_wait(arch_rwlock_t *rw)
 {
 	unsigned int owner, old, prev;
@@ -173,6 +205,8 @@ void _raw_write_lock_wait(arch_rwlock_t *rw)
 }
 EXPORT_SYMBOL(_raw_write_lock_wait);
 
+#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+
 int _raw_write_trylock_retry(arch_rwlock_t *rw)
 {
 	unsigned int old;