
tile: modify arch_spin_unlock_wait() semantics

Rather than trying to wait until all possible lockers have
unlocked the lock, we now only wait until the current locker
(if any) has released the lock.

The old code was correct, but the new code works more like the x86
code and thus hopefully is more appropriate under contention.
See commit 78bff1c8684f ("x86/ticketlock: Fix spin_unlock_wait()
livelock") for x86.

Signed-off-by: Chris Metcalf <cmetcalf@ezchip.com>
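
To make the semantic change concrete, below is a minimal user-space sketch of the two wait policies over a toy ticket lock. Only the field names current_ticket and next_ticket are taken from the 32-bit tile lock modified in this commit; the struct, the unlock_wait_old()/unlock_wait_new() helpers, and the main() driver are illustrative assumptions, and empty busy loops stand in for delay_backoff().

#include <stdatomic.h>
#include <stdio.h>

/* Toy ticket lock used only for this sketch; it is not the tile
 * arch_spinlock_t, but it borrows the same two field names. */
struct toy_ticket_lock {
	atomic_int current_ticket;	/* ticket number now being served */
	atomic_int next_ticket;		/* next ticket number to hand out */
};

/*
 * Old policy: spin until the lock is completely free.  A steady stream
 * of new lockers can keep next_ticket ahead of current_ticket forever,
 * which is the livelock the x86 commit referenced above also fixed.
 */
static void unlock_wait_old(struct toy_ticket_lock *lock)
{
	while (atomic_load(&lock->next_ticket) !=
	       atomic_load(&lock->current_ticket))
		;	/* the kernel calls delay_backoff() here */
}

/*
 * New policy: snapshot the ticket being served and wait only until that
 * particular holder releases the lock, i.e. current_ticket advances.
 */
static void unlock_wait_new(struct toy_ticket_lock *lock)
{
	int curr = atomic_load(&lock->current_ticket);
	int next = atomic_load(&lock->next_ticket);

	if (next == curr)
		return;		/* already unlocked */

	do {
		;		/* the kernel calls delay_backoff() here */
	} while (atomic_load(&lock->current_ticket) == curr);
}

int main(void)
{
	/* Unlocked state (tickets equal): both policies return at once. */
	struct toy_ticket_lock lock = { .current_ticket = 7, .next_ticket = 7 };

	unlock_wait_old(&lock);
	unlock_wait_new(&lock);
	printf("both waits returned on an unlocked toy lock\n");
	return 0;
}

Under contention, unlock_wait_old() can keep spinning as long as new tickets keep arriving, while unlock_wait_new() returns as soon as the single holder it observed drops the lock.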
Chris Metcalf, 10 years ago
parent commit 14c3dec2a8
2 changed files with 20 additions and 2 deletions
  1. arch/tile/lib/spinlock_32.c  (+10 −1)
  2. arch/tile/lib/spinlock_64.c  (+10 −1)

+ 10 - 1
arch/tile/lib/spinlock_32.c

@@ -65,8 +65,17 @@ EXPORT_SYMBOL(arch_spin_trylock);
 void arch_spin_unlock_wait(arch_spinlock_t *lock)
 {
 	u32 iterations = 0;
-	while (arch_spin_is_locked(lock))
+	int curr = READ_ONCE(lock->current_ticket);
+	int next = READ_ONCE(lock->next_ticket);
+
+	/* Return immediately if unlocked. */
+	if (next == curr)
+		return;
+
+	/* Wait until the current locker has released the lock. */
+	do {
 		delay_backoff(iterations++);
+	} while (READ_ONCE(lock->current_ticket) == curr);
 }
 EXPORT_SYMBOL(arch_spin_unlock_wait);
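
For reference, the loop above polls two separate fields of the 32-bit lock. A plausible sketch of the declarations it relies on, inferred from the field names in the hunk (the authoritative definitions live in arch/tile/include/asm/spinlock_32.h and may differ in detail):

/* Assumed shape of the 32-bit tile ticket lock; inferred, not copied. */
typedef struct arch_spinlock {
	int next_ticket;	/* next ticket number to hand out */
	int current_ticket;	/* ticket number currently being served */
} arch_spinlock_t;

/*
 * Predicate the old loop polled: true while any ticket is outstanding,
 * so back-to-back lockers can keep it true indefinitely.
 */
static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	return lock->next_ticket != lock->current_ticket;
}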
 

+ 10 - 1
arch/tile/lib/spinlock_64.c

@@ -65,8 +65,17 @@ EXPORT_SYMBOL(arch_spin_trylock);
 void arch_spin_unlock_wait(arch_spinlock_t *lock)
 {
 	u32 iterations = 0;
-	while (arch_spin_is_locked(lock))
+	u32 val = READ_ONCE(lock->lock);
+	u32 curr = arch_spin_current(val);
+
+	/* Return immediately if unlocked. */
+	if (arch_spin_next(val) == curr)
+		return;
+
+	/* Wait until the current locker has released the lock. */
+	do {
 		delay_backoff(iterations++);
+	} while (arch_spin_current(READ_ONCE(lock->lock)) == curr);
 }
 EXPORT_SYMBOL(arch_spin_unlock_wait);
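
The 64-bit variant packs both tickets into a single lock word and extracts them with arch_spin_current() and arch_spin_next(). A hedged sketch of how such helpers could look, purely to make the hunk readable; the real shift and mask constants are defined in arch/tile/include/asm/spinlock_64.h, and the values below are placeholders:

/* Placeholder packing: current ticket in the high bits, next ticket in
 * the low bits of one 32-bit word.  Shift/mask values are assumptions. */
#define TOY_SPIN_CURRENT_SHIFT	17
#define TOY_SPIN_NEXT_MASK	((1u << TOY_SPIN_CURRENT_SHIFT) - 1)

typedef unsigned int u32;	/* stand-in for the kernel type */

static inline u32 arch_spin_current(u32 val)
{
	return val >> TOY_SPIN_CURRENT_SHIFT;	/* ticket being served */
}

static inline u32 arch_spin_next(u32 val)
{
	return val & TOY_SPIN_NEXT_MASK;	/* next ticket to hand out */
}

Read this way, the new loop returns as soon as the serving ticket extracted from lock->lock moves past the value snapshotted in curr, matching the 32-bit change.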