@@ -103,38 +103,14 @@ static inline void debug_spin_unlock(raw_spinlock_t *lock)
 	lock->owner_cpu = -1;
 }
 
-static void __spin_lock_debug(raw_spinlock_t *lock)
-{
-	u64 i;
-	u64 loops = loops_per_jiffy * HZ;
-
-	for (i = 0; i < loops; i++) {
-		if (arch_spin_trylock(&lock->raw_lock))
-			return;
-		__delay(1);
-	}
-	/* lockup suspected: */
-	spin_dump(lock, "lockup suspected");
-#ifdef CONFIG_SMP
-	trigger_all_cpu_backtrace();
-#endif
-
-	/*
-	 * The trylock above was causing a livelock. Give the lower level arch
-	 * specific lock code a chance to acquire the lock. We have already
-	 * printed a warning/backtrace at this point. The non-debug arch
-	 * specific code might actually succeed in acquiring the lock. If it is
-	 * not successful, the end-result is the same - there is no forward
-	 * progress.
-	 */
-	arch_spin_lock(&lock->raw_lock);
-}
-
+/*
+ * We are now relying on the NMI watchdog to detect lockups instead of doing
+ * the detection here with an unfair lock, which can cause problems of its own.
+ */
 void do_raw_spin_lock(raw_spinlock_t *lock)
 {
 	debug_spin_lock_before(lock);
-	if (unlikely(!arch_spin_trylock(&lock->raw_lock)))
-		__spin_lock_debug(lock);
+	arch_spin_lock(&lock->raw_lock);
 	debug_spin_lock_after(lock);
 }
 
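The removed helper spun on arch_spin_trylock() for roughly loops_per_jiffy * HZ iterations before declaring a lockup, then fell back to an unbounded acquire. Because a repeated trylock never queues the way the real arch lock (e.g. a ticket lock) does, the debug path itself could livelock even when the plain lock would have made progress. The following is a minimal userspace sketch of that removed pattern, using a C11 atomic_flag as a stand-in for the arch lock; the names try_lock(), lock_with_detection() and the loop count are illustrative, not kernel APIs.

/* Userspace sketch of the removed watchdog-style acquire (illustrative only). */
#include <stdatomic.h>
#include <stdio.h>

static atomic_flag lock = ATOMIC_FLAG_INIT;

static int try_lock(void)
{
	/* Mirrors arch_spin_trylock(): one attempt, no queuing, no fairness. */
	return !atomic_flag_test_and_set_explicit(&lock, memory_order_acquire);
}

static void lock_with_detection(void)
{
	/* Rough stand-in for loops_per_jiffy * HZ; the kernel version also
	 * called __delay(1) between attempts. */
	const unsigned long long loops = 100000000ULL;
	unsigned long long i;

	for (i = 0; i < loops; i++)
		if (try_lock())
			return;

	/* Lockup suspected: warn once, then spin unboundedly - the same
	 * shape as the removed __spin_lock_debug(). */
	fprintf(stderr, "BUG: spinlock lockup suspected\n");
	while (!try_lock())
		;
}

static void unlock(void)
{
	atomic_flag_clear_explicit(&lock, memory_order_release);
}

int main(void)
{
	lock_with_detection();
	unlock();
	return 0;
}

With the patch applied, do_raw_spin_lock() simply takes the fair arch lock and leaves lockup detection to the NMI watchdog, as the new comment says.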
@@ -172,32 +148,6 @@ static void rwlock_bug(rwlock_t *lock, const char *msg)
 
 #define RWLOCK_BUG_ON(cond, lock, msg) if (unlikely(cond)) rwlock_bug(lock, msg)
 
-#if 0	/* __write_lock_debug() can lock up - maybe this can too? */
-static void __read_lock_debug(rwlock_t *lock)
-{
-	u64 i;
-	u64 loops = loops_per_jiffy * HZ;
-	int print_once = 1;
-
-	for (;;) {
-		for (i = 0; i < loops; i++) {
-			if (arch_read_trylock(&lock->raw_lock))
-				return;
-			__delay(1);
-		}
-		/* lockup suspected: */
-		if (print_once) {
-			print_once = 0;
-			printk(KERN_EMERG "BUG: read-lock lockup on CPU#%d, "
-					"%s/%d, %p\n",
-				raw_smp_processor_id(), current->comm,
-				current->pid, lock);
-			dump_stack();
-		}
-	}
-}
-#endif
-
 void do_raw_read_lock(rwlock_t *lock)
 {
 	RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
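This hunk only deletes dead code: __read_lock_debug() was already compiled out under #if 0, so do_raw_read_lock() was taking the arch lock directly before and after the patch. For context, the post-patch function is expected to read as follows (a reconstruction from the hunk's context lines; the arch_read_lock() line below is not shown in this diff):

void do_raw_read_lock(rwlock_t *lock)
{
	RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
	arch_read_lock(&lock->raw_lock);
}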
@@ -247,32 +197,6 @@ static inline void debug_write_unlock(rwlock_t *lock)
 	lock->owner_cpu = -1;
 }
 
-#if 0	/* This can cause lockups */
-static void __write_lock_debug(rwlock_t *lock)
-{
-	u64 i;
-	u64 loops = loops_per_jiffy * HZ;
-	int print_once = 1;
-
-	for (;;) {
-		for (i = 0; i < loops; i++) {
-			if (arch_write_trylock(&lock->raw_lock))
-				return;
-			__delay(1);
-		}
-		/* lockup suspected: */
-		if (print_once) {
-			print_once = 0;
-			printk(KERN_EMERG "BUG: write-lock lockup on CPU#%d, "
-					"%s/%d, %p\n",
-				raw_smp_processor_id(), current->comm,
-				current->pid, lock);
-			dump_stack();
-		}
-	}
-}
-#endif
-
 void do_raw_write_lock(rwlock_t *lock)
 {
 	debug_write_lock_before(lock);
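Likewise on the write side, only the long-dead __write_lock_debug() goes away. The complete function after the patch should read as below (again a reconstruction; the hunk's trailing context stops at debug_write_lock_before(), so the last two lines are inferred from the surrounding file):

void do_raw_write_lock(rwlock_t *lock)
{
	debug_write_lock_before(lock);
	arch_write_lock(&lock->raw_lock);
	debug_write_lock_after(lock);
}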