@@ -610,7 +610,9 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
 static inline int arch_read_trylock(arch_rwlock_t *rw)
 {
 	int ret = 0;
+	unsigned long flags;
 
+	local_irq_save(flags);
 	arch_spin_lock(&(rw->lock_mutex));
 
 	/*
@@ -623,6 +625,7 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
 	}
 
 	arch_spin_unlock(&(rw->lock_mutex));
+	local_irq_restore(flags);
 
 	smp_mb();
 	return ret;
@@ -632,7 +635,9 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
 static inline int arch_write_trylock(arch_rwlock_t *rw)
 {
 	int ret = 0;
+	unsigned long flags;
 
+	local_irq_save(flags);
 	arch_spin_lock(&(rw->lock_mutex));
 
 	/*
@@ -646,6 +651,7 @@ static inline int arch_write_trylock(arch_rwlock_t *rw)
 		ret = 1;
 	}
 	arch_spin_unlock(&(rw->lock_mutex));
+	local_irq_restore(flags);
 
 	return ret;
 }
@@ -664,16 +670,24 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
 
 static inline void arch_read_unlock(arch_rwlock_t *rw)
 {
+	unsigned long flags;
+
+	local_irq_save(flags);
 	arch_spin_lock(&(rw->lock_mutex));
 	rw->counter++;
 	arch_spin_unlock(&(rw->lock_mutex));
+	local_irq_restore(flags);
 }
 
 static inline void arch_write_unlock(arch_rwlock_t *rw)
 {
+	unsigned long flags;
+
+	local_irq_save(flags);
 	arch_spin_lock(&(rw->lock_mutex));
 	rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
 	arch_spin_unlock(&(rw->lock_mutex));
+	local_irq_restore(flags);
 }
 
 #endif