@@ -28,13 +28,13 @@
  * 1  somebody owns the spinlock
  */
 
-#define __raw_spin_is_locked(x) ((x)->slock != 0)
-#define __raw_spin_unlock_wait(lock) \
-	do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)
+#define arch_spin_is_locked(x) ((x)->slock != 0)
+#define arch_spin_unlock_wait(lock) \
+	do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)
 
-#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
+#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
 
-static inline void __raw_spin_lock(raw_spinlock_t *lock)
+static inline void arch_spin_lock(arch_spinlock_t *lock)
 {
 	unsigned long tmp;
 
@@ -51,7 +51,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
 
 /* Returns 1 if the lock is obtained, 0 otherwise. */
 
-static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
 {
 	unsigned long tmp;
 
@@ -67,7 +67,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
 	return tmp == 0 ? 1 : 0;
 }
 
-static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
 	unsigned long tmp;
 
@@ -96,9 +96,9 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
  * 0x80000000  one writer owns the rwlock, no other writers, no readers
  */
 
-#define __raw_write_can_lock(x)  ((x)->lock == 0)
+#define arch_write_can_lock(x)  ((x)->lock == 0)
 
-static inline void __raw_write_lock(raw_rwlock_t *rw)
+static inline void arch_write_lock(arch_rwlock_t *rw)
 {
 	unsigned long tmp;
 
@@ -116,7 +116,7 @@ static inline void __raw_write_lock(raw_rwlock_t *rw)
 
 /* Returns 1 if the lock is obtained, 0 otherwise. */
 
-static inline int __raw_write_trylock(raw_rwlock_t *rw)
+static inline int arch_write_trylock(arch_rwlock_t *rw)
 {
 	unsigned long tmp;
 
@@ -133,7 +133,7 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw)
 	return tmp == 0 ? 1 : 0;
 }
 
-static inline void __raw_write_unlock(raw_rwlock_t *rw)
+static inline void arch_write_unlock(arch_rwlock_t *rw)
 {
 	unsigned long tmp;
 
@@ -145,7 +145,7 @@ static inline void __raw_write_unlock(raw_rwlock_t *rw)
 		: "memory");
 }
 
-static inline void __raw_read_lock(raw_rwlock_t *rw)
+static inline void arch_read_lock(arch_rwlock_t *rw)
 {
 	unsigned long tmp;
 	unsigned long result;
@@ -164,7 +164,7 @@ static inline void __raw_read_lock(raw_rwlock_t *rw)
 
 /* Returns 1 if the lock is obtained, 0 otherwise. */
 
-static inline int __raw_read_trylock(raw_rwlock_t *rw)
+static inline int arch_read_trylock(arch_rwlock_t *rw)
 {
 	unsigned long result;
 	unsigned long tmp;
@@ -184,7 +184,7 @@ static inline int __raw_read_trylock(raw_rwlock_t *rw)
 	return result == 0;
 }
 
-static inline void __raw_read_unlock(raw_rwlock_t *rw)
+static inline void arch_read_unlock(arch_rwlock_t *rw)
 {
 	unsigned long tmp1, tmp2;
 
@@ -199,4 +199,7 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw)
 		: "memory");
 }
 
+#define arch_read_lock_flags(lock, flags)	arch_read_lock(lock)
+#define arch_write_lock_flags(lock, flags)	arch_write_lock(lock)
+
 #endif /* _XTENSA_SPINLOCK_H */
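
For context, here is a minimal sketch (not part of the patch) of how the renamed primitives would be exercised after this change. The lock variable and wrapper function below are illustrative assumptions; real kernel code normally reaches the arch_* ops through the generic spin_lock()/spin_unlock() and read/write lock wrappers rather than calling them directly.

/* Illustrative sketch only -- not part of this patch. */
#include <linux/spinlock.h>

/* __ARCH_SPIN_LOCK_UNLOCKED is the post-rename static initializer. */
static arch_spinlock_t demo_lock = __ARCH_SPIN_LOCK_UNLOCKED;

static void demo_critical_section(void)
{
	arch_spin_lock(&demo_lock);	/* was __raw_spin_lock() before the rename */
	/* ... critical section ... */
	arch_spin_unlock(&demo_lock);	/* was __raw_spin_unlock() */
}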