Browse code

[IA64] Fix 64-bit atomic routines to return "long"

These have been broken (returning "int") since the dawn of
time. But there were no users that needed the whole value
until commit
 424acaaeb3a3932d64a9b4bd59df6cf72c22d8f3
 rwsem: wake queued readers when writer blocks on active read lock

made this change:

-           (rwsem_atomic_update(0, sem) & RWSEM_ACTIVE_MASK))
-               /* Someone grabbed the sem already */
+           rwsem_atomic_update(0, sem) < RWSEM_WAITING_BIAS)
+               /* Someone grabbed the sem for write already */

RWSEM_ACTIVE_MASK is 0xffffffffL, so the old code only looked
at the low order 32-bits. The new code needs to see all 64 bits.

Signed-off-by: Tony Luck <tony.luck@intel.com>
Tony Luck, 15 years ago
Parent
Commit
01d69a82e1
1 changed file with 4 additions and 4 deletions:
  arch/ia64/include/asm/atomic.h (+4 −4)

arch/ia64/include/asm/atomic.h

@@ -41,7 +41,7 @@ ia64_atomic_add (int i, atomic_t *v)
 	return new;
 }
 
-static __inline__ int
+static __inline__ long
 ia64_atomic64_add (__s64 i, atomic64_t *v)
 {
 	__s64 old, new;
@@ -69,7 +69,7 @@ ia64_atomic_sub (int i, atomic_t *v)
 	return new;
 }
 
-static __inline__ int
+static __inline__ long
 ia64_atomic64_sub (__s64 i, atomic64_t *v)
 {
 	__s64 old, new;
@@ -107,7 +107,7 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
 
 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
 
-static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
+static __inline__ long atomic64_add_unless(atomic64_t *v, long a, long u)
 {
 	long c, old;
 	c = atomic64_read(v);
@@ -158,7 +158,7 @@ atomic_add_negative (int i, atomic_t *v)
 	return atomic_add_return(i, v) < 0;
 }
 
-static __inline__ int
+static __inline__ long
 atomic64_add_negative (__s64 i, atomic64_t *v)
 {
 	return atomic64_add_return(i, v) < 0;