|
@@ -110,18 +110,18 @@ static inline unsigned long __xchg(unsigned long val, volatile void *ptr,
|
|
|
sizeof(*(ptr))))
|
|
|
|
|
|
/*
|
|
|
- * On ARC700, EX insn is inherently atomic, so by default "vanilla" xchg() need
|
|
|
- * not require any locking. However there's a quirk.
|
|
|
- * ARC lacks native CMPXCHG, thus emulated (see above), using external locking -
|
|
|
- * incidently it "reuses" the same atomic_ops_lock used by atomic APIs.
|
|
|
- * Now, llist code uses cmpxchg() and xchg() on same data, so xchg() needs to
|
|
|
- * abide by same serializing rules, thus ends up using atomic_ops_lock as well.
|
|
|
+ * xchg() maps directly to ARC EX instruction which guarantees atomicity.
|
|
|
+ * However in !LLSC config, it also needs to use @atomic_ops_lock spinlock
|
|
|
+ * due to a subtle reason:
|
|
|
+ * - For !LLSC, cmpxchg() needs to use that lock (see above) and there is a lot
|
|
|
+ * of kernel code which calls xchg()/cmpxchg() on same data (see llist.h)
|
|
|
+ * Hence xchg() needs to follow same locking rules.
|
|
|
*
|
|
|
- * This however is only relevant if SMP and/or ARC lacks LLSC
|
|
|
- * if (UP or LLSC)
|
|
|
- * xchg doesn't need serialization
|
|
|
- * else <==> !(UP or LLSC) <==> (!UP and !LLSC) <==> (SMP and !LLSC)
|
|
|
- * xchg needs serialization
|
|
|
+ * Technically the lock is also needed for UP (boils down to irq save/restore)
|
|
|
+ * but we can cheat a bit since cmpxchg() atomic_ops_lock() would cause irqs to
|
|
|
+ * be disabled thus can't possibly be interrupted/preempted/clobbered by xchg()
|
|
|
+ * Other way around, xchg is one instruction anyways, so can't be interrupted
|
|
|
+ * as such
|
|
|
*/
|
|
|
|
|
|
#if !defined(CONFIG_ARC_HAS_LLSC) && defined(CONFIG_SMP)
|