@@ -96,19 +96,26 @@ static struct conntrack_gc_work conntrack_gc_work;
 
 void nf_conntrack_lock(spinlock_t *lock) __acquires(lock)
 {
+	/* 1) Acquire the lock */
 	spin_lock(lock);
-	while (unlikely(nf_conntrack_locks_all)) {
-		spin_unlock(lock);
-
-		/*
-		 * Order the 'nf_conntrack_locks_all' load vs. the
-		 * spin_unlock_wait() loads below, to ensure
-		 * that 'nf_conntrack_locks_all_lock' is indeed held:
-		 */
-		smp_rmb(); /* spin_lock(&nf_conntrack_locks_all_lock) */
-		spin_unlock_wait(&nf_conntrack_locks_all_lock);
-		spin_lock(lock);
-	}
+
+	/* 2) read nf_conntrack_locks_all, with ACQUIRE semantics
+	 * It pairs with the smp_store_release() in nf_conntrack_all_unlock()
+	 */
+	if (likely(smp_load_acquire(&nf_conntrack_locks_all) == false))
+		return;
+
+	/* fast path failed, unlock */
+	spin_unlock(lock);
+
+	/* Slow path 1) get global lock */
+	spin_lock(&nf_conntrack_locks_all_lock);
+
+	/* Slow path 2) get the lock we want */
+	spin_lock(lock);
+
+	/* Slow path 3) release the global lock */
+	spin_unlock(&nf_conntrack_locks_all_lock);
 }
 EXPORT_SYMBOL_GPL(nf_conntrack_lock);
 
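The fast path above can be modelled outside the kernel. Below is a minimal userspace sketch of the same pattern, using C11 atomics and POSIX mutexes in place of the kernel's spinlocks and smp_load_acquire(); every name in it (bucket_lock, locks_all, bucket_lock_acquire, and so on) is illustrative, not kernel API:

/* Userspace sketch (not kernel code): the fast/slow path of
 * nf_conntrack_lock(), modelled with C11 atomics and POSIX mutexes.
 * All names are illustrative.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

#define NUM_BUCKETS 16

static pthread_mutex_t bucket_lock[NUM_BUCKETS];
static pthread_mutex_t all_locks_lock = PTHREAD_MUTEX_INITIALIZER;
static atomic_bool locks_all;		/* role of nf_conntrack_locks_all */

static void locks_init(void)
{
	for (int i = 0; i < NUM_BUCKETS; i++)
		pthread_mutex_init(&bucket_lock[i], NULL);
}

/* Analogue of nf_conntrack_lock(): returns with bucket_lock[i] held. */
static void bucket_lock_acquire(int i)
{
	/* 1) Take the per-bucket lock optimistically. */
	pthread_mutex_lock(&bucket_lock[i]);

	/* 2) Load-acquire of the flag; pairs with the store-release in
	 * all_locks_release() (see the second sketch below).
	 */
	if (!atomic_load_explicit(&locks_all, memory_order_acquire))
		return;		/* fast path: no "lock all" in progress */

	/* Slow path: drop our lock, queue behind the "lock all" holder,
	 * then retake the lock we actually want.
	 */
	pthread_mutex_unlock(&bucket_lock[i]);
	pthread_mutex_lock(&all_locks_lock);
	pthread_mutex_lock(&bucket_lock[i]);
	pthread_mutex_unlock(&all_locks_lock);
}

Because the flag is checked while the per-bucket lock is held, a concurrent "lock all" operation can never complete its per-bucket pass unnoticed: either it stored the flag before we locked the bucket (so we read true and take the slow path), or its pass blocks on the bucket lock until we release it.
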
@@ -149,28 +156,27 @@ static void nf_conntrack_all_lock(void)
 	int i;
 
 	spin_lock(&nf_conntrack_locks_all_lock);
-	nf_conntrack_locks_all = true;
 
-	/*
-	 * Order the above store of 'nf_conntrack_locks_all' against
-	 * the spin_unlock_wait() loads below, such that if
-	 * nf_conntrack_lock() observes 'nf_conntrack_locks_all'
-	 * we must observe nf_conntrack_locks[] held:
-	 */
-	smp_mb(); /* spin_lock(&nf_conntrack_locks_all_lock) */
+	nf_conntrack_locks_all = true;
 
 	for (i = 0; i < CONNTRACK_LOCKS; i++) {
-		spin_unlock_wait(&nf_conntrack_locks[i]);
+		spin_lock(&nf_conntrack_locks[i]);
+
+		/* This spin_unlock provides the "release" to ensure that
+		 * nf_conntrack_locks_all==true is visible to everyone that
+		 * acquired spin_lock(&nf_conntrack_locks[]).
+		 */
+		spin_unlock(&nf_conntrack_locks[i]);
 	}
 }
 
 static void nf_conntrack_all_unlock(void)
 {
-	/*
-	 * All prior stores must be complete before we clear
+	/* All prior stores must be complete before we clear
 	 * 'nf_conntrack_locks_all'. Otherwise nf_conntrack_lock()
 	 * might observe the false value but not the entire
-	 * critical section:
+	 * critical section.
+	 * It pairs with the smp_load_acquire() in nf_conntrack_lock()
 	 */
 	smp_store_release(&nf_conntrack_locks_all, false);
 	spin_unlock(&nf_conntrack_locks_all_lock);
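
For completeness, here is the "lock all" side in the same userspace model, again as an illustrative sketch rather than kernel code. Taking and immediately releasing each bucket lock provides the guarantee that the old smp_mb() plus spin_unlock_wait() sequence was trying to provide, but through ordinary lock acquire/release ordering; the store-release on exit pairs with the load-acquire in the fast path above:

/* Userspace sketch (not kernel code): the "lock all" side of the same
 * model; all names are illustrative. bucket_lock[], all_locks_lock and
 * locks_all are the globals from the first sketch, with bucket_lock[]
 * set up by locks_init() there.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

#define NUM_BUCKETS 16

static pthread_mutex_t bucket_lock[NUM_BUCKETS];
static pthread_mutex_t all_locks_lock = PTHREAD_MUTEX_INITIALIZER;
static atomic_bool locks_all;

/* Analogue of nf_conntrack_all_lock(): returns with all_locks_lock held;
 * every later bucket_lock_acquire() is diverted to its slow path.
 */
static void all_locks_acquire(void)
{
	pthread_mutex_lock(&all_locks_lock);

	/* Plain (relaxed) store; the lock/unlock pass below publishes it. */
	atomic_store_explicit(&locks_all, true, memory_order_relaxed);

	for (int i = 0; i < NUM_BUCKETS; i++) {
		/* Wait for the current owner of the bucket, if any ... */
		pthread_mutex_lock(&bucket_lock[i]);
		/* ... and let this unlock act as the "release" that makes
		 * locks_all == true visible to the next acquirer of the
		 * bucket lock.
		 */
		pthread_mutex_unlock(&bucket_lock[i]);
	}
}

/* Analogue of nf_conntrack_all_unlock(). */
static void all_locks_release(void)
{
	/* Store-release: pairs with the load-acquire in the fast path, so
	 * a thread that reads 'false' also observes the whole critical
	 * section that ran under all_locks_acquire().
	 */
	atomic_store_explicit(&locks_all, false, memory_order_release);
	pthread_mutex_unlock(&all_locks_lock);
}

This is why the bare barriers disappear from the patch: every ordering requirement is now expressed as an acquire/release pair, either on one of the locks or on nf_conntrack_locks_all itself.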