@@ -83,6 +83,13 @@ void nf_conntrack_lock(spinlock_t *lock) __acquires(lock)
 	spin_lock(lock);
 	while (unlikely(nf_conntrack_locks_all)) {
 		spin_unlock(lock);
+
+		/*
+		 * Order the 'nf_conntrack_locks_all' load vs. the
+		 * spin_unlock_wait() loads below, to ensure
+		 * that 'nf_conntrack_locks_all_lock' is indeed held:
+		 */
+		smp_rmb(); /* spin_lock(&nf_conntrack_locks_all_lock) */
 		spin_unlock_wait(&nf_conntrack_locks_all_lock);
 		spin_lock(lock);
 	}
@@ -128,6 +135,14 @@ static void nf_conntrack_all_lock(void)
 	spin_lock(&nf_conntrack_locks_all_lock);
 	nf_conntrack_locks_all = true;
 
+	/*
+	 * Order the above store of 'nf_conntrack_locks_all' against
+	 * the spin_unlock_wait() loads below, such that if
+	 * nf_conntrack_lock() observes 'nf_conntrack_locks_all'
+	 * we must observe nf_conntrack_locks[] held:
+	 */
+	smp_mb(); /* spin_lock(&nf_conntrack_locks_all_lock) */
+
 	for (i = 0; i < CONNTRACK_LOCKS; i++) {
 		spin_unlock_wait(&nf_conntrack_locks[i]);
 	}
@@ -135,7 +150,13 @@ static void nf_conntrack_all_lock(void)
 
 static void nf_conntrack_all_unlock(void)
 {
-	nf_conntrack_locks_all = false;
+	/*
+	 * All prior stores must be complete before we clear
+	 * 'nf_conntrack_locks_all'. Otherwise nf_conntrack_lock()
+	 * might observe the false value but not the entire
+	 * critical section:
+	 */
+	smp_store_release(&nf_conntrack_locks_all, false);
 	spin_unlock(&nf_conntrack_locks_all_lock);
 }
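For readers without the rest of nf_conntrack_core.c at hand, below is a minimal userspace sketch of the same publish/observe pattern, using C11 atomics as stand-ins for the kernel primitives: a seq_cst fence for smp_mb(), an acquire fence for smp_rmb() (C11 has no pure read barrier), and a release store for smp_store_release(). All names here (all_lock(), bucket_lock(), locks_all, bucket_busy, global_busy) are invented for illustration and do not appear in the patch; this is an analogy under those assumptions, not the kernel implementation.

/* sketch.c - hypothetical C11 analogue of the barrier pairing above.
 * Build: cc -std=c11 -pthread sketch.c
 */
#include <stdatomic.h>
#include <stdbool.h>

static atomic_bool locks_all;	/* ~ nf_conntrack_locks_all */
static atomic_bool bucket_busy;	/* ~ one entry of nf_conntrack_locks[] */
static atomic_bool global_busy;	/* ~ nf_conntrack_locks_all_lock */

static void spin_acquire(atomic_bool *l)
{
	while (atomic_exchange_explicit(l, true, memory_order_acquire))
		;	/* ~ spin_lock() */
}

static void spin_release(atomic_bool *l)
{
	atomic_store_explicit(l, false, memory_order_release); /* ~ spin_unlock() */
}

static void unlock_wait(atomic_bool *l)
{
	while (atomic_load_explicit(l, memory_order_relaxed))
		;	/* ~ spin_unlock_wait(): spin until the lock is not held */
}

/* ~ nf_conntrack_all_lock(): store the flag, then a full fence (~ smp_mb())
 * so the flag store is ordered before the bucket-lock probes; any bucket
 * holder that missed the flag is still seen as holding its lock. */
static void all_lock(void)
{
	spin_acquire(&global_busy);
	atomic_store_explicit(&locks_all, true, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);	/* ~ smp_mb() */
	unlock_wait(&bucket_busy);
}

/* ~ nf_conntrack_all_unlock(): the release store (~ smp_store_release())
 * keeps every store made under the global lock before the flag clears. */
static void all_unlock(void)
{
	atomic_store_explicit(&locks_all, false, memory_order_release);
	spin_release(&global_busy);
}

/* ~ nf_conntrack_lock(): after observing the flag, an acquire fence
 * (~ smp_rmb()) orders the flag load before the probe of the global
 * lock, so the probe cannot be satisfied by a stale "unlocked" value
 * read earlier. */
static void bucket_lock(void)
{
	spin_acquire(&bucket_busy);
	while (atomic_load_explicit(&locks_all, memory_order_relaxed)) {
		spin_release(&bucket_busy);
		atomic_thread_fence(memory_order_acquire);	/* ~ smp_rmb() */
		unlock_wait(&global_busy);
		spin_acquire(&bucket_busy);
	}
}

int main(void)
{
	/* Single-threaded smoke test of the API shape only. */
	bucket_lock();
	spin_release(&bucket_busy);
	all_lock();
	all_unlock();
	return 0;
}

The pairing to note: the full fence in the writer and the read fence in the reader are the two halves of the classic store-buffering guard, while the release store on the unlock side pairs with the lock acquisition in the reader, so a reader that sees the flag cleared also sees every store made during the locked-all critical section.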