@@ -1153,6 +1153,7 @@ xt_replace_table(struct xt_table *table,
 	      int *error)
 {
 	struct xt_table_info *private;
+	unsigned int cpu;
 	int ret;
 
 	ret = xt_jumpstack_alloc(newinfo);
@@ -1182,14 +1183,28 @@ xt_replace_table(struct xt_table *table,
 	smp_wmb();
 	table->private = newinfo;
 
+	/* make sure all cpus see new ->private value */
+	smp_wmb();
+
 	/*
 	 * Even though table entries have now been swapped, other CPU's
-	 * may still be using the old entries. This is okay, because
-	 * resynchronization happens because of the locking done
-	 * during the get_counters() routine.
+	 * may still be using the old entries...
 	 */
 	local_bh_enable();
 
+	/* ... so wait for even xt_recseq on all cpus */
+	for_each_possible_cpu(cpu) {
+		seqcount_t *s = &per_cpu(xt_recseq, cpu);
+		u32 seq = raw_read_seqcount(s);
+
+		if (seq & 1) {
+			do {
+				cond_resched();
+				cpu_relax();
+			} while (seq == raw_read_seqcount(s));
+		}
+	}
+
 #ifdef CONFIG_AUDIT
 	if (audit_enabled) {
 		audit_log(current->audit_context, GFP_KERNEL,