@@ -409,6 +409,8 @@ void nf_conntrack_helper_unregister(struct nf_conntrack_helper *me)
 	struct nf_conntrack_expect *exp;
 	const struct hlist_node *next;
 	const struct hlist_nulls_node *nn;
+	unsigned int last_hsize;
+	spinlock_t *lock;
 	struct net *net;
 	unsigned int i;
 
@@ -446,13 +448,18 @@ void nf_conntrack_helper_unregister(struct nf_conntrack_helper *me)
 	rtnl_unlock();
 
 	local_bh_disable();
-	for (i = 0; i < nf_conntrack_htable_size; i++) {
-		nf_conntrack_lock(&nf_conntrack_locks[i % CONNTRACK_LOCKS]);
-		if (i < nf_conntrack_htable_size) {
-			hlist_nulls_for_each_entry(h, nn, &nf_conntrack_hash[i], hnnode)
-				unhelp(h, me);
+restart:
+	last_hsize = nf_conntrack_htable_size;
+	for (i = 0; i < last_hsize; i++) {
+		lock = &nf_conntrack_locks[i % CONNTRACK_LOCKS];
+		nf_conntrack_lock(lock);
+		if (last_hsize != nf_conntrack_htable_size) {
+			spin_unlock(lock);
+			goto restart;
 		}
-		spin_unlock(&nf_conntrack_locks[i % CONNTRACK_LOCKS]);
+		hlist_nulls_for_each_entry(h, nn, &nf_conntrack_hash[i], hnnode)
+			unhelp(h, me);
+		spin_unlock(lock);
 	}
 	local_bh_enable();
 }
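For context, the hunk above implements a snapshot-and-restart walk: it snapshots
nf_conntrack_htable_size into last_hsize before the loop, and after taking each
per-bucket lock it rechecks the snapshot; if a concurrent resize changed the
table size, it drops the lock and restarts the whole walk instead of iterating
a stale bucket array. The stand-alone user-space sketch below shows the same
pattern; it is an illustration only, not kernel code, and the names table_size,
bucket_locks, NR_LOCKS, visit_bucket and walk_all_buckets are hypothetical
stand-ins for nf_conntrack_htable_size, nf_conntrack_locks, CONNTRACK_LOCKS and
the unhelp() bucket walk, with pthread mutexes in place of nf_conntrack_lock()
and spin_unlock().

/* Build with: gcc -pthread restart_walk.c */
#include <pthread.h>
#include <stdio.h>

#define NR_LOCKS 4	/* stand-in for CONNTRACK_LOCKS */

/* Stand-in for nf_conntrack_htable_size; may change under a concurrent resize. */
static unsigned int table_size = 16;

/* Stand-in for nf_conntrack_locks: locks are shared across buckets modulo NR_LOCKS. */
static pthread_mutex_t bucket_locks[NR_LOCKS] = {
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
};

/* Stand-in for the hlist_nulls walk that calls unhelp() on each entry. */
static void visit_bucket(unsigned int i)
{
	printf("walking bucket %u\n", i);
}

static void walk_all_buckets(void)
{
	unsigned int last_hsize, i;
	pthread_mutex_t *lock;

restart:
	/* Snapshot the table size; a concurrent resize invalidates the walk. */
	last_hsize = table_size;
	for (i = 0; i < last_hsize; i++) {
		lock = &bucket_locks[i % NR_LOCKS];
		pthread_mutex_lock(lock);
		/* Resized under us? Drop the lock and start over. */
		if (last_hsize != table_size) {
			pthread_mutex_unlock(lock);
			goto restart;
		}
		visit_bucket(i);
		pthread_mutex_unlock(lock);
	}
}

int main(void)
{
	walk_all_buckets();
	return 0;
}

The recheck must happen after the lock is held: the resize path takes the same
per-bucket locks, so once the snapshot is revalidated under the lock, the bucket
index is known to be in range for the current table.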