@@ -447,7 +447,8 @@ static void death_by_timeout(unsigned long ul_conntrack)
 static inline bool
 nf_ct_key_equal(struct nf_conntrack_tuple_hash *h,
                 const struct nf_conntrack_tuple *tuple,
-                const struct nf_conntrack_zone *zone)
+                const struct nf_conntrack_zone *zone,
+                const struct net *net)
 {
         struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
 
@@ -456,7 +457,8 @@ nf_ct_key_equal(struct nf_conntrack_tuple_hash *h,
          */
         return nf_ct_tuple_equal(tuple, &h->tuple) &&
                nf_ct_zone_equal(ct, zone, NF_CT_DIRECTION(h)) &&
-               nf_ct_is_confirmed(ct);
+               nf_ct_is_confirmed(ct) &&
+               net_eq(net, nf_ct_net(ct));
 }
 
 /*
@@ -481,7 +483,7 @@ begin:
         } while (read_seqcount_retry(&nf_conntrack_generation, sequence));
 
         hlist_nulls_for_each_entry_rcu(h, n, &ct_hash[bucket], hnnode) {
-                if (nf_ct_key_equal(h, tuple, zone)) {
+                if (nf_ct_key_equal(h, tuple, zone, net)) {
                         NF_CT_STAT_INC_ATOMIC(net, found);
                         return h;
                 }
@@ -517,7 +519,7 @@ begin:
                              !atomic_inc_not_zero(&ct->ct_general.use)))
                         h = NULL;
                 else {
-                        if (unlikely(!nf_ct_key_equal(h, tuple, zone))) {
+                        if (unlikely(!nf_ct_key_equal(h, tuple, zone, net))) {
                                 nf_ct_put(ct);
                                 goto begin;
                         }
@@ -573,12 +575,12 @@ nf_conntrack_hash_check_insert(struct nf_conn *ct)
         /* See if there's one in the list already, including reverse */
         hlist_nulls_for_each_entry(h, n, &net->ct.hash[hash], hnnode)
                 if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
-                                    zone))
+                                    zone, net))
                         goto out;
 
         hlist_nulls_for_each_entry(h, n, &net->ct.hash[reply_hash], hnnode)
                 if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_REPLY].tuple,
-                                    zone))
+                                    zone, net))
                         goto out;
 
         add_timer(&ct->timeout);
@@ -663,12 +665,12 @@ __nf_conntrack_confirm(struct sk_buff *skb)
            not in the hash. If there is, we lost race. */
         hlist_nulls_for_each_entry(h, n, &net->ct.hash[hash], hnnode)
                 if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
-                                    zone))
+                                    zone, net))
                         goto out;
 
         hlist_nulls_for_each_entry(h, n, &net->ct.hash[reply_hash], hnnode)
                 if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_REPLY].tuple,
-                                    zone))
+                                    zone, net))
                         goto out;
 
         /* Timer relative to confirmation time, not original
@@ -740,7 +742,7 @@ nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
         hlist_nulls_for_each_entry_rcu(h, n, &ct_hash[hash], hnnode) {
                 ct = nf_ct_tuplehash_to_ctrack(h);
                 if (ct != ignored_conntrack &&
-                    nf_ct_key_equal(h, tuple, zone)) {
+                    nf_ct_key_equal(h, tuple, zone, net)) {
                         NF_CT_STAT_INC_ATOMIC(net, found);
                         rcu_read_unlock();
                         return 1;
@@ -1383,7 +1385,8 @@ get_next_corpse(struct net *net, int (*iter)(struct nf_conn *i, void *data),
                         if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL)
                                 continue;
                         ct = nf_ct_tuplehash_to_ctrack(h);
-                        if (iter(ct, data))
+                        if (net_eq(nf_ct_net(ct), net) &&
+                            iter(ct, data))
                                 goto found;
                 }
         }
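
For reference, here is a sketch of how nf_ct_key_equal() reads once the hunks above are applied. It is reconstructed purely from this diff: the comment inside the function is elided because the hunk only shows its closing */, and the trailing comments below are annotations added here, not part of the patch.

static inline bool
nf_ct_key_equal(struct nf_conntrack_tuple_hash *h,
                const struct nf_conntrack_tuple *tuple,
                const struct nf_conntrack_zone *zone,
                const struct net *net)
{
        struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);

        /* ... (comment elided; see the hunk at -456,7 above) */
        return nf_ct_tuple_equal(tuple, &h->tuple) &&            /* same tuple */
               nf_ct_zone_equal(ct, zone, NF_CT_DIRECTION(h)) && /* same zone */
               nf_ct_is_confirmed(ct) &&                         /* entry is confirmed */
               net_eq(net, nf_ct_net(ct));                       /* new: same netns */
}

The net effect of the call-site changes is that the caller's struct net now takes part in every key comparison (lookup, insert, confirm, and nf_conntrack_tuple_taken()), so a hash-table entry owned by a different network namespace can no longer be returned as a match; get_next_corpse() gains the equivalent net_eq() filter directly, since it walks buckets rather than comparing keys.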