@@ -126,7 +126,8 @@ EXPORT_PER_CPU_SYMBOL(nf_conntrack_untracked);
 unsigned int nf_conntrack_hash_rnd __read_mostly;
 EXPORT_SYMBOL_GPL(nf_conntrack_hash_rnd);
 
-static u32 hash_conntrack_raw(const struct nf_conntrack_tuple *tuple, u16 zone)
+static u32 hash_conntrack_raw(const struct nf_conntrack_tuple *tuple,
+			      const struct nf_conntrack_zone *zone)
 {
 	unsigned int n;
 
@@ -135,7 +136,7 @@ static u32 hash_conntrack_raw(const struct nf_conntrack_tuple *tuple, u16 zone)
 	 * three bytes manually.
 	 */
 	n = (sizeof(tuple->src) + sizeof(tuple->dst.u3)) / sizeof(u32);
-	return jhash2((u32 *)tuple, n, zone ^ nf_conntrack_hash_rnd ^
+	return jhash2((u32 *)tuple, n, zone->id ^ nf_conntrack_hash_rnd ^
		      (((__force __u16)tuple->dst.u.all << 16) |
		       tuple->dst.protonum));
 }
@@ -151,12 +152,14 @@ static u32 hash_bucket(u32 hash, const struct net *net)
 }
 
 static u_int32_t __hash_conntrack(const struct nf_conntrack_tuple *tuple,
-				  u16 zone, unsigned int size)
+				  const struct nf_conntrack_zone *zone,
+				  unsigned int size)
 {
 	return __hash_bucket(hash_conntrack_raw(tuple, zone), size);
 }
 
-static inline u_int32_t hash_conntrack(const struct net *net, u16 zone,
+static inline u_int32_t hash_conntrack(const struct net *net,
+				       const struct nf_conntrack_zone *zone,
 				       const struct nf_conntrack_tuple *tuple)
 {
 	return __hash_conntrack(tuple, zone, net->ct.htable_size);
@@ -288,7 +291,9 @@ static void nf_ct_del_from_dying_or_unconfirmed_list(struct nf_conn *ct)
 }
 
 /* Released via destroy_conntrack() */
-struct nf_conn *nf_ct_tmpl_alloc(struct net *net, u16 zone, gfp_t flags)
+struct nf_conn *nf_ct_tmpl_alloc(struct net *net,
+				 const struct nf_conntrack_zone *zone,
+				 gfp_t flags)
 {
 	struct nf_conn *tmpl;
 
@@ -306,7 +311,7 @@ struct nf_conn *nf_ct_tmpl_alloc(struct net *net, u16 zone, gfp_t flags)
 		nf_ct_zone = nf_ct_ext_add(tmpl, NF_CT_EXT_ZONE, GFP_ATOMIC);
 		if (!nf_ct_zone)
 			goto out_free;
-		nf_ct_zone->id = zone;
+		nf_ct_zone->id = zone->id;
 	}
 #endif
 	atomic_set(&tmpl->ct_general.use, 0);
@@ -371,11 +376,12 @@ destroy_conntrack(struct nf_conntrack *nfct)
 
 static void nf_ct_delete_from_lists(struct nf_conn *ct)
 {
+	const struct nf_conntrack_zone *zone;
 	struct net *net = nf_ct_net(ct);
 	unsigned int hash, reply_hash;
-	u16 zone = nf_ct_zone(ct);
 	unsigned int sequence;
 
+	zone = nf_ct_zone(ct);
 	nf_ct_helper_destroy(ct);
 
 	local_bh_disable();
@@ -431,8 +437,8 @@ static void death_by_timeout(unsigned long ul_conntrack)
 
 static inline bool
 nf_ct_key_equal(struct nf_conntrack_tuple_hash *h,
-		const struct nf_conntrack_tuple *tuple,
-		u16 zone)
+		const struct nf_conntrack_tuple *tuple,
+		const struct nf_conntrack_zone *zone)
 {
 	struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
 
@@ -440,8 +446,8 @@ nf_ct_key_equal(struct nf_conntrack_tuple_hash *h,
 	 * so we need to check that the conntrack is confirmed
 	 */
 	return nf_ct_tuple_equal(tuple, &h->tuple) &&
-	       nf_ct_zone(ct) == zone &&
-	       nf_ct_is_confirmed(ct);
+	       nf_ct_zone_equal(ct, zone) &&
+	       nf_ct_is_confirmed(ct);
 }
 
 /*
@@ -450,7 +456,7 @@ nf_ct_key_equal(struct nf_conntrack_tuple_hash *h,
  * and recheck nf_ct_tuple_equal(tuple, &h->tuple)
  */
 static struct nf_conntrack_tuple_hash *
-____nf_conntrack_find(struct net *net, u16 zone,
+____nf_conntrack_find(struct net *net, const struct nf_conntrack_zone *zone,
 		      const struct nf_conntrack_tuple *tuple, u32 hash)
 {
 	struct nf_conntrack_tuple_hash *h;
@@ -486,7 +492,7 @@ begin:
 
 /* Find a connection corresponding to a tuple. */
 static struct nf_conntrack_tuple_hash *
-__nf_conntrack_find_get(struct net *net, u16 zone,
+__nf_conntrack_find_get(struct net *net, const struct nf_conntrack_zone *zone,
 			const struct nf_conntrack_tuple *tuple, u32 hash)
 {
 	struct nf_conntrack_tuple_hash *h;
@@ -513,7 +519,7 @@ begin:
 }
 
 struct nf_conntrack_tuple_hash *
-nf_conntrack_find_get(struct net *net, u16 zone,
+nf_conntrack_find_get(struct net *net, const struct nf_conntrack_zone *zone,
 		      const struct nf_conntrack_tuple *tuple)
 {
 	return __nf_conntrack_find_get(net, zone, tuple,
@@ -536,11 +542,11 @@ static void __nf_conntrack_hash_insert(struct nf_conn *ct,
 int
 nf_conntrack_hash_check_insert(struct nf_conn *ct)
 {
+	const struct nf_conntrack_zone *zone;
 	struct net *net = nf_ct_net(ct);
 	unsigned int hash, reply_hash;
 	struct nf_conntrack_tuple_hash *h;
 	struct hlist_nulls_node *n;
-	u16 zone;
 	unsigned int sequence;
 
 	zone = nf_ct_zone(ct);
@@ -558,12 +564,12 @@ nf_conntrack_hash_check_insert(struct nf_conn *ct)
 	hlist_nulls_for_each_entry(h, n, &net->ct.hash[hash], hnnode)
 		if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
 				      &h->tuple) &&
-		    zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)))
+		    nf_ct_zone_equal(nf_ct_tuplehash_to_ctrack(h), zone))
 			goto out;
 	hlist_nulls_for_each_entry(h, n, &net->ct.hash[reply_hash], hnnode)
 		if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_REPLY].tuple,
 				      &h->tuple) &&
-		    zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)))
+		    nf_ct_zone_equal(nf_ct_tuplehash_to_ctrack(h), zone))
 			goto out;
 
 	add_timer(&ct->timeout);
@@ -588,6 +594,7 @@ EXPORT_SYMBOL_GPL(nf_conntrack_hash_check_insert);
 int
 __nf_conntrack_confirm(struct sk_buff *skb)
 {
+	const struct nf_conntrack_zone *zone;
 	unsigned int hash, reply_hash;
 	struct nf_conntrack_tuple_hash *h;
 	struct nf_conn *ct;
@@ -596,7 +603,6 @@ __nf_conntrack_confirm(struct sk_buff *skb)
 	struct hlist_nulls_node *n;
 	enum ip_conntrack_info ctinfo;
 	struct net *net;
-	u16 zone;
 	unsigned int sequence;
 
 	ct = nf_ct_get(skb, &ctinfo);
@@ -649,12 +655,12 @@ __nf_conntrack_confirm(struct sk_buff *skb)
 	hlist_nulls_for_each_entry(h, n, &net->ct.hash[hash], hnnode)
 		if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
 				      &h->tuple) &&
-		    zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)))
+		    nf_ct_zone_equal(nf_ct_tuplehash_to_ctrack(h), zone))
 			goto out;
 	hlist_nulls_for_each_entry(h, n, &net->ct.hash[reply_hash], hnnode)
 		if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_REPLY].tuple,
 				      &h->tuple) &&
-		    zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)))
+		    nf_ct_zone_equal(nf_ct_tuplehash_to_ctrack(h), zone))
 			goto out;
 
 	/* Timer relative to confirmation time, not original
@@ -707,11 +713,14 @@ nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
 			 const struct nf_conn *ignored_conntrack)
 {
 	struct net *net = nf_ct_net(ignored_conntrack);
+	const struct nf_conntrack_zone *zone;
 	struct nf_conntrack_tuple_hash *h;
 	struct hlist_nulls_node *n;
 	struct nf_conn *ct;
-	u16 zone = nf_ct_zone(ignored_conntrack);
-	unsigned int hash = hash_conntrack(net, zone, tuple);
+	unsigned int hash;
+
+	zone = nf_ct_zone(ignored_conntrack);
+	hash = hash_conntrack(net, zone, tuple);
 
 	/* Disable BHs the entire time since we need to disable them at
 	 * least once for the stats anyway.
@@ -721,7 +730,7 @@ nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
 		ct = nf_ct_tuplehash_to_ctrack(h);
 		if (ct != ignored_conntrack &&
 		    nf_ct_tuple_equal(tuple, &h->tuple) &&
-		    nf_ct_zone(ct) == zone) {
+		    nf_ct_zone_equal(ct, zone)) {
 			NF_CT_STAT_INC(net, found);
 			rcu_read_unlock_bh();
 			return 1;
@@ -810,7 +819,8 @@ void init_nf_conntrack_hash_rnd(void)
 }
 
 static struct nf_conn *
-__nf_conntrack_alloc(struct net *net, u16 zone,
+__nf_conntrack_alloc(struct net *net,
+		     const struct nf_conntrack_zone *zone,
 		     const struct nf_conntrack_tuple *orig,
 		     const struct nf_conntrack_tuple *repl,
 		     gfp_t gfp, u32 hash)
@@ -864,7 +874,7 @@ __nf_conntrack_alloc(struct net *net, u16 zone,
 		nf_ct_zone = nf_ct_ext_add(ct, NF_CT_EXT_ZONE, GFP_ATOMIC);
 		if (!nf_ct_zone)
 			goto out_free;
-		nf_ct_zone->id = zone;
+		nf_ct_zone->id = zone->id;
 	}
 #endif
 	/* Because we use RCU lookups, we set ct_general.use to zero before
@@ -881,7 +891,8 @@ out_free:
 #endif
 }
 
-struct nf_conn *nf_conntrack_alloc(struct net *net, u16 zone,
+struct nf_conn *nf_conntrack_alloc(struct net *net,
+				   const struct nf_conntrack_zone *zone,
 				   const struct nf_conntrack_tuple *orig,
 				   const struct nf_conntrack_tuple *repl,
 				   gfp_t gfp)
@@ -923,7 +934,7 @@ init_conntrack(struct net *net, struct nf_conn *tmpl,
 	struct nf_conntrack_tuple repl_tuple;
 	struct nf_conntrack_ecache *ecache;
 	struct nf_conntrack_expect *exp = NULL;
-	u16 zone = tmpl ? nf_ct_zone(tmpl) : NF_CT_DEFAULT_ZONE;
+	const struct nf_conntrack_zone *zone;
 	struct nf_conn_timeout *timeout_ext;
 	unsigned int *timeouts;
 
@@ -932,6 +943,7 @@ init_conntrack(struct net *net, struct nf_conn *tmpl,
 		return NULL;
 	}
 
+	zone = nf_ct_zone_tmpl(tmpl);
 	ct = __nf_conntrack_alloc(net, zone, tuple, &repl_tuple, GFP_ATOMIC,
 				  hash);
 	if (IS_ERR(ct))
@@ -1026,10 +1038,10 @@ resolve_normal_ct(struct net *net, struct nf_conn *tmpl,
 		  int *set_reply,
 		  enum ip_conntrack_info *ctinfo)
 {
+	const struct nf_conntrack_zone *zone;
 	struct nf_conntrack_tuple tuple;
 	struct nf_conntrack_tuple_hash *h;
 	struct nf_conn *ct;
-	u16 zone = tmpl ? nf_ct_zone(tmpl) : NF_CT_DEFAULT_ZONE;
 	u32 hash;
 
 	if (!nf_ct_get_tuple(skb, skb_network_offset(skb),
@@ -1040,6 +1052,7 @@ resolve_normal_ct(struct net *net, struct nf_conn *tmpl,
 	}
 
 	/* look for tuple match */
+	zone = nf_ct_zone_tmpl(tmpl);
 	hash = hash_conntrack_raw(&tuple, zone);
 	h = __nf_conntrack_find_get(net, zone, &tuple, hash);
 	if (!h) {
@@ -1290,6 +1303,12 @@ bool __nf_ct_kill_acct(struct nf_conn *ct,
 }
 EXPORT_SYMBOL_GPL(__nf_ct_kill_acct);
 
+/* Built-in default zone used e.g. by modules. */
+const struct nf_conntrack_zone nf_ct_zone_dflt = {
+	.id	= NF_CT_DEFAULT_ZONE_ID,
+};
+EXPORT_SYMBOL_GPL(nf_ct_zone_dflt);
+
 #ifdef CONFIG_NF_CONNTRACK_ZONES
 static struct nf_ct_ext_type nf_ct_zone_extend __read_mostly = {
 	.len	= sizeof(struct nf_conntrack_zone),
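
--

Note: the hunks above convert call sites to two zone helpers,
nf_ct_zone_tmpl() and nf_ct_zone_equal(), whose definitions are not part
of this file. As a minimal sketch only, inferred from how the call sites
use them (the real declarations presumably live in
include/net/netfilter/nf_conntrack_zones.h; exact bodies here are an
assumption), they could look like:

static inline const struct nf_conntrack_zone *
nf_ct_zone_tmpl(const struct nf_conn *tmpl)
{
	/* Use the template's zone when one exists; otherwise fall back
	 * to the built-in default zone exported above. (Sketch.)
	 */
	return tmpl ? nf_ct_zone(tmpl) : &nf_ct_zone_dflt;
}

static inline bool nf_ct_zone_equal(const struct nf_conn *a,
				    const struct nf_conntrack_zone *b)
{
	/* Two entries share a zone iff their ids match, preserving the
	 * semantics of the old u16 comparisons replaced above. (Sketch.)
	 */
	return nf_ct_zone(a)->id == b->id;
}

Passing a zone object instead of a bare u16 keeps every hash and
comparison unchanged (they still reduce to zone->id), while letting
struct nf_conntrack_zone grow additional fields later without touching
these call sites again.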
|