@@ -49,6 +49,7 @@ struct nf_conncount_tuple {
 	struct nf_conntrack_zone	zone;
 	int				cpu;
 	u32				jiffies32;
+	bool				dead;
 	struct rcu_head			rcu_head;
 };
 
@@ -106,15 +107,16 @@ nf_conncount_add(struct nf_conncount_list *list,
 	conn->zone = *zone;
 	conn->cpu = raw_smp_processor_id();
 	conn->jiffies32 = (u32)jiffies;
-	spin_lock(&list->list_lock);
+	conn->dead = false;
+	spin_lock_bh(&list->list_lock);
 	if (list->dead == true) {
 		kmem_cache_free(conncount_conn_cachep, conn);
-		spin_unlock(&list->list_lock);
+		spin_unlock_bh(&list->list_lock);
 		return NF_CONNCOUNT_SKIP;
 	}
 	list_add_tail(&conn->node, &list->head);
 	list->count++;
-	spin_unlock(&list->list_lock);
+	spin_unlock_bh(&list->list_lock);
 	return NF_CONNCOUNT_ADDED;
 }
 EXPORT_SYMBOL_GPL(nf_conncount_add);
@@ -132,19 +134,22 @@ static bool conn_free(struct nf_conncount_list *list,
 {
 	bool free_entry = false;
 
-	spin_lock(&list->list_lock);
+	spin_lock_bh(&list->list_lock);
 
-	if (list->count == 0) {
-		spin_unlock(&list->list_lock);
-		return free_entry;
+	if (conn->dead) {
+		spin_unlock_bh(&list->list_lock);
+		return free_entry;
 	}
 
 	list->count--;
+	conn->dead = true;
 	list_del_rcu(&conn->node);
-	if (list->count == 0)
+	if (list->count == 0) {
+		list->dead = true;
 		free_entry = true;
+	}
 
-	spin_unlock(&list->list_lock);
+	spin_unlock_bh(&list->list_lock);
 	call_rcu(&conn->rcu_head, __conn_free);
 	return free_entry;
 }
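
A note on the conn_free() hunk above: the new per-entry dead flag makes the unlink idempotent. Whichever caller takes list_lock first does the list_del_rcu() and the accounting; a second caller racing in afterwards sees conn->dead, drops the lock and returns without touching the list again. Below is a minimal standalone sketch of that guard (not kernel code): a pthread mutex stands in for the bh-disabling spinlock, a hand-rolled circular list stands in for list_head, the RCU-deferred free is omitted, and every name in it is invented for the illustration.

/* Standalone illustration of the "dead entry" guard, not kernel code. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct entry {
	struct entry *prev, *next;
	bool dead;			/* set once the entry has been unlinked */
};

struct counter_list {
	pthread_mutex_t lock;		/* stands in for list->list_lock */
	struct entry head;		/* circular list head */
	unsigned int count;
	bool dead;			/* list no longer accepts insertions */
};

/* Mirrors the reworked conn_free(): unlink at most once, flag empty lists. */
static bool entry_free(struct counter_list *list, struct entry *e)
{
	bool became_empty = false;

	pthread_mutex_lock(&list->lock);
	if (e->dead) {			/* lost the race: already unlinked */
		pthread_mutex_unlock(&list->lock);
		return false;
	}
	e->dead = true;
	e->prev->next = e->next;	/* list_del() equivalent */
	e->next->prev = e->prev;
	if (--list->count == 0) {
		list->dead = true;
		became_empty = true;
	}
	pthread_mutex_unlock(&list->lock);
	return became_empty;
}

int main(void)
{
	struct counter_list l;
	struct entry e = { .dead = false };

	pthread_mutex_init(&l.lock, NULL);
	l.head.prev = l.head.next = &l.head;
	l.count = 0;
	l.dead = false;

	/* Insert one entry, then free it twice: only the first call unlinks. */
	e.prev = l.head.prev;
	e.next = &l.head;
	l.head.prev->next = &e;
	l.head.prev = &e;
	l.count = 1;

	printf("first free:  %d\n", entry_free(&l, &e));	/* 1: list now empty */
	printf("second free: %d\n", entry_free(&l, &e));	/* 0: entry already dead */
	return 0;
}
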
@@ -245,7 +250,7 @@ void nf_conncount_list_init(struct nf_conncount_list *list)
 {
 	spin_lock_init(&list->list_lock);
 	INIT_LIST_HEAD(&list->head);
-	list->count = 1;
+	list->count = 0;
 	list->dead = false;
 }
 EXPORT_SYMBOL_GPL(nf_conncount_list_init);
@@ -259,6 +264,7 @@ bool nf_conncount_gc_list(struct net *net,
 	struct nf_conn *found_ct;
 	unsigned int collected = 0;
 	bool free_entry = false;
+	bool ret = false;
 
 	list_for_each_entry_safe(conn, conn_n, &list->head, node) {
 		found = find_or_evict(net, list, conn, &free_entry);
@@ -288,7 +294,15 @@ bool nf_conncount_gc_list(struct net *net,
 		if (collected > CONNCOUNT_GC_MAX_NODES)
 			return false;
 	}
-	return false;
+
+	spin_lock_bh(&list->list_lock);
+	if (!list->count) {
+		list->dead = true;
+		ret = true;
+	}
+	spin_unlock_bh(&list->list_lock);
+
+	return ret;
 }
 EXPORT_SYMBOL_GPL(nf_conncount_gc_list);
 
@@ -309,11 +323,8 @@ static void tree_nodes_free(struct rb_root *root,
 	while (gc_count) {
 		rbconn = gc_nodes[--gc_count];
 		spin_lock(&rbconn->list.list_lock);
-		if (rbconn->list.count == 0 && rbconn->list.dead == false) {
-			rbconn->list.dead = true;
-			rb_erase(&rbconn->node, root);
-			call_rcu(&rbconn->rcu_head, __tree_nodes_free);
-		}
+		rb_erase(&rbconn->node, root);
+		call_rcu(&rbconn->rcu_head, __tree_nodes_free);
 		spin_unlock(&rbconn->list.list_lock);
 	}
 }
@@ -414,6 +425,7 @@ insert_tree(struct net *net,
 	nf_conncount_list_init(&rbconn->list);
 	list_add(&conn->node, &rbconn->list.head);
 	count = 1;
+	rbconn->list.count = count;
 
 	rb_link_node(&rbconn->node, parent, rbnode);
 	rb_insert_color(&rbconn->node, root);
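
For completeness, a second standalone sketch of the contract the later hunks set up: nf_conncount_gc_list() now reports, under the list lock, whether it found the list empty and flagged it dead, and nf_conncount_add() refuses to insert into a dead list, which is why tree_nodes_free() no longer needs to re-check before erasing the rbtree node. As above this is an illustration only, with a pthread mutex in place of the kernel spinlock and invented type and function names, not the kernel API.

/* Standalone illustration of the gc/add handoff, not kernel code. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

enum add_result { ADD_OK, ADD_SKIP };

struct gc_list {
	pthread_mutex_t lock;
	unsigned int count;
	bool dead;		/* set by gc once it saw the list empty */
};

/* Mirrors nf_conncount_add(): never insert into a dying list. */
static enum add_result list_add_entry(struct gc_list *list)
{
	enum add_result ret = ADD_OK;

	pthread_mutex_lock(&list->lock);
	if (list->dead)
		ret = ADD_SKIP;	/* caller must start over with a fresh node */
	else
		list->count++;
	pthread_mutex_unlock(&list->lock);
	return ret;
}

/* Mirrors the tail of nf_conncount_gc_list(): flag an empty list as dead. */
static bool list_gc(struct gc_list *list)
{
	bool ret = false;

	pthread_mutex_lock(&list->lock);
	if (list->count == 0) {
		list->dead = true;
		ret = true;	/* caller may now erase and free the node */
	}
	pthread_mutex_unlock(&list->lock);
	return ret;
}

int main(void)
{
	struct gc_list l = { .count = 0, .dead = false };

	pthread_mutex_init(&l.lock, NULL);
	printf("gc on empty list: %d\n", list_gc(&l));		/* 1: flagged dead */
	printf("add after gc:     %d\n", list_add_entry(&l));	/* 1: ADD_SKIP */
	return 0;
}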