@@ -65,8 +65,9 @@ void ovs_flow_stats_update(struct sw_flow *flow, struct sk_buff *skb)
 {
 	struct flow_stats *stats;
 	__be16 tcp_flags = 0;
+	int node = numa_node_id();
 
-	stats = this_cpu_ptr(flow->stats);
+	stats = rcu_dereference(flow->stats[node]);
 
 	if ((flow->key.eth.type == htons(ETH_P_IP) ||
 	     flow->key.eth.type == htons(ETH_P_IPV6)) &&
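For context, a rough sketch of the data structures the hunks above and below assume (declared in the flow header, not shown in this diff): the single per-CPU stats pointer in 'struct sw_flow' becomes an RCU-protected array with one 'struct flow_stats' slot per NUMA node, plus a 'stats_last_writer' node id tracking who last wrote the pre-allocated slot. Field names follow the code in this patch; the layout and comments are illustrative only, not the verbatim header.

/* Sketch only -- illustrative, not the verbatim declarations. */
struct flow_stats {
	u64 packet_count;	/* Number of packets matched. */
	u64 byte_count;		/* Number of bytes matched. */
	unsigned long used;	/* Last used time (in jiffies). */
	spinlock_t lock;	/* Guards the counters above. */
	__be16 tcp_flags;	/* Union of seen TCP flags. */
};

struct sw_flow {
	/* ... key, mask, actions, etc. ... */
	int stats_last_writer;		  /* NUMA node id of the last writer
					   * on 'stats[0]'. */
	struct flow_stats __rcu *stats[]; /* One entry per NUMA node; stats[0]
					   * is pre-allocated, the rest are
					   * allocated on demand. */
};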
@@ -76,68 +77,102 @@ void ovs_flow_stats_update(struct sw_flow *flow, struct sk_buff *skb)
 		tcp_flags = TCP_FLAGS_BE16(tcp_hdr(skb));
 	}
 
-	spin_lock(&stats->lock);
+	/* Check if already have node-specific stats. */
+	if (likely(stats)) {
+		spin_lock(&stats->lock);
+		/* Mark if we write on the pre-allocated stats. */
+		if (node == 0 && unlikely(flow->stats_last_writer != node))
+			flow->stats_last_writer = node;
+	} else {
+		stats = rcu_dereference(flow->stats[0]); /* Pre-allocated. */
+		spin_lock(&stats->lock);
+
+		/* If the current NUMA-node is the only writer on the
+		 * pre-allocated stats keep using them.
+		 */
+		if (unlikely(flow->stats_last_writer != node)) {
+			/* A previous locker may have already allocated the
+			 * stats, so we need to check again.  If node-specific
+			 * stats were already allocated, we update the pre-
+			 * allocated stats as we have already locked them.
+			 */
+			if (likely(flow->stats_last_writer != NUMA_NO_NODE)
+			    && likely(!rcu_dereference(flow->stats[node]))) {
+				/* Try to allocate node-specific stats. */
+				struct flow_stats *new_stats;
+
+				new_stats =
+					kmem_cache_alloc_node(flow_stats_cache,
+							      GFP_THISNODE |
+							      __GFP_NOMEMALLOC,
+							      node);
+				if (likely(new_stats)) {
+					new_stats->used = jiffies;
+					new_stats->packet_count = 1;
+					new_stats->byte_count = skb->len;
+					new_stats->tcp_flags = tcp_flags;
+					spin_lock_init(&new_stats->lock);
+
+					rcu_assign_pointer(flow->stats[node],
+							   new_stats);
+					goto unlock;
+				}
+			}
+			flow->stats_last_writer = node;
+		}
+	}
+
 	stats->used = jiffies;
 	stats->packet_count++;
 	stats->byte_count += skb->len;
 	stats->tcp_flags |= tcp_flags;
-	spin_unlock(&stats->lock);
-}
-
-static void stats_read(struct flow_stats *stats,
-		       struct ovs_flow_stats *ovs_stats,
-		       unsigned long *used, __be16 *tcp_flags)
-{
-	spin_lock(&stats->lock);
-	if (!*used || time_after(stats->used, *used))
-		*used = stats->used;
-	*tcp_flags |= stats->tcp_flags;
-	ovs_stats->n_packets += stats->packet_count;
-	ovs_stats->n_bytes += stats->byte_count;
+unlock:
 	spin_unlock(&stats->lock);
 }
 
 void ovs_flow_stats_get(struct sw_flow *flow, struct ovs_flow_stats *ovs_stats,
 			unsigned long *used, __be16 *tcp_flags)
 {
-	int cpu;
+	int node;
 
 	*used = 0;
 	*tcp_flags = 0;
 	memset(ovs_stats, 0, sizeof(*ovs_stats));
 
-	local_bh_disable();
-
-	for_each_possible_cpu(cpu) {
-		struct flow_stats *stats;
+	for_each_node(node) {
+		struct flow_stats *stats = rcu_dereference(flow->stats[node]);
 
-		stats = per_cpu_ptr(flow->stats.cpu_stats, cpu);
-		stats_read(stats, ovs_stats, used, tcp_flags);
+		if (stats) {
+			/* Local CPU may write on non-local stats, so we must
+			 * block bottom-halves here.
+			 */
+			spin_lock_bh(&stats->lock);
+			if (!*used || time_after(stats->used, *used))
+				*used = stats->used;
+			*tcp_flags |= stats->tcp_flags;
+			ovs_stats->n_packets += stats->packet_count;
+			ovs_stats->n_bytes += stats->byte_count;
+			spin_unlock_bh(&stats->lock);
+		}
 	}
-
-	local_bh_enable();
-}
-
-static void stats_reset(struct flow_stats *stats)
-{
-	spin_lock(&stats->lock);
-	stats->used = 0;
-	stats->packet_count = 0;
-	stats->byte_count = 0;
-	stats->tcp_flags = 0;
-	spin_unlock(&stats->lock);
 }
 
 void ovs_flow_stats_clear(struct sw_flow *flow)
 {
-	int cpu;
-
-	local_bh_disable();
-
-	for_each_possible_cpu(cpu)
-		stats_reset(per_cpu_ptr(flow->stats, cpu));
-
-	local_bh_enable();
+	int node;
+
+	for_each_node(node) {
+		struct flow_stats *stats = rcu_dereference(flow->stats[node]);
+
+		if (stats) {
+			spin_lock_bh(&stats->lock);
+			stats->used = 0;
+			stats->packet_count = 0;
+			stats->byte_count = 0;
+			stats->tcp_flags = 0;
+			spin_unlock_bh(&stats->lock);
+		}
+	}
 }
 
 static int check_header(struct sk_buff *skb, int len)
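The update path above also relies on setup done elsewhere in the patch: a 'flow_stats_cache' slab cache for 'struct flow_stats', and a pre-allocated 'stats[0]' entry created with each flow, with 'stats_last_writer' starting at NUMA_NO_NODE so the first writer claims it. A rough sketch of that setup, assuming the usual kernel slab and RCU helpers; the function names and exact kmem_cache_create() arguments here are illustrative assumptions, not the verbatim patch:

/* Sketch only -- illustrative setup for the per-node stats array. */
struct kmem_cache *flow_stats_cache;

static int example_flow_stats_init(void)
{
	/* Dedicated cache so node-local entries can later be carved out
	 * with kmem_cache_alloc_node(), as ovs_flow_stats_update() does.
	 */
	flow_stats_cache = kmem_cache_create("sw_flow_stats",
					     sizeof(struct flow_stats), 0,
					     SLAB_HWCACHE_ALIGN, NULL);
	return flow_stats_cache ? 0 : -ENOMEM;
}

static int example_flow_stats_prealloc(struct sw_flow *flow)
{
	struct flow_stats *stats;
	int node;

	/* Only stats[0] exists up front; the other nodes start out NULL
	 * and are filled in lazily by the update path.
	 */
	stats = kmem_cache_alloc_node(flow_stats_cache,
				      GFP_KERNEL | __GFP_ZERO, 0);
	if (!stats)
		return -ENOMEM;

	spin_lock_init(&stats->lock);
	flow->stats_last_writer = NUMA_NO_NODE;
	RCU_INIT_POINTER(flow->stats[0], stats);
	for_each_node(node)
		if (node != 0)
			RCU_INIT_POINTER(flow->stats[node], NULL);
	return 0;
}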