@@ -72,7 +72,7 @@ void ovs_flow_stats_update(struct sw_flow *flow, __be16 tcp_flags,
			   const struct sk_buff *skb)
 {
 	struct flow_stats *stats;
-	int cpu = smp_processor_id();
+	unsigned int cpu = smp_processor_id();
 	int len = skb->len + (skb_vlan_tag_present(skb) ? VLAN_HLEN : 0);
 
 	stats = rcu_dereference(flow->stats[cpu]);
@@ -117,6 +117,7 @@ void ovs_flow_stats_update(struct sw_flow *flow, __be16 tcp_flags,
 
 				rcu_assign_pointer(flow->stats[cpu],
 						   new_stats);
+				cpumask_set_cpu(cpu, &flow->cpu_used_mask);
 				goto unlock;
 			}
 		}
@@ -144,7 +145,7 @@ void ovs_flow_stats_get(const struct sw_flow *flow,
 	memset(ovs_stats, 0, sizeof(*ovs_stats));
 
 	/* We open code this to make sure cpu 0 is always considered */
-	for (cpu = 0; cpu < nr_cpu_ids; cpu = cpumask_next(cpu, cpu_possible_mask)) {
+	for (cpu = 0; cpu < nr_cpu_ids; cpu = cpumask_next(cpu, &flow->cpu_used_mask)) {
 		struct flow_stats *stats = rcu_dereference_ovsl(flow->stats[cpu]);
 
 		if (stats) {
@@ -168,7 +169,7 @@ void ovs_flow_stats_clear(struct sw_flow *flow)
 	int cpu;
 
 	/* We open code this to make sure cpu 0 is always considered */
-	for (cpu = 0; cpu < nr_cpu_ids; cpu = cpumask_next(cpu, cpu_possible_mask)) {
+	for (cpu = 0; cpu < nr_cpu_ids; cpu = cpumask_next(cpu, &flow->cpu_used_mask)) {
 		struct flow_stats *stats = ovsl_dereference(flow->stats[cpu]);
 
 		if (stats) {