@@ -106,12 +106,45 @@ static void internal_dev_destructor(struct net_device *dev)
 	free_netdev(dev);
 }
 
+static struct rtnl_link_stats64 *
+internal_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
+{
+	int i;
+
+	memset(stats, 0, sizeof(*stats));
+	stats->rx_errors = dev->stats.rx_errors;
+	stats->tx_errors = dev->stats.tx_errors;
+	stats->tx_dropped = dev->stats.tx_dropped;
+	stats->rx_dropped = dev->stats.rx_dropped;
+
+	for_each_possible_cpu(i) {
+		const struct pcpu_sw_netstats *percpu_stats;
+		struct pcpu_sw_netstats local_stats;
+		unsigned int start;
+
+		percpu_stats = per_cpu_ptr(dev->tstats, i);
+
+		do {
+			start = u64_stats_fetch_begin_irq(&percpu_stats->syncp);
+			local_stats = *percpu_stats;
+		} while (u64_stats_fetch_retry_irq(&percpu_stats->syncp, start));
+
+		stats->rx_bytes += local_stats.rx_bytes;
+		stats->rx_packets += local_stats.rx_packets;
+		stats->tx_bytes += local_stats.tx_bytes;
+		stats->tx_packets += local_stats.tx_packets;
+	}
+
+	return stats;
+}
+
 static const struct net_device_ops internal_dev_netdev_ops = {
 	.ndo_open = internal_dev_open,
 	.ndo_stop = internal_dev_stop,
 	.ndo_start_xmit = internal_dev_xmit,
 	.ndo_set_mac_address = eth_mac_addr,
 	.ndo_change_mtu = internal_dev_change_mtu,
+	.ndo_get_stats64 = internal_get_stats,
 };
 
 static struct rtnl_link_ops internal_dev_link_ops __read_mostly = {
@@ -161,6 +194,11 @@ static struct vport *internal_dev_create(const struct vport_parms *parms)
 		err = -ENOMEM;
 		goto error_free_vport;
 	}
+	vport->dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
+	if (!vport->dev->tstats) {
+		err = -ENOMEM;
+		goto error_free_netdev;
+	}
 
 	dev_net_set(vport->dev, ovs_dp_get_net(vport->dp));
 	internal_dev = internal_dev_priv(vport->dev);
@@ -173,7 +211,7 @@ static struct vport *internal_dev_create(const struct vport_parms *parms)
 	rtnl_lock();
 	err = register_netdevice(vport->dev);
 	if (err)
-		goto error_free_netdev;
+		goto error_unlock;
 
 	dev_set_promiscuity(vport->dev, 1);
 	rtnl_unlock();
@@ -181,8 +219,10 @@ static struct vport *internal_dev_create(const struct vport_parms *parms)
 
 	return vport;
 
-error_free_netdev:
+error_unlock:
 	rtnl_unlock();
+	free_percpu(vport->dev->tstats);
+error_free_netdev:
 	free_netdev(vport->dev);
 error_free_vport:
 	ovs_vport_free(vport);
@@ -198,7 +238,7 @@ static void internal_dev_destroy(struct vport *vport)
 
 	/* unregister_netdevice() waits for an RCU grace period. */
 	unregister_netdevice(vport->dev);
-
+	free_percpu(vport->dev->tstats);
 	rtnl_unlock();
 }
 
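Note (not part of the patch): internal_get_stats() above only reads the per-CPU counters allocated with netdev_alloc_pcpu_stats(); the data paths are expected to bump them under the same u64_stats seqcount so the reader's fetch/retry loop sees consistent values. A minimal sketch of the write side, assuming the pcpu_sw_netstats layout used in this series (the helper name internal_dev_update_tx_stats is hypothetical, shown only to illustrate the pairing):

#include <linux/netdevice.h>
#include <linux/u64_stats_sync.h>

/* Hypothetical helper: charge one transmitted frame of 'len' bytes to the
 * current CPU's stats block hanging off dev->tstats. */
static void internal_dev_update_tx_stats(struct net_device *dev,
					 unsigned int len)
{
	struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);

	u64_stats_update_begin(&tstats->syncp);
	tstats->tx_packets++;
	tstats->tx_bytes += len;
	u64_stats_update_end(&tstats->syncp);
	/* internal_get_stats() later sums these per-CPU values under
	 * u64_stats_fetch_begin_irq()/u64_stats_fetch_retry_irq(). */
}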