@@ -1190,7 +1190,50 @@ static size_t efx_ef10_describe_stats(struct efx_nic *efx, u8 *names)
 				      mask, names);
 }
 
-static int efx_ef10_try_update_nic_stats(struct efx_nic *efx)
+static size_t efx_ef10_update_stats_common(struct efx_nic *efx, u64 *full_stats,
+					   struct rtnl_link_stats64 *core_stats)
+{
+	DECLARE_BITMAP(mask, EF10_STAT_COUNT);
+	struct efx_ef10_nic_data *nic_data = efx->nic_data;
+	u64 *stats = nic_data->stats;
+	size_t stats_count = 0, index;
+
+	efx_ef10_get_stat_mask(efx, mask);
+
+	if (full_stats) {
+		for_each_set_bit(index, mask, EF10_STAT_COUNT) {
+			if (efx_ef10_stat_desc[index].name) {
+				*full_stats++ = stats[index];
+				++stats_count;
+			}
+		}
+	}
+
+	if (core_stats) {
+		core_stats->rx_packets = stats[EF10_STAT_port_rx_packets];
+		core_stats->tx_packets = stats[EF10_STAT_port_tx_packets];
+		core_stats->rx_bytes = stats[EF10_STAT_port_rx_bytes];
+		core_stats->tx_bytes = stats[EF10_STAT_port_tx_bytes];
+		core_stats->rx_dropped = stats[EF10_STAT_port_rx_nodesc_drops] +
+					 stats[GENERIC_STAT_rx_nodesc_trunc] +
+					 stats[GENERIC_STAT_rx_noskb_drops];
+		core_stats->multicast = stats[EF10_STAT_port_rx_multicast];
+		core_stats->rx_length_errors =
+				stats[EF10_STAT_port_rx_gtjumbo] +
+				stats[EF10_STAT_port_rx_length_error];
+		core_stats->rx_crc_errors = stats[EF10_STAT_port_rx_bad];
+		core_stats->rx_frame_errors =
+				stats[EF10_STAT_port_rx_align_error];
+		core_stats->rx_fifo_errors = stats[EF10_STAT_port_rx_overflow];
+		core_stats->rx_errors = (core_stats->rx_length_errors +
+					 core_stats->rx_crc_errors +
+					 core_stats->rx_frame_errors);
+	}
+
+	return stats_count;
+}
+
+static int efx_ef10_try_update_nic_stats_pf(struct efx_nic *efx)
 {
 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
 	DECLARE_BITMAP(mask, EF10_STAT_COUNT);
@@ -1227,57 +1270,83 @@ static int efx_ef10_try_update_nic_stats(struct efx_nic *efx)
 }
 
-static size_t efx_ef10_update_stats(struct efx_nic *efx, u64 *full_stats,
-				    struct rtnl_link_stats64 *core_stats)
+static size_t efx_ef10_update_stats_pf(struct efx_nic *efx, u64 *full_stats,
+				       struct rtnl_link_stats64 *core_stats)
 {
-	DECLARE_BITMAP(mask, EF10_STAT_COUNT);
-	struct efx_ef10_nic_data *nic_data = efx->nic_data;
-	u64 *stats = nic_data->stats;
-	size_t stats_count = 0, index;
 	int retry;
 
-	efx_ef10_get_stat_mask(efx, mask);
-
 	/* If we're unlucky enough to read statistics during the DMA, wait
 	 * up to 10ms for it to finish (typically takes <500us)
 	 */
 	for (retry = 0; retry < 100; ++retry) {
-		if (efx_ef10_try_update_nic_stats(efx) == 0)
+		if (efx_ef10_try_update_nic_stats_pf(efx) == 0)
 			break;
 		udelay(100);
 	}
 
-	if (full_stats) {
-		for_each_set_bit(index, mask, EF10_STAT_COUNT) {
-			if (efx_ef10_stat_desc[index].name) {
-				*full_stats++ = stats[index];
-				++stats_count;
-			}
-		}
-	}
+	return efx_ef10_update_stats_common(efx, full_stats, core_stats);
+}
 
-	if (core_stats) {
-		core_stats->rx_packets = stats[EF10_STAT_port_rx_packets];
-		core_stats->tx_packets = stats[EF10_STAT_port_tx_packets];
-		core_stats->rx_bytes = stats[EF10_STAT_port_rx_bytes];
-		core_stats->tx_bytes = stats[EF10_STAT_port_tx_bytes];
-		core_stats->rx_dropped = stats[EF10_STAT_port_rx_nodesc_drops] +
-					 stats[GENERIC_STAT_rx_nodesc_trunc] +
-					 stats[GENERIC_STAT_rx_noskb_drops];
-		core_stats->multicast = stats[EF10_STAT_port_rx_multicast];
-		core_stats->rx_length_errors =
-				stats[EF10_STAT_port_rx_gtjumbo] +
-				stats[EF10_STAT_port_rx_length_error];
-		core_stats->rx_crc_errors = stats[EF10_STAT_port_rx_bad];
-		core_stats->rx_frame_errors =
-				stats[EF10_STAT_port_rx_align_error];
-		core_stats->rx_fifo_errors = stats[EF10_STAT_port_rx_overflow];
-		core_stats->rx_errors = (core_stats->rx_length_errors +
-					 core_stats->rx_crc_errors +
-					 core_stats->rx_frame_errors);
+static int efx_ef10_try_update_nic_stats_vf(struct efx_nic *efx)
+{
+	MCDI_DECLARE_BUF(inbuf, MC_CMD_MAC_STATS_IN_LEN);
+	struct efx_ef10_nic_data *nic_data = efx->nic_data;
+	DECLARE_BITMAP(mask, EF10_STAT_COUNT);
+	__le64 generation_start, generation_end;
+	u64 *stats = nic_data->stats;
+	u32 dma_len = MC_CMD_MAC_NSTATS * sizeof(u64);
+	struct efx_buffer stats_buf;
+	__le64 *dma_stats;
+	int rc;
+
+	efx_ef10_get_stat_mask(efx, mask);
+
+	rc = efx_nic_alloc_buffer(efx, &stats_buf, dma_len, GFP_ATOMIC);
+	if (rc)
+		return rc;
+
+	dma_stats = stats_buf.addr;
+	dma_stats[MC_CMD_MAC_GENERATION_END] = EFX_MC_STATS_GENERATION_INVALID;
+
+	MCDI_SET_QWORD(inbuf, MAC_STATS_IN_DMA_ADDR, stats_buf.dma_addr);
+	MCDI_POPULATE_DWORD_1(inbuf, MAC_STATS_IN_CMD,
+			      MAC_STATS_IN_DMA, true);
+	MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_LEN, dma_len);
+	MCDI_SET_DWORD(inbuf, MAC_STATS_IN_PORT_ID, EVB_PORT_ID_ASSIGNED);
+
+	spin_unlock_bh(&efx->stats_lock);
+	rc = efx_mcdi_rpc(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf), NULL,
+			  0, NULL);
+	spin_lock_bh(&efx->stats_lock);
+	if (rc)
+		goto out;
+
+	generation_end = dma_stats[MC_CMD_MAC_GENERATION_END];
+	if (generation_end == EFX_MC_STATS_GENERATION_INVALID)
+		goto out;
+	rmb();
+	efx_nic_update_stats(efx_ef10_stat_desc, EF10_STAT_COUNT, mask,
+			     stats, stats_buf.addr, false);
+	rmb();
+	generation_start = dma_stats[MC_CMD_MAC_GENERATION_START];
+	if (generation_end != generation_start) {
+		rc = -EAGAIN;
+		goto out;
 	}
 
-	return stats_count;
+	efx_update_sw_stats(efx, stats);
+out:
+	efx_nic_free_buffer(efx, &stats_buf);
+	return rc;
+}
+
+static size_t efx_ef10_update_stats_vf(struct efx_nic *efx, u64 *full_stats,
+				       struct rtnl_link_stats64 *core_stats)
+{
+	if (efx_ef10_try_update_nic_stats_vf(efx))
+		return 0;
+
+	return efx_ef10_update_stats_common(efx, full_stats, core_stats);
 }
 
 static void efx_ef10_push_irq_moderation(struct efx_channel *channel)
@@ -4122,7 +4191,7 @@ const struct efx_nic_type efx_hunt_a0_vf_nic_type = {
 	.prepare_flr = efx_ef10_prepare_flr,
 	.finish_flr = efx_port_dummy_op_void,
 	.describe_stats = efx_ef10_describe_stats,
-	.update_stats = efx_ef10_update_stats,
+	.update_stats = efx_ef10_update_stats_vf,
 	.start_stats = efx_port_dummy_op_void,
 	.pull_stats = efx_port_dummy_op_void,
 	.stop_stats = efx_port_dummy_op_void,
@@ -4224,7 +4293,7 @@ const struct efx_nic_type efx_hunt_a0_nic_type = {
 	.prepare_flr = efx_ef10_prepare_flr,
 	.finish_flr = efx_port_dummy_op_void,
 	.describe_stats = efx_ef10_describe_stats,
-	.update_stats = efx_ef10_update_stats,
+	.update_stats = efx_ef10_update_stats_pf,
 	.start_stats = efx_mcdi_mac_start_stats,
 	.pull_stats = efx_mcdi_mac_pull_stats,
 	.stop_stats = efx_mcdi_mac_stop_stats,