|
@@ -2576,6 +2576,78 @@ dealloc_counters:
|
|
|
return ret;
|
|
|
}
|
|
|
|
|
|
/*
 * Human-readable names of the per-port Q-counter statistics exposed via
 * the rdma_hw_stats interface.  Index i pairs with stats_offsets[i]; the
 * two arrays must stay in lockstep (enforced by the BUILD_BUG_ON in
 * mlx5_ib_alloc_hw_stats()).
 *
 * Fix: the original declared "const char const *" — a duplicate
 * qualifier (both consts qualified char, flagged by checkpatch).  The
 * intent is an immutable array of pointers to immutable strings:
 * "const char *const".
 */
static const char *const names[] = {
	"rx_write_requests",
	"rx_read_requests",
	"rx_atomic_requests",
	"out_of_buffer",
	"out_of_sequence",
	"duplicate_request",
	"rnr_nak_retry_err",
	"packet_seq_err",
	"implied_nak_seq_err",
	"local_ack_timeout_err",
};
|
|
|
+
|
|
|
/*
 * Byte offset of each counter inside the query_q_counter_out firmware
 * layout.  stats_offsets[i] is the source of the value reported under
 * names[i] — the ordering of the two arrays must match exactly.
 */
static const size_t stats_offsets[] = {
	MLX5_BYTE_OFF(query_q_counter_out, rx_write_requests),
	MLX5_BYTE_OFF(query_q_counter_out, rx_read_requests),
	MLX5_BYTE_OFF(query_q_counter_out, rx_atomic_requests),
	MLX5_BYTE_OFF(query_q_counter_out, out_of_buffer),
	MLX5_BYTE_OFF(query_q_counter_out, out_of_sequence),
	MLX5_BYTE_OFF(query_q_counter_out, duplicate_request),
	MLX5_BYTE_OFF(query_q_counter_out, rnr_nak_retry_err),
	MLX5_BYTE_OFF(query_q_counter_out, packet_seq_err),
	MLX5_BYTE_OFF(query_q_counter_out, implied_nak_seq_err),
	MLX5_BYTE_OFF(query_q_counter_out, local_ack_timeout_err),
};
|
|
|
+
|
|
|
+static struct rdma_hw_stats *mlx5_ib_alloc_hw_stats(struct ib_device *ibdev,
|
|
|
+ u8 port_num)
|
|
|
+{
|
|
|
+ BUILD_BUG_ON(ARRAY_SIZE(names) != ARRAY_SIZE(stats_offsets));
|
|
|
+
|
|
|
+ /* We support only per port stats */
|
|
|
+ if (port_num == 0)
|
|
|
+ return NULL;
|
|
|
+
|
|
|
+ return rdma_alloc_hw_stats_struct(names, ARRAY_SIZE(names),
|
|
|
+ RDMA_HW_STATS_DEFAULT_LIFESPAN);
|
|
|
+}
|
|
|
+
|
|
|
+static int mlx5_ib_get_hw_stats(struct ib_device *ibdev,
|
|
|
+ struct rdma_hw_stats *stats,
|
|
|
+ u8 port, int index)
|
|
|
+{
|
|
|
+ struct mlx5_ib_dev *dev = to_mdev(ibdev);
|
|
|
+ int outlen = MLX5_ST_SZ_BYTES(query_q_counter_out);
|
|
|
+ void *out;
|
|
|
+ __be32 val;
|
|
|
+ int ret;
|
|
|
+ int i;
|
|
|
+
|
|
|
+ if (!port || !stats)
|
|
|
+ return -ENOSYS;
|
|
|
+
|
|
|
+ out = mlx5_vzalloc(outlen);
|
|
|
+ if (!out)
|
|
|
+ return -ENOMEM;
|
|
|
+
|
|
|
+ ret = mlx5_core_query_q_counter(dev->mdev,
|
|
|
+ dev->port[port - 1].q_cnt_id, 0,
|
|
|
+ out, outlen);
|
|
|
+ if (ret)
|
|
|
+ goto free;
|
|
|
+
|
|
|
+ for (i = 0; i < ARRAY_SIZE(names); i++) {
|
|
|
+ val = *(__be32 *)(out + stats_offsets[i]);
|
|
|
+ stats->value[i] = (u64)be32_to_cpu(val);
|
|
|
+ }
|
|
|
+free:
|
|
|
+ kvfree(out);
|
|
|
+ return ARRAY_SIZE(names);
|
|
|
+}
|
|
|
+
|
|
|
static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
|
|
|
{
|
|
|
struct mlx5_ib_dev *dev;
|
|
@@ -2720,6 +2792,12 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
|
|
|
(1ull << IB_USER_VERBS_CMD_DEALLOC_MW);
|
|
|
}
|
|
|
|
|
|
+ if (MLX5_CAP_GEN(dev->mdev, out_of_seq_cnt) &&
|
|
|
+ MLX5_CAP_GEN(dev->mdev, retransmission_q_counters)) {
|
|
|
+ dev->ib_dev.get_hw_stats = mlx5_ib_get_hw_stats;
|
|
|
+ dev->ib_dev.alloc_hw_stats = mlx5_ib_alloc_hw_stats;
|
|
|
+ }
|
|
|
+
|
|
|
if (MLX5_CAP_GEN(mdev, xrc)) {
|
|
|
dev->ib_dev.alloc_xrcd = mlx5_ib_alloc_xrcd;
|
|
|
dev->ib_dev.dealloc_xrcd = mlx5_ib_dealloc_xrcd;
|