// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
 */

#include "ib_rep.h"
/*
 * Init/cleanup stage table used to bring up an IB device instance for
 * an e-switch vport representor.  Representor-specific variants are
 * plugged in for the flow DB, non-default-callback and RoCE stages
 * (mlx5_ib_stage_rep_*); a NULL slot means the stage needs no work in
 * that direction.  Consumed by __mlx5_ib_add() in
 * mlx5_ib_vport_rep_load() below.
 */
static const struct mlx5_ib_profile rep_profile = {
	STAGE_CREATE(MLX5_IB_STAGE_INIT,
		     mlx5_ib_stage_init_init,
		     mlx5_ib_stage_init_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_FLOW_DB,
		     mlx5_ib_stage_rep_flow_db_init,
		     NULL),
	STAGE_CREATE(MLX5_IB_STAGE_CAPS,
		     mlx5_ib_stage_caps_init,
		     NULL),
	STAGE_CREATE(MLX5_IB_STAGE_NON_DEFAULT_CB,
		     mlx5_ib_stage_rep_non_default_cb,
		     NULL),
	STAGE_CREATE(MLX5_IB_STAGE_ROCE,
		     mlx5_ib_stage_rep_roce_init,
		     mlx5_ib_stage_rep_roce_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_DEVICE_RESOURCES,
		     mlx5_ib_stage_dev_res_init,
		     mlx5_ib_stage_dev_res_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_COUNTERS,
		     mlx5_ib_stage_counters_init,
		     mlx5_ib_stage_counters_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_BFREG,
		     mlx5_ib_stage_bfrag_init,
		     mlx5_ib_stage_bfrag_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_PRE_IB_REG_UMR,
		     NULL,
		     mlx5_ib_stage_pre_ib_reg_umr_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_IB_REG,
		     mlx5_ib_stage_ib_reg_init,
		     mlx5_ib_stage_ib_reg_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_POST_IB_REG_UMR,
		     mlx5_ib_stage_post_ib_reg_umr_init,
		     NULL),
};
/*
 * e-switch load callback for the NIC (vport 0) representor.  Nothing to
 * create here: the rep_if.priv for this rep is populated up front in
 * mlx5_ib_register_vport_reps(), so load is a no-op that reports success.
 */
static int
mlx5_ib_nic_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
{
	return 0;
}
/*
 * e-switch unload callback for the NIC (vport 0) representor: drop the
 * priv pointer installed at registration time.  The IB device itself is
 * not owned by the rep, so nothing is torn down here.
 */
static void
mlx5_ib_nic_rep_unload(struct mlx5_eswitch_rep *rep)
{
	rep->rep_if[REP_IB].priv = NULL;
}
  51. static int
  52. mlx5_ib_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
  53. {
  54. struct mlx5_ib_dev *ibdev;
  55. ibdev = (struct mlx5_ib_dev *)ib_alloc_device(sizeof(*ibdev));
  56. if (!ibdev)
  57. return -ENOMEM;
  58. ibdev->rep = rep;
  59. ibdev->mdev = dev;
  60. ibdev->num_ports = max(MLX5_CAP_GEN(dev, num_ports),
  61. MLX5_CAP_GEN(dev, num_vhca_ports));
  62. if (!__mlx5_ib_add(ibdev, &rep_profile))
  63. return -EINVAL;
  64. rep->rep_if[REP_IB].priv = ibdev;
  65. return 0;
  66. }
  67. static void
  68. mlx5_ib_vport_rep_unload(struct mlx5_eswitch_rep *rep)
  69. {
  70. struct mlx5_ib_dev *dev;
  71. if (!rep->rep_if[REP_IB].priv)
  72. return;
  73. dev = mlx5_ib_rep_to_dev(rep);
  74. __mlx5_ib_remove(dev, dev->profile, MLX5_IB_STAGE_MAX);
  75. rep->rep_if[REP_IB].priv = NULL;
  76. }
/*
 * get_proto_dev callback for REP_IB: expose the mlx5_ib_dev bound to a
 * representor as an opaque protocol-device pointer for the e-switch.
 */
static void *mlx5_ib_vport_get_proto_dev(struct mlx5_eswitch_rep *rep)
{
	return mlx5_ib_rep_to_dev(rep);
}
  81. static void mlx5_ib_rep_register_vf_vports(struct mlx5_ib_dev *dev)
  82. {
  83. struct mlx5_eswitch *esw = dev->mdev->priv.eswitch;
  84. int total_vfs = MLX5_TOTAL_VPORTS(dev->mdev);
  85. int vport;
  86. for (vport = 1; vport < total_vfs; vport++) {
  87. struct mlx5_eswitch_rep_if rep_if = {};
  88. rep_if.load = mlx5_ib_vport_rep_load;
  89. rep_if.unload = mlx5_ib_vport_rep_unload;
  90. rep_if.get_proto_dev = mlx5_ib_vport_get_proto_dev;
  91. mlx5_eswitch_register_vport_rep(esw, vport, &rep_if, REP_IB);
  92. }
  93. }
  94. static void mlx5_ib_rep_unregister_vf_vports(struct mlx5_ib_dev *dev)
  95. {
  96. struct mlx5_eswitch *esw = dev->mdev->priv.eswitch;
  97. int total_vfs = MLX5_TOTAL_VPORTS(dev->mdev);
  98. int vport;
  99. for (vport = 1; vport < total_vfs; vport++)
  100. mlx5_eswitch_unregister_vport_rep(esw, vport, REP_IB);
  101. }
  102. void mlx5_ib_register_vport_reps(struct mlx5_ib_dev *dev)
  103. {
  104. struct mlx5_eswitch *esw = dev->mdev->priv.eswitch;
  105. struct mlx5_eswitch_rep_if rep_if = {};
  106. rep_if.load = mlx5_ib_nic_rep_load;
  107. rep_if.unload = mlx5_ib_nic_rep_unload;
  108. rep_if.get_proto_dev = mlx5_ib_vport_get_proto_dev;
  109. rep_if.priv = dev;
  110. mlx5_eswitch_register_vport_rep(esw, 0, &rep_if, REP_IB);
  111. mlx5_ib_rep_register_vf_vports(dev);
  112. }
/*
 * Unregister all REP_IB representors, in reverse order of
 * mlx5_ib_register_vport_reps(): VF vports first, then the uplink/PF
 * rep on vport 0.
 */
void mlx5_ib_unregister_vport_reps(struct mlx5_ib_dev *dev)
{
	struct mlx5_eswitch *esw = dev->mdev->priv.eswitch;

	mlx5_ib_rep_unregister_vf_vports(dev); /* VFs vports */
	mlx5_eswitch_unregister_vport_rep(esw, 0, REP_IB); /* UPLINK PF*/
}
/* Thin wrapper exposing the e-switch mode to the rest of mlx5_ib. */
u8 mlx5_ib_eswitch_mode(struct mlx5_eswitch *esw)
{
	return mlx5_eswitch_mode(esw);
}
/*
 * Look up the mlx5_ib_dev registered for a vport's REP_IB representor.
 * Returns the pointer stashed via get_proto_dev; may be NULL if no IB
 * rep is loaded for that vport — TODO confirm against
 * mlx5_eswitch_get_proto_dev() semantics.
 */
struct mlx5_ib_dev *mlx5_ib_get_rep_ibdev(struct mlx5_eswitch *esw,
					  int vport_index)
{
	return mlx5_eswitch_get_proto_dev(esw, vport_index, REP_IB);
}
/*
 * Look up the net_device registered for a vport's REP_ETH (Ethernet)
 * representor — the netdev counterpart of the IB rep on the same vport.
 */
struct net_device *mlx5_ib_get_rep_netdev(struct mlx5_eswitch *esw,
					  int vport_index)
{
	return mlx5_eswitch_get_proto_dev(esw, vport_index, REP_ETH);
}
/* Fetch the mlx5_ib_dev associated with the uplink's REP_IB rep. */
struct mlx5_ib_dev *mlx5_ib_get_uplink_ibdev(struct mlx5_eswitch *esw)
{
	return mlx5_eswitch_uplink_get_proto_dev(esw, REP_IB);
}
/* Thin wrapper: fetch the e-switch representor object for a vport. */
struct mlx5_eswitch_rep *mlx5_ib_vport_rep(struct mlx5_eswitch *esw, int vport)
{
	return mlx5_eswitch_vport_rep(esw, vport);
}
  141. int create_flow_rule_vport_sq(struct mlx5_ib_dev *dev,
  142. struct mlx5_ib_sq *sq)
  143. {
  144. struct mlx5_flow_handle *flow_rule;
  145. struct mlx5_eswitch *esw = dev->mdev->priv.eswitch;
  146. if (!dev->rep)
  147. return 0;
  148. flow_rule =
  149. mlx5_eswitch_add_send_to_vport_rule(esw,
  150. dev->rep->vport,
  151. sq->base.mqp.qpn);
  152. if (IS_ERR(flow_rule))
  153. return PTR_ERR(flow_rule);
  154. sq->flow_rule = flow_rule;
  155. return 0;
  156. }