/* drivers/infiniband/hw/mlx5/ib_rep.c */
  1. // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
  2. /*
  3. * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
  4. */
  5. #include "ib_rep.h"
/*
 * Init/cleanup profile used when an IB device instance is created for an
 * e-switch vport representor (rather than a full native IB device).
 *
 * Stages run in order on load; cleanup callbacks run in reverse order on
 * unload.  A NULL entry means the stage has no work in that direction.
 * Rep-specific stages (flow_db, non_default_cb, roce) replace the native
 * counterparts used by the regular mlx5_ib profile.
 */
static const struct mlx5_ib_profile rep_profile = {
	STAGE_CREATE(MLX5_IB_STAGE_INIT,
		     mlx5_ib_stage_init_init,
		     mlx5_ib_stage_init_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_FLOW_DB,
		     mlx5_ib_stage_rep_flow_db_init,
		     NULL),
	STAGE_CREATE(MLX5_IB_STAGE_CAPS,
		     mlx5_ib_stage_caps_init,
		     NULL),
	STAGE_CREATE(MLX5_IB_STAGE_NON_DEFAULT_CB,
		     mlx5_ib_stage_rep_non_default_cb,
		     NULL),
	STAGE_CREATE(MLX5_IB_STAGE_ROCE,
		     mlx5_ib_stage_rep_roce_init,
		     mlx5_ib_stage_rep_roce_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_DEVICE_RESOURCES,
		     mlx5_ib_stage_dev_res_init,
		     mlx5_ib_stage_dev_res_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_COUNTERS,
		     mlx5_ib_stage_counters_init,
		     mlx5_ib_stage_counters_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_BFREG,
		     mlx5_ib_stage_bfrag_init,
		     mlx5_ib_stage_bfrag_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_PRE_IB_REG_UMR,
		     NULL,
		     mlx5_ib_stage_pre_ib_reg_umr_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_IB_REG,
		     mlx5_ib_stage_ib_reg_init,
		     mlx5_ib_stage_ib_reg_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_POST_IB_REG_UMR,
		     mlx5_ib_stage_post_ib_reg_umr_init,
		     NULL),
	STAGE_CREATE(MLX5_IB_STAGE_CLASS_ATTR,
		     mlx5_ib_stage_class_attr_init,
		     NULL),
};
/*
 * Load callback for the uplink (vport 0) representor.  The NIC-mode IB
 * device already exists, so there is nothing to instantiate here; just
 * report success to the e-switch.
 */
static int
mlx5_ib_nic_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
{
	return 0;
}
/*
 * Unload callback for the uplink representor: drop the IB private pointer
 * stashed in the rep.  The IB device itself is owned elsewhere and is not
 * torn down here.
 */
static void
mlx5_ib_nic_rep_unload(struct mlx5_eswitch_rep *rep)
{
	rep->rep_if[REP_IB].priv = NULL;
}
  54. static int
  55. mlx5_ib_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
  56. {
  57. struct mlx5_ib_dev *ibdev;
  58. ibdev = (struct mlx5_ib_dev *)ib_alloc_device(sizeof(*ibdev));
  59. if (!ibdev)
  60. return -ENOMEM;
  61. ibdev->rep = rep;
  62. ibdev->mdev = dev;
  63. ibdev->num_ports = max(MLX5_CAP_GEN(dev, num_ports),
  64. MLX5_CAP_GEN(dev, num_vhca_ports));
  65. if (!__mlx5_ib_add(ibdev, &rep_profile))
  66. return -EINVAL;
  67. rep->rep_if[REP_IB].priv = ibdev;
  68. return 0;
  69. }
  70. static void
  71. mlx5_ib_vport_rep_unload(struct mlx5_eswitch_rep *rep)
  72. {
  73. struct mlx5_ib_dev *dev;
  74. if (!rep->rep_if[REP_IB].priv)
  75. return;
  76. dev = mlx5_ib_rep_to_dev(rep);
  77. __mlx5_ib_remove(dev, dev->profile, MLX5_IB_STAGE_MAX);
  78. rep->rep_if[REP_IB].priv = NULL;
  79. }
/*
 * get_proto_dev callback: expose the rep's IB device (stored in
 * rep_if[REP_IB].priv) as an opaque protocol-device pointer for the
 * e-switch.
 */
static void *mlx5_ib_vport_get_proto_dev(struct mlx5_eswitch_rep *rep)
{
	return mlx5_ib_rep_to_dev(rep);
}
  84. static void mlx5_ib_rep_register_vf_vports(struct mlx5_ib_dev *dev)
  85. {
  86. struct mlx5_eswitch *esw = dev->mdev->priv.eswitch;
  87. int total_vfs = MLX5_TOTAL_VPORTS(dev->mdev);
  88. int vport;
  89. for (vport = 1; vport < total_vfs; vport++) {
  90. struct mlx5_eswitch_rep_if rep_if = {};
  91. rep_if.load = mlx5_ib_vport_rep_load;
  92. rep_if.unload = mlx5_ib_vport_rep_unload;
  93. rep_if.get_proto_dev = mlx5_ib_vport_get_proto_dev;
  94. mlx5_eswitch_register_vport_rep(esw, vport, &rep_if, REP_IB);
  95. }
  96. }
  97. static void mlx5_ib_rep_unregister_vf_vports(struct mlx5_ib_dev *dev)
  98. {
  99. struct mlx5_eswitch *esw = dev->mdev->priv.eswitch;
  100. int total_vfs = MLX5_TOTAL_VPORTS(dev->mdev);
  101. int vport;
  102. for (vport = 1; vport < total_vfs; vport++)
  103. mlx5_eswitch_unregister_vport_rep(esw, vport, REP_IB);
  104. }
  105. void mlx5_ib_register_vport_reps(struct mlx5_ib_dev *dev)
  106. {
  107. struct mlx5_eswitch *esw = dev->mdev->priv.eswitch;
  108. struct mlx5_eswitch_rep_if rep_if = {};
  109. rep_if.load = mlx5_ib_nic_rep_load;
  110. rep_if.unload = mlx5_ib_nic_rep_unload;
  111. rep_if.get_proto_dev = mlx5_ib_vport_get_proto_dev;
  112. rep_if.priv = dev;
  113. mlx5_eswitch_register_vport_rep(esw, 0, &rep_if, REP_IB);
  114. mlx5_ib_rep_register_vf_vports(dev);
  115. }
  116. void mlx5_ib_unregister_vport_reps(struct mlx5_ib_dev *dev)
  117. {
  118. struct mlx5_eswitch *esw = dev->mdev->priv.eswitch;
  119. mlx5_ib_rep_unregister_vf_vports(dev); /* VFs vports */
  120. mlx5_eswitch_unregister_vport_rep(esw, 0, REP_IB); /* UPLINK PF*/
  121. }
/*
 * Thin wrapper exposing the current e-switch mode to IB code that
 * cannot include the eswitch internals directly.
 */
u8 mlx5_ib_eswitch_mode(struct mlx5_eswitch *esw)
{
	return mlx5_eswitch_mode(esw);
}
/*
 * Look up the IB device registered for a given vport's representor,
 * or NULL if none is registered.
 */
struct mlx5_ib_dev *mlx5_ib_get_rep_ibdev(struct mlx5_eswitch *esw,
					  int vport_index)
{
	return mlx5_eswitch_get_proto_dev(esw, vport_index, REP_IB);
}
/*
 * Look up the Ethernet (REP_ETH) netdev registered for a given vport's
 * representor, or NULL if none is registered.
 */
struct net_device *mlx5_ib_get_rep_netdev(struct mlx5_eswitch *esw,
					  int vport_index)
{
	return mlx5_eswitch_get_proto_dev(esw, vport_index, REP_ETH);
}
/*
 * Return the IB device bound to the uplink representor, or NULL if
 * none is registered.
 */
struct mlx5_ib_dev *mlx5_ib_get_uplink_ibdev(struct mlx5_eswitch *esw)
{
	return mlx5_eswitch_uplink_get_proto_dev(esw, REP_IB);
}
/*
 * Thin wrapper returning the e-switch representor object for a vport.
 */
struct mlx5_eswitch_rep *mlx5_ib_vport_rep(struct mlx5_eswitch *esw, int vport)
{
	return mlx5_eswitch_vport_rep(esw, vport);
}
  144. int create_flow_rule_vport_sq(struct mlx5_ib_dev *dev,
  145. struct mlx5_ib_sq *sq)
  146. {
  147. struct mlx5_flow_handle *flow_rule;
  148. struct mlx5_eswitch *esw = dev->mdev->priv.eswitch;
  149. if (!dev->rep)
  150. return 0;
  151. flow_rule =
  152. mlx5_eswitch_add_send_to_vport_rule(esw,
  153. dev->rep->vport,
  154. sq->base.mqp.qpn);
  155. if (IS_ERR(flow_rule))
  156. return PTR_ERR(flow_rule);
  157. sq->flow_rule = flow_rule;
  158. return 0;
  159. }