core_priv.h

/*
 * Copyright (c) 2004 Topspin Communications. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef _CORE_PRIV_H
#define _CORE_PRIV_H

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/cgroup_rdma.h>

#include <rdma/ib_verbs.h>
#include <rdma/opa_addr.h>
#include <rdma/ib_mad.h>
#include <rdma/restrack.h>

#include "mad_priv.h"

/* Total number of ports combined across all struct ib_device instances */
#define RDMA_MAX_PORTS 1024

struct pkey_index_qp_list {
	struct list_head	pkey_index_list;
	u16			pkey_index;
	/* Lock to hold while iterating the qp_list. */
	spinlock_t		qp_list_lock;
	struct list_head	qp_list;
};
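
/*
 * CMA configfs hooks; they compile to no-op stubs when
 * CONFIG_INFINIBAND_ADDR_TRANS_CONFIGFS is disabled.
 */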
#if IS_ENABLED(CONFIG_INFINIBAND_ADDR_TRANS_CONFIGFS)
int cma_configfs_init(void);
void cma_configfs_exit(void);
#else
static inline int cma_configfs_init(void)
{
	return 0;
}

static inline void cma_configfs_exit(void)
{
}
#endif
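
/*
 * RDMA connection manager (CMA) per-device helpers: reference counting,
 * device enumeration, and the per-port default GID type / RoCE ToS
 * settings.
 */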
struct cma_device;
void cma_ref_dev(struct cma_device *cma_dev);
void cma_deref_dev(struct cma_device *cma_dev);
typedef bool (*cma_device_filter)(struct ib_device *, void *);
struct cma_device *cma_enum_devices_by_ibdev(cma_device_filter filter,
					     void *cookie);
int cma_get_default_gid_type(struct cma_device *cma_dev,
			     unsigned int port);
int cma_set_default_gid_type(struct cma_device *cma_dev,
			     unsigned int port,
			     enum ib_gid_type default_gid_type);
int cma_get_default_roce_tos(struct cma_device *cma_dev, unsigned int port);
int cma_set_default_roce_tos(struct cma_device *cma_dev, unsigned int port,
			     u8 default_roce_tos);
struct ib_device *cma_get_ib_dev(struct cma_device *cma_dev);

int ib_device_register_sysfs(struct ib_device *device,
			     int (*port_callback)(struct ib_device *,
						  u8, struct kobject *));
void ib_device_unregister_sysfs(struct ib_device *device);

void ib_cache_setup(void);
void ib_cache_cleanup(void);

typedef void (*roce_netdev_callback)(struct ib_device *device, u8 port,
				     struct net_device *idev, void *cookie);

typedef int (*roce_netdev_filter)(struct ib_device *device, u8 port,
				  struct net_device *idev, void *cookie);

void ib_enum_roce_netdev(struct ib_device *ib_dev,
			 roce_netdev_filter filter,
			 void *filter_cookie,
			 roce_netdev_callback cb,
			 void *cookie);
void ib_enum_all_roce_netdevs(roce_netdev_filter filter,
			      void *filter_cookie,
			      roce_netdev_callback cb,
			      void *cookie);

typedef int (*nldev_callback)(struct ib_device *device,
			      struct sk_buff *skb,
			      struct netlink_callback *cb,
			      unsigned int idx);

int ib_enum_all_devs(nldev_callback nldev_cb, struct sk_buff *skb,
		     struct netlink_callback *cb);
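
/*
 * Helpers for maintaining the per-port GID table (GID cache), including
 * the default GIDs derived from the net_device backing each RoCE port.
 */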
enum ib_cache_gid_default_mode {
	IB_CACHE_GID_DEFAULT_MODE_SET,
	IB_CACHE_GID_DEFAULT_MODE_DELETE
};

int ib_cache_gid_parse_type_str(const char *buf);

const char *ib_cache_gid_type_str(enum ib_gid_type gid_type);

void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u8 port,
				  struct net_device *ndev,
				  unsigned long gid_type_mask,
				  enum ib_cache_gid_default_mode mode);

int ib_cache_gid_add(struct ib_device *ib_dev, u8 port,
		     union ib_gid *gid, struct ib_gid_attr *attr);

int ib_cache_gid_del(struct ib_device *ib_dev, u8 port,
		     union ib_gid *gid, struct ib_gid_attr *attr);

int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u8 port,
				     struct net_device *ndev);

int roce_gid_mgmt_init(void);
void roce_gid_mgmt_cleanup(void);

unsigned long roce_gid_type_mask_support(struct ib_device *ib_dev, u8 port);

int ib_cache_setup_one(struct ib_device *device);
void ib_cache_cleanup_one(struct ib_device *device);
void ib_cache_release_one(struct ib_device *device);
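
/*
 * RDMA cgroup (rdmacg) registration and charge/uncharge helpers; these
 * are stubbed out when CONFIG_CGROUP_RDMA is not enabled.
 */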
#ifdef CONFIG_CGROUP_RDMA
int ib_device_register_rdmacg(struct ib_device *device);
void ib_device_unregister_rdmacg(struct ib_device *device);

int ib_rdmacg_try_charge(struct ib_rdmacg_object *cg_obj,
			 struct ib_device *device,
			 enum rdmacg_resource_type resource_index);

void ib_rdmacg_uncharge(struct ib_rdmacg_object *cg_obj,
			struct ib_device *device,
			enum rdmacg_resource_type resource_index);
#else
static inline int ib_device_register_rdmacg(struct ib_device *device)
{ return 0; }

static inline void ib_device_unregister_rdmacg(struct ib_device *device)
{ }

static inline int ib_rdmacg_try_charge(struct ib_rdmacg_object *cg_obj,
				       struct ib_device *device,
				       enum rdmacg_resource_type resource_index)
{ return 0; }

static inline void ib_rdmacg_uncharge(struct ib_rdmacg_object *cg_obj,
				      struct ib_device *device,
				      enum rdmacg_resource_type resource_index)
{ }
#endif

static inline bool rdma_is_upper_dev_rcu(struct net_device *dev,
					 struct net_device *upper)
{
	return netdev_has_upper_dev_all_rcu(dev, upper);
}

int addr_init(void);
void addr_cleanup(void);

int ib_mad_init(void);
void ib_mad_cleanup(void);

int ib_sa_init(void);
void ib_sa_cleanup(void);

int rdma_nl_init(void);
void rdma_nl_exit(void);

int ib_nl_handle_resolve_resp(struct sk_buff *skb,
			      struct nlmsghdr *nlh,
			      struct netlink_ext_ack *extack);
int ib_nl_handle_set_timeout(struct sk_buff *skb,
			     struct nlmsghdr *nlh,
			     struct netlink_ext_ack *extack);
int ib_nl_handle_ip_res_resp(struct sk_buff *skb,
			     struct nlmsghdr *nlh,
			     struct netlink_ext_ack *extack);

int ib_get_cached_subnet_prefix(struct ib_device *device,
				u8 port_num,
				u64 *sn_pfx);
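
/*
 * InfiniBand security hooks for QPs and MAD agents (P_Key-based access
 * enforcement); these become no-ops when CONFIG_SECURITY_INFINIBAND is
 * disabled, with ib_security_modify_qp() falling through to the
 * driver's modify_qp.
 */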
#ifdef CONFIG_SECURITY_INFINIBAND
void ib_security_destroy_port_pkey_list(struct ib_device *device);

void ib_security_cache_change(struct ib_device *device,
			      u8 port_num,
			      u64 subnet_prefix);
int ib_security_modify_qp(struct ib_qp *qp,
			  struct ib_qp_attr *qp_attr,
			  int qp_attr_mask,
			  struct ib_udata *udata);
int ib_create_qp_security(struct ib_qp *qp, struct ib_device *dev);
void ib_destroy_qp_security_begin(struct ib_qp_security *sec);
void ib_destroy_qp_security_abort(struct ib_qp_security *sec);
void ib_destroy_qp_security_end(struct ib_qp_security *sec);
int ib_open_shared_qp_security(struct ib_qp *qp, struct ib_device *dev);
void ib_close_shared_qp_security(struct ib_qp_security *sec);
int ib_mad_agent_security_setup(struct ib_mad_agent *agent,
				enum ib_qp_type qp_type);
void ib_mad_agent_security_cleanup(struct ib_mad_agent *agent);
int ib_mad_enforce_security(struct ib_mad_agent_private *map, u16 pkey_index);
#else
static inline void ib_security_destroy_port_pkey_list(struct ib_device *device)
{
}

static inline void ib_security_cache_change(struct ib_device *device,
					    u8 port_num,
					    u64 subnet_prefix)
{
}

static inline int ib_security_modify_qp(struct ib_qp *qp,
					struct ib_qp_attr *qp_attr,
					int qp_attr_mask,
					struct ib_udata *udata)
{
	return qp->device->modify_qp(qp->real_qp,
				     qp_attr,
				     qp_attr_mask,
				     udata);
}

static inline int ib_create_qp_security(struct ib_qp *qp,
					struct ib_device *dev)
{
	return 0;
}

static inline void ib_destroy_qp_security_begin(struct ib_qp_security *sec)
{
}

static inline void ib_destroy_qp_security_abort(struct ib_qp_security *sec)
{
}

static inline void ib_destroy_qp_security_end(struct ib_qp_security *sec)
{
}

static inline int ib_open_shared_qp_security(struct ib_qp *qp,
					     struct ib_device *dev)
{
	return 0;
}

static inline void ib_close_shared_qp_security(struct ib_qp_security *sec)
{
}

static inline int ib_mad_agent_security_setup(struct ib_mad_agent *agent,
					      enum ib_qp_type qp_type)
{
	return 0;
}

static inline void ib_mad_agent_security_cleanup(struct ib_mad_agent *agent)
{
}

static inline int ib_mad_enforce_security(struct ib_mad_agent_private *map,
					  u16 pkey_index)
{
	return 0;
}
#endif

struct ib_device *ib_device_get_by_index(u32 ifindex);

/* RDMA device netlink */
void nldev_init(void);
void nldev_exit(void);
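
/*
 * Common QP-creation helper: invokes the driver's create_qp hook, fills
 * in the core-owned fields of the new ib_qp, and registers it with the
 * restrack resource tracker (XRC QPs excepted, see the comment below).
 */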
static inline struct ib_qp *_ib_create_qp(struct ib_device *dev,
					  struct ib_pd *pd,
					  struct ib_qp_init_attr *attr,
					  struct ib_udata *udata,
					  struct ib_uobject *uobj)
{
	struct ib_qp *qp;

	if (!dev->create_qp)
		return ERR_PTR(-EOPNOTSUPP);

	qp = dev->create_qp(pd, attr, udata);
	if (IS_ERR(qp))
		return qp;

	qp->device = dev;
	qp->pd = pd;
	qp->uobject = uobj;
	/*
	 * We don't track XRC QPs for now, because they don't have a PD
	 * and, more importantly, they are created internally by the
	 * driver; see mlx5 create_dev_resources() as an example.
	 */
	if (attr->qp_type < IB_QPT_XRC_INI) {
		qp->res.type = RDMA_RESTRACK_QP;
		rdma_restrack_add(&qp->res);
	} else
		qp->res.valid = false;

	return qp;
}
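
/*
 * Address resolution helpers: map an IP route, or a GRH's GIDs, to the
 * L2 address information needed to reach the destination.
 */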
struct rdma_dev_addr;
int rdma_resolve_ip_route(struct sockaddr *src_addr,
			  const struct sockaddr *dst_addr,
			  struct rdma_dev_addr *addr);

int rdma_addr_find_l2_eth_by_grh(const union ib_gid *sgid,
				 const union ib_gid *dgid,
				 u8 *dmac, const struct net_device *ndev,
				 int *hoplimit);

#endif /* _CORE_PRIV_H */