/*
 * Copyright (c) 2016 Mellanox Technologies Ltd.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/security.h>
#include <linux/completion.h>
#include <linux/list.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>
#include "core_priv.h"
#include "mad_priv.h"
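
/* Look up the pkey_index_qp_list tracking this port/PKey-index pair,
 * or return NULL if no QP has been listed under it yet.
 */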
static struct pkey_index_qp_list *get_pkey_idx_qp_list(struct ib_port_pkey *pp)
{
	struct pkey_index_qp_list *pkey = NULL;
	struct pkey_index_qp_list *tmp_pkey;
	struct ib_device *dev = pp->sec->dev;

	spin_lock(&dev->port_pkey_list[pp->port_num].list_lock);
	list_for_each_entry(tmp_pkey,
			    &dev->port_pkey_list[pp->port_num].pkey_list,
			    pkey_index_list) {
		if (tmp_pkey->pkey_index == pp->pkey_index) {
			pkey = tmp_pkey;
			break;
		}
	}
	spin_unlock(&dev->port_pkey_list[pp->port_num].list_lock);
	return pkey;
}

static int get_pkey_and_subnet_prefix(struct ib_port_pkey *pp,
				      u16 *pkey,
				      u64 *subnet_prefix)
{
	struct ib_device *dev = pp->sec->dev;
	int ret;

	ret = ib_get_cached_pkey(dev, pp->port_num, pp->pkey_index, pkey);
	if (ret)
		return ret;

	ret = ib_get_cached_subnet_prefix(dev, pp->port_num, subnet_prefix);

	return ret;
}
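
/* Ask the LSM whether this QP, and every QP sharing it, may use the
 * given PKey on the given subnet.  The first denial is returned.
 */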
static int enforce_qp_pkey_security(u16 pkey,
				    u64 subnet_prefix,
				    struct ib_qp_security *qp_sec)
{
	struct ib_qp_security *shared_qp_sec;
	int ret;

	ret = security_ib_pkey_access(qp_sec->security, subnet_prefix, pkey);
	if (ret)
		return ret;

	list_for_each_entry(shared_qp_sec,
			    &qp_sec->shared_qp_list,
			    shared_qp_list) {
		ret = security_ib_pkey_access(shared_qp_sec->security,
					      subnet_prefix,
					      pkey);
		if (ret)
			return ret;
	}
	return 0;
}

/* The caller of this function must hold the QP security
 * mutex of the QP of the security structure in *pps.
 *
 * It takes separate ports_pkeys and security structures
 * because in some cases the pps will be for new settings,
 * or the pps will be for the real QP while the security
 * structure is for a shared QP.
 */
static int check_qp_port_pkey_settings(struct ib_ports_pkeys *pps,
				       struct ib_qp_security *sec)
{
	u64 subnet_prefix;
	u16 pkey;
	int ret = 0;

	if (!pps)
		return 0;

	if (pps->main.state != IB_PORT_PKEY_NOT_VALID) {
		ret = get_pkey_and_subnet_prefix(&pps->main,
						 &pkey,
						 &subnet_prefix);
		if (ret)
			return ret;

		ret = enforce_qp_pkey_security(pkey,
					       subnet_prefix,
					       sec);
		if (ret)
			return ret;
	}

	if (pps->alt.state != IB_PORT_PKEY_NOT_VALID) {
		ret = get_pkey_and_subnet_prefix(&pps->alt,
						 &pkey,
						 &subnet_prefix);
		if (ret)
			return ret;

		ret = enforce_qp_pkey_security(pkey,
					       subnet_prefix,
					       sec);
	}

	return ret;
}

/* The caller of this function must hold the QP security
 * mutex.
 */
static void qp_to_error(struct ib_qp_security *sec)
{
	struct ib_qp_security *shared_qp_sec;
	struct ib_qp_attr attr = {
		.qp_state = IB_QPS_ERR
	};
	struct ib_event event = {
		.event = IB_EVENT_QP_FATAL
	};

	/* If the QP is in the process of being destroyed
	 * the qp pointer in the security structure is
	 * undefined.  It cannot be modified now.
	 */
	if (sec->destroying)
		return;

	ib_modify_qp(sec->qp,
		     &attr,
		     IB_QP_STATE);

	if (sec->qp->event_handler && sec->qp->qp_context) {
		event.element.qp = sec->qp;
		sec->qp->event_handler(&event,
				       sec->qp->qp_context);
	}

	list_for_each_entry(shared_qp_sec,
			    &sec->shared_qp_list,
			    shared_qp_list) {
		struct ib_qp *qp = shared_qp_sec->qp;

		if (qp->event_handler && qp->qp_context) {
			event.element.qp = qp;
			event.device = qp->device;
			qp->event_handler(&event,
					  qp->qp_context);
		}
	}
}
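
/* Re-check every QP listed under this PKey index after a cache
 * change.  QPs that fail the LSM check are collected on a local
 * to_error_list under the qp_list_lock, then moved to the error
 * state one by one, taking each QP's security mutex only for the
 * actual transition.
 */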
static inline void check_pkey_qps(struct pkey_index_qp_list *pkey,
				  struct ib_device *device,
				  u8 port_num,
				  u64 subnet_prefix)
{
	struct ib_port_pkey *pp, *tmp_pp;
	bool comp;
	LIST_HEAD(to_error_list);
	u16 pkey_val;

	if (!ib_get_cached_pkey(device,
				port_num,
				pkey->pkey_index,
				&pkey_val)) {
		spin_lock(&pkey->qp_list_lock);
		list_for_each_entry(pp, &pkey->qp_list, qp_list) {
			if (atomic_read(&pp->sec->error_list_count))
				continue;

			if (enforce_qp_pkey_security(pkey_val,
						     subnet_prefix,
						     pp->sec)) {
				atomic_inc(&pp->sec->error_list_count);
				list_add(&pp->to_error_list,
					 &to_error_list);
			}
		}
		spin_unlock(&pkey->qp_list_lock);
	}

	list_for_each_entry_safe(pp,
				 tmp_pp,
				 &to_error_list,
				 to_error_list) {
		mutex_lock(&pp->sec->mutex);
		qp_to_error(pp->sec);
		list_del(&pp->to_error_list);
		atomic_dec(&pp->sec->error_list_count);
		comp = pp->sec->destroying;
		mutex_unlock(&pp->sec->mutex);

		if (comp)
			complete(&pp->sec->error_complete);
	}
}

/* The caller of this function must hold the QP security
 * mutex.
 */
static int port_pkey_list_insert(struct ib_port_pkey *pp)
{
	struct pkey_index_qp_list *tmp_pkey;
	struct pkey_index_qp_list *pkey;
	struct ib_device *dev;
	u8 port_num = pp->port_num;
	int ret = 0;

	if (pp->state != IB_PORT_PKEY_VALID)
		return 0;

	dev = pp->sec->dev;

	pkey = get_pkey_idx_qp_list(pp);

	if (!pkey) {
		bool found = false;

		pkey = kzalloc(sizeof(*pkey), GFP_KERNEL);
		if (!pkey)
			return -ENOMEM;

		spin_lock(&dev->port_pkey_list[port_num].list_lock);
		/* Check for the PKey again.  A racing process may
		 * have created it.
		 */
		list_for_each_entry(tmp_pkey,
				    &dev->port_pkey_list[port_num].pkey_list,
				    pkey_index_list) {
			if (tmp_pkey->pkey_index == pp->pkey_index) {
				kfree(pkey);
				pkey = tmp_pkey;
				found = true;
				break;
			}
		}

		if (!found) {
			pkey->pkey_index = pp->pkey_index;
			spin_lock_init(&pkey->qp_list_lock);
			INIT_LIST_HEAD(&pkey->qp_list);
			list_add(&pkey->pkey_index_list,
				 &dev->port_pkey_list[port_num].pkey_list);
		}
		spin_unlock(&dev->port_pkey_list[port_num].list_lock);
	}

	spin_lock(&pkey->qp_list_lock);
	list_add(&pp->qp_list, &pkey->qp_list);
	spin_unlock(&pkey->qp_list_lock);

	pp->state = IB_PORT_PKEY_LISTED;

	return ret;
}

/* The caller of this function must hold the QP security
 * mutex.
 */
static void port_pkey_list_remove(struct ib_port_pkey *pp)
{
	struct pkey_index_qp_list *pkey;

	if (pp->state != IB_PORT_PKEY_LISTED)
		return;

	pkey = get_pkey_idx_qp_list(pp);

	spin_lock(&pkey->qp_list_lock);
	list_del(&pp->qp_list);
	spin_unlock(&pkey->qp_list_lock);

	/* The setting may still be valid, i.e. after
	 * a destroy has failed for example.
	 */
	pp->state = IB_PORT_PKEY_VALID;
}
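
/* Release the LSM blob and the port/PKey settings attached to a QP
 * security structure.
 */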
static void destroy_qp_security(struct ib_qp_security *sec)
{
	security_ib_free_security(sec->security);
	kfree(sec->ports_pkeys);
	kfree(sec);
}

/* The caller of this function must hold the QP security
 * mutex.
 */
static struct ib_ports_pkeys *get_new_pps(const struct ib_qp *qp,
					  const struct ib_qp_attr *qp_attr,
					  int qp_attr_mask)
{
	struct ib_ports_pkeys *new_pps;
	struct ib_ports_pkeys *qp_pps = qp->qp_sec->ports_pkeys;

	new_pps = kzalloc(sizeof(*new_pps), GFP_KERNEL);
	if (!new_pps)
		return NULL;

	if (qp_attr_mask & (IB_QP_PKEY_INDEX | IB_QP_PORT)) {
		if (!qp_pps) {
			new_pps->main.port_num = qp_attr->port_num;
			new_pps->main.pkey_index = qp_attr->pkey_index;
		} else {
			new_pps->main.port_num = (qp_attr_mask & IB_QP_PORT) ?
						  qp_attr->port_num :
						  qp_pps->main.port_num;

			new_pps->main.pkey_index =
					(qp_attr_mask & IB_QP_PKEY_INDEX) ?
					 qp_attr->pkey_index :
					 qp_pps->main.pkey_index;
		}
		new_pps->main.state = IB_PORT_PKEY_VALID;
	} else if (qp_pps) {
		new_pps->main.port_num = qp_pps->main.port_num;
		new_pps->main.pkey_index = qp_pps->main.pkey_index;
		if (qp_pps->main.state != IB_PORT_PKEY_NOT_VALID)
			new_pps->main.state = IB_PORT_PKEY_VALID;
	}

	if (qp_attr_mask & IB_QP_ALT_PATH) {
		new_pps->alt.port_num = qp_attr->alt_port_num;
		new_pps->alt.pkey_index = qp_attr->alt_pkey_index;
		new_pps->alt.state = IB_PORT_PKEY_VALID;
	} else if (qp_pps) {
		new_pps->alt.port_num = qp_pps->alt.port_num;
		new_pps->alt.pkey_index = qp_pps->alt.pkey_index;
		if (qp_pps->alt.state != IB_PORT_PKEY_NOT_VALID)
			new_pps->alt.state = IB_PORT_PKEY_VALID;
	}

	new_pps->main.sec = qp->qp_sec;
	new_pps->alt.sec = qp->qp_sec;
	return new_pps;
}
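
/* Create a security structure for a handle opened on an existing
 * (shared) QP, verify it against the real QP's current port/PKey
 * settings, and link it on the real QP's shared_qp_list so later
 * enforcement on the real QP covers this handle as well.
 */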
int ib_open_shared_qp_security(struct ib_qp *qp, struct ib_device *dev)
{
	struct ib_qp *real_qp = qp->real_qp;
	int ret;

	ret = ib_create_qp_security(qp, dev);
	if (ret)
		return ret;

	if (!qp->qp_sec)
		return 0;

	mutex_lock(&real_qp->qp_sec->mutex);
	ret = check_qp_port_pkey_settings(real_qp->qp_sec->ports_pkeys,
					  qp->qp_sec);

	if (ret)
		goto ret;

	if (qp != real_qp)
		list_add(&qp->qp_sec->shared_qp_list,
			 &real_qp->qp_sec->shared_qp_list);
ret:
	mutex_unlock(&real_qp->qp_sec->mutex);
	if (ret)
		destroy_qp_security(qp->qp_sec);

	return ret;
}

void ib_close_shared_qp_security(struct ib_qp_security *sec)
{
	struct ib_qp *real_qp = sec->qp->real_qp;

	mutex_lock(&real_qp->qp_sec->mutex);
	list_del(&sec->shared_qp_list);
	mutex_unlock(&real_qp->qp_sec->mutex);

	destroy_qp_security(sec);
}
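
/* Allocate and initialize the security structure for a new QP.  The
 * structure is only created when at least one port of the device runs
 * the IB protocol; otherwise qp->qp_sec stays NULL and the other
 * entry points treat the QP as exempt from PKey enforcement.
 */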
int ib_create_qp_security(struct ib_qp *qp, struct ib_device *dev)
{
	u8 i = rdma_start_port(dev);
	bool is_ib = false;
	int ret;

	while (i <= rdma_end_port(dev) && !is_ib)
		is_ib = rdma_protocol_ib(dev, i++);

	/* If this isn't an IB device don't create the security context */
	if (!is_ib)
		return 0;

	qp->qp_sec = kzalloc(sizeof(*qp->qp_sec), GFP_KERNEL);
	if (!qp->qp_sec)
		return -ENOMEM;

	qp->qp_sec->qp = qp;
	qp->qp_sec->dev = dev;
	mutex_init(&qp->qp_sec->mutex);
	INIT_LIST_HEAD(&qp->qp_sec->shared_qp_list);
	atomic_set(&qp->qp_sec->error_list_count, 0);
	init_completion(&qp->qp_sec->error_complete);
	ret = security_ib_alloc_security(&qp->qp_sec->security);
	if (ret) {
		kfree(qp->qp_sec);
		qp->qp_sec = NULL;
	}

	return ret;
}
EXPORT_SYMBOL(ib_create_qp_security);

void ib_destroy_qp_security_begin(struct ib_qp_security *sec)
{
	/* Return if not IB */
	if (!sec)
		return;

	mutex_lock(&sec->mutex);

	/* Remove the QP from the lists so it won't get added to
	 * a to_error_list during the destroy process.
	 */
	if (sec->ports_pkeys) {
		port_pkey_list_remove(&sec->ports_pkeys->main);
		port_pkey_list_remove(&sec->ports_pkeys->alt);
	}

	/* If the QP is already in one or more of those lists
	 * the destroying flag will ensure the to error flow
	 * doesn't operate on an undefined QP.
	 */
	sec->destroying = true;

	/* Record the error list count to know how many completions
	 * to wait for.
	 */
	sec->error_comps_pending = atomic_read(&sec->error_list_count);

	mutex_unlock(&sec->mutex);
}

void ib_destroy_qp_security_abort(struct ib_qp_security *sec)
{
	int ret;
	int i;

	/* Return if not IB */
	if (!sec)
		return;

	/* If a concurrent cache update is in progress this
	 * QP security could be marked for an error state
	 * transition.  Wait for this to complete.
	 */
	for (i = 0; i < sec->error_comps_pending; i++)
		wait_for_completion(&sec->error_complete);

	mutex_lock(&sec->mutex);
	sec->destroying = false;

	/* Restore the position in the lists and verify
	 * access is still allowed in case a cache update
	 * occurred while attempting to destroy.
	 *
	 * Because these settings were listed already
	 * and removed during ib_destroy_qp_security_begin
	 * we know the pkey_index_qp_list for the PKey
	 * already exists so port_pkey_list_insert won't fail.
	 */
	if (sec->ports_pkeys) {
		port_pkey_list_insert(&sec->ports_pkeys->main);
		port_pkey_list_insert(&sec->ports_pkeys->alt);
	}

	ret = check_qp_port_pkey_settings(sec->ports_pkeys, sec);
	if (ret)
		qp_to_error(sec);

	mutex_unlock(&sec->mutex);
}

void ib_destroy_qp_security_end(struct ib_qp_security *sec)
{
	int i;

	/* Return if not IB */
	if (!sec)
		return;

	/* If a concurrent cache update is occurring we must
	 * wait until this QP security structure is processed
	 * in the QP to error flow before destroying it because
	 * the to_error_list is in use.
	 */
	for (i = 0; i < sec->error_comps_pending; i++)
		wait_for_completion(&sec->error_complete);

	destroy_qp_security(sec);
}
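
/* Called when the cached PKey table or subnet prefix for a port has
 * changed.  Every PKey index with listed QPs is re-checked so that
 * QPs which are no longer permitted are moved to the error state.
 */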
void ib_security_cache_change(struct ib_device *device,
			      u8 port_num,
			      u64 subnet_prefix)
{
	struct pkey_index_qp_list *pkey;

	list_for_each_entry(pkey,
			    &device->port_pkey_list[port_num].pkey_list,
			    pkey_index_list) {
		check_pkey_qps(pkey,
			       device,
			       port_num,
			       subnet_prefix);
	}
}

void ib_security_destroy_port_pkey_list(struct ib_device *device)
{
	struct pkey_index_qp_list *pkey, *tmp_pkey;
	int i;

	for (i = rdma_start_port(device); i <= rdma_end_port(device); i++) {
		spin_lock(&device->port_pkey_list[i].list_lock);
		list_for_each_entry_safe(pkey,
					 tmp_pkey,
					 &device->port_pkey_list[i].pkey_list,
					 pkey_index_list) {
			list_del(&pkey->pkey_index_list);
			kfree(pkey);
		}
		spin_unlock(&device->port_pkey_list[i].list_lock);
	}
}
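
/* Wrapper around the driver's modify_qp that enforces PKey security.
 * When the port, PKey index, or alternate path changes, the proposed
 * settings are listed and checked before the hardware is modified; on
 * success they replace the old settings, on failure they are dropped.
 */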
int ib_security_modify_qp(struct ib_qp *qp,
			  struct ib_qp_attr *qp_attr,
			  int qp_attr_mask,
			  struct ib_udata *udata)
{
	int ret = 0;
	struct ib_ports_pkeys *tmp_pps;
	struct ib_ports_pkeys *new_pps = NULL;
	struct ib_qp *real_qp = qp->real_qp;
	bool special_qp = (real_qp->qp_type == IB_QPT_SMI ||
			   real_qp->qp_type == IB_QPT_GSI ||
			   real_qp->qp_type >= IB_QPT_RESERVED1);
	bool pps_change = ((qp_attr_mask & (IB_QP_PKEY_INDEX | IB_QP_PORT)) ||
			   (qp_attr_mask & IB_QP_ALT_PATH));

	WARN_ONCE((qp_attr_mask & IB_QP_PORT &&
		   rdma_protocol_ib(real_qp->device, qp_attr->port_num) &&
		   !real_qp->qp_sec),
		  "%s: QP security is not initialized for IB QP: %d\n",
		  __func__, real_qp->qp_num);

	/* The port/pkey settings are maintained only for the real QP.  Open
	 * handles on the real QP will be in the shared_qp_list.  When
	 * enforcing security on the real QP all the shared QPs will be
	 * checked as well.
	 */
	if (pps_change && !special_qp && real_qp->qp_sec) {
		mutex_lock(&real_qp->qp_sec->mutex);
		new_pps = get_new_pps(real_qp,
				      qp_attr,
				      qp_attr_mask);
		if (!new_pps) {
			mutex_unlock(&real_qp->qp_sec->mutex);
			return -ENOMEM;
		}
		/* Add this QP to the lists for the new port
		 * and pkey settings before checking for permission
		 * in case there is a concurrent cache update
		 * occurring.  Walking the list for a cache change
		 * doesn't acquire the security mutex unless it's
		 * sending the QP to error.
		 */
		ret = port_pkey_list_insert(&new_pps->main);

		if (!ret)
			ret = port_pkey_list_insert(&new_pps->alt);

		if (!ret)
			ret = check_qp_port_pkey_settings(new_pps,
							  real_qp->qp_sec);
	}

	if (!ret)
		ret = real_qp->device->modify_qp(real_qp,
						 qp_attr,
						 qp_attr_mask,
						 udata);

	if (new_pps) {
		/* Clean up the lists and free the appropriate
		 * ports_pkeys structure.
		 */
		if (ret) {
			tmp_pps = new_pps;
		} else {
			tmp_pps = real_qp->qp_sec->ports_pkeys;
			real_qp->qp_sec->ports_pkeys = new_pps;
		}

		if (tmp_pps) {
			port_pkey_list_remove(&tmp_pps->main);
			port_pkey_list_remove(&tmp_pps->alt);
		}

		kfree(tmp_pps);
		mutex_unlock(&real_qp->qp_sec->mutex);
	}
	return ret;
}
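
/* Resolve a PKey index to its PKey value and subnet prefix, then ask
 * the LSM whether the given security context may use that partition.
 */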
static int ib_security_pkey_access(struct ib_device *dev,
				   u8 port_num,
				   u16 pkey_index,
				   void *sec)
{
	u64 subnet_prefix;
	u16 pkey;
	int ret;

	if (!rdma_protocol_ib(dev, port_num))
		return 0;

	ret = ib_get_cached_pkey(dev, port_num, pkey_index, &pkey);
	if (ret)
		return ret;

	ret = ib_get_cached_subnet_prefix(dev, port_num, &subnet_prefix);
	if (ret)
		return ret;

	return security_ib_pkey_access(sec, subnet_prefix, pkey);
}
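
/* LSM notifier callback: re-evaluate whether an SMI MAD agent is
 * still allowed to manage the subnet whenever the policy changes.
 */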
static int ib_mad_agent_security_change(struct notifier_block *nb,
					unsigned long event,
					void *data)
{
	struct ib_mad_agent *ag = container_of(nb, struct ib_mad_agent, lsm_nb);

	if (event != LSM_POLICY_CHANGE)
		return NOTIFY_DONE;

	ag->smp_allowed = !security_ib_endport_manage_subnet(
		ag->security, dev_name(&ag->device->dev), ag->port_num);

	return NOTIFY_OK;
}
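
/* Allocate the LSM security context for a new MAD agent.  SMI agents
 * additionally need permission to manage the subnet, and register an
 * LSM policy-change notifier so that permission tracks policy updates.
 */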
int ib_mad_agent_security_setup(struct ib_mad_agent *agent,
				enum ib_qp_type qp_type)
{
	int ret;

	if (!rdma_protocol_ib(agent->device, agent->port_num))
		return 0;

	ret = security_ib_alloc_security(&agent->security);
	if (ret)
		return ret;

	if (qp_type != IB_QPT_SMI)
		return 0;

	ret = security_ib_endport_manage_subnet(agent->security,
						dev_name(&agent->device->dev),
						agent->port_num);
	if (ret)
		return ret;

	agent->lsm_nb.notifier_call = ib_mad_agent_security_change;
	ret = register_lsm_notifier(&agent->lsm_nb);
	if (ret)
		return ret;

	agent->smp_allowed = true;
	agent->lsm_nb_reg = true;
	return 0;
}

void ib_mad_agent_security_cleanup(struct ib_mad_agent *agent)
{
	if (!rdma_protocol_ib(agent->device, agent->port_num))
		return;

	security_ib_free_security(agent->security);
	if (agent->lsm_nb_reg)
		unregister_lsm_notifier(&agent->lsm_nb);
}
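
/* Per-MAD security check: SMI traffic is gated by the cached
 * smp_allowed flag, all other MADs by a PKey access check against
 * the agent's security context.
 */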
int ib_mad_enforce_security(struct ib_mad_agent_private *map, u16 pkey_index)
{
	if (!rdma_protocol_ib(map->agent.device, map->agent.port_num))
		return 0;

	if (map->agent.qp->qp_type == IB_QPT_SMI) {
		if (!map->agent.smp_allowed)
			return -EACCES;
		return 0;
	}

	return ib_security_pkey_access(map->agent.device,
				       map->agent.port_num,
				       pkey_index,
				       map->agent.security);
}