/* drivers/infiniband/core/security.c */
  1. /*
  2. * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
  3. *
  4. * This software is available to you under a choice of one of two
  5. * licenses. You may choose to be licensed under the terms of the GNU
  6. * General Public License (GPL) Version 2, available from the file
  7. * COPYING in the main directory of this source tree, or the
  8. * OpenIB.org BSD license below:
  9. *
  10. * Redistribution and use in source and binary forms, with or
  11. * without modification, are permitted provided that the following
  12. * conditions are met:
  13. *
  14. * - Redistributions of source code must retain the above
  15. * copyright notice, this list of conditions and the following
  16. * disclaimer.
  17. *
  18. * - Redistributions in binary form must reproduce the above
  19. * copyright notice, this list of conditions and the following
  20. * disclaimer in the documentation and/or other materials
  21. * provided with the distribution.
  22. *
  23. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  24. * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  25. * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  26. * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  27. * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  28. * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  29. * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  30. * SOFTWARE.
  31. */
  32. #ifdef CONFIG_SECURITY_INFINIBAND
  33. #include <linux/security.h>
  34. #include <linux/completion.h>
  35. #include <linux/list.h>
  36. #include <rdma/ib_verbs.h>
  37. #include <rdma/ib_cache.h>
  38. #include "core_priv.h"
  39. #include "mad_priv.h"
  40. static struct pkey_index_qp_list *get_pkey_idx_qp_list(struct ib_port_pkey *pp)
  41. {
  42. struct pkey_index_qp_list *pkey = NULL;
  43. struct pkey_index_qp_list *tmp_pkey;
  44. struct ib_device *dev = pp->sec->dev;
  45. spin_lock(&dev->port_pkey_list[pp->port_num].list_lock);
  46. list_for_each_entry(tmp_pkey,
  47. &dev->port_pkey_list[pp->port_num].pkey_list,
  48. pkey_index_list) {
  49. if (tmp_pkey->pkey_index == pp->pkey_index) {
  50. pkey = tmp_pkey;
  51. break;
  52. }
  53. }
  54. spin_unlock(&dev->port_pkey_list[pp->port_num].list_lock);
  55. return pkey;
  56. }
  57. static int get_pkey_and_subnet_prefix(struct ib_port_pkey *pp,
  58. u16 *pkey,
  59. u64 *subnet_prefix)
  60. {
  61. struct ib_device *dev = pp->sec->dev;
  62. int ret;
  63. ret = ib_get_cached_pkey(dev, pp->port_num, pp->pkey_index, pkey);
  64. if (ret)
  65. return ret;
  66. ret = ib_get_cached_subnet_prefix(dev, pp->port_num, subnet_prefix);
  67. return ret;
  68. }
  69. static int enforce_qp_pkey_security(u16 pkey,
  70. u64 subnet_prefix,
  71. struct ib_qp_security *qp_sec)
  72. {
  73. struct ib_qp_security *shared_qp_sec;
  74. int ret;
  75. ret = security_ib_pkey_access(qp_sec->security, subnet_prefix, pkey);
  76. if (ret)
  77. return ret;
  78. if (qp_sec->qp == qp_sec->qp->real_qp) {
  79. list_for_each_entry(shared_qp_sec,
  80. &qp_sec->shared_qp_list,
  81. shared_qp_list) {
  82. ret = security_ib_pkey_access(shared_qp_sec->security,
  83. subnet_prefix,
  84. pkey);
  85. if (ret)
  86. return ret;
  87. }
  88. }
  89. return 0;
  90. }
  91. /* The caller of this function must hold the QP security
  92. * mutex of the QP of the security structure in *pps.
  93. *
  94. * It takes separate ports_pkeys and security structure
  95. * because in some cases the pps will be for a new settings
  96. * or the pps will be for the real QP and security structure
  97. * will be for a shared QP.
  98. */
  99. static int check_qp_port_pkey_settings(struct ib_ports_pkeys *pps,
  100. struct ib_qp_security *sec)
  101. {
  102. u64 subnet_prefix;
  103. u16 pkey;
  104. int ret = 0;
  105. if (!pps)
  106. return 0;
  107. if (pps->main.state != IB_PORT_PKEY_NOT_VALID) {
  108. get_pkey_and_subnet_prefix(&pps->main,
  109. &pkey,
  110. &subnet_prefix);
  111. ret = enforce_qp_pkey_security(pkey,
  112. subnet_prefix,
  113. sec);
  114. }
  115. if (ret)
  116. return ret;
  117. if (pps->alt.state != IB_PORT_PKEY_NOT_VALID) {
  118. get_pkey_and_subnet_prefix(&pps->alt,
  119. &pkey,
  120. &subnet_prefix);
  121. ret = enforce_qp_pkey_security(pkey,
  122. subnet_prefix,
  123. sec);
  124. }
  125. return ret;
  126. }
/* The caller of this function must hold the QP security
 * mutex.
 *
 * Transition the QP to the error state after it failed a PKey
 * security re-check, and deliver an IB_EVENT_QP_FATAL event to the
 * real QP and every QP sharing it.
 */
static void qp_to_error(struct ib_qp_security *sec)
{
	struct ib_qp_security *shared_qp_sec;
	struct ib_qp_attr attr = {
		.qp_state = IB_QPS_ERR
	};
	struct ib_event event = {
		.event = IB_EVENT_QP_FATAL
	};

	/* If the QP is in the process of being destroyed
	 * the qp pointer in the security structure is
	 * undefined.  It cannot be modified now.
	 */
	if (sec->destroying)
		return;

	ib_modify_qp(sec->qp,
		     &attr,
		     IB_QP_STATE);

	if (sec->qp->event_handler && sec->qp->qp_context) {
		/* NOTE(review): event.device is only assigned in the
		 * shared-QP loop below; this first handler call sees it
		 * zero-initialized — confirm that is intended.
		 */
		event.element.qp = sec->qp;
		sec->qp->event_handler(&event,
				       sec->qp->qp_context);
	}

	/* Shared QPs moved to error implicitly with the real QP;
	 * notify each of their owners as well.
	 */
	list_for_each_entry(shared_qp_sec,
			    &sec->shared_qp_list,
			    shared_qp_list) {
		struct ib_qp *qp = shared_qp_sec->qp;

		if (qp->event_handler && qp->qp_context) {
			event.element.qp = qp;
			event.device = qp->device;
			qp->event_handler(&event,
					  qp->qp_context);
		}
	}
}
/* Re-validate every QP registered against @pkey on @port_num after a
 * cache change.  QPs that no longer pass the LSM PKey check are
 * collected on a local to_error_list under the qp_list_lock, then
 * transitioned to the error state under their own security mutex —
 * the two-phase split keeps the spinlock out of the mutex section.
 */
static inline void check_pkey_qps(struct pkey_index_qp_list *pkey,
				  struct ib_device *device,
				  u8 port_num,
				  u64 subnet_prefix)
{
	struct ib_port_pkey *pp, *tmp_pp;
	bool comp;
	LIST_HEAD(to_error_list);
	u16 pkey_val;

	/* If the PKey value can't be read from the cache there is
	 * nothing to enforce against; fall through to the (empty)
	 * error list.
	 */
	if (!ib_get_cached_pkey(device,
				port_num,
				pkey->pkey_index,
				&pkey_val)) {
		spin_lock(&pkey->qp_list_lock);
		list_for_each_entry(pp, &pkey->qp_list, qp_list) {
			/* Nonzero count means another walk already
			 * queued this QP for the error transition.
			 */
			if (atomic_read(&pp->sec->error_list_count))
				continue;

			if (enforce_qp_pkey_security(pkey_val,
						     subnet_prefix,
						     pp->sec)) {
				atomic_inc(&pp->sec->error_list_count);
				list_add(&pp->to_error_list,
					 &to_error_list);
			}
		}
		spin_unlock(&pkey->qp_list_lock);
	}

	list_for_each_entry_safe(pp,
				 tmp_pp,
				 &to_error_list,
				 to_error_list) {
		mutex_lock(&pp->sec->mutex);
		qp_to_error(pp->sec);
		list_del(&pp->to_error_list);
		atomic_dec(&pp->sec->error_list_count);
		/* Sample destroying under the mutex; a concurrent
		 * destroy waits on error_complete once per queued list.
		 */
		comp = pp->sec->destroying;
		mutex_unlock(&pp->sec->mutex);

		if (comp)
			complete(&pp->sec->error_complete);
	}
}
/* The caller of this function must hold the QP security
 * mutex.
 *
 * Register @pp on the per-port list for its PKey index, creating the
 * pkey_index_qp_list entry if it does not exist yet.  On success the
 * state advances from IB_PORT_PKEY_VALID to IB_PORT_PKEY_LISTED.
 * Returns 0, or -ENOMEM if a new list entry could not be allocated.
 */
static int port_pkey_list_insert(struct ib_port_pkey *pp)
{
	struct pkey_index_qp_list *tmp_pkey;
	struct pkey_index_qp_list *pkey;
	struct ib_device *dev;
	u8 port_num = pp->port_num;
	int ret = 0;

	if (pp->state != IB_PORT_PKEY_VALID)
		return 0;

	dev = pp->sec->dev;

	pkey = get_pkey_idx_qp_list(pp);

	if (!pkey) {
		bool found = false;

		/* Allocate outside the spinlock; GFP_KERNEL may sleep. */
		pkey = kzalloc(sizeof(*pkey), GFP_KERNEL);
		if (!pkey)
			return -ENOMEM;

		spin_lock(&dev->port_pkey_list[port_num].list_lock);
		/* Check for the PKey again.  A racing process may
		 * have created it.
		 */
		list_for_each_entry(tmp_pkey,
				    &dev->port_pkey_list[port_num].pkey_list,
				    pkey_index_list) {
			if (tmp_pkey->pkey_index == pp->pkey_index) {
				/* Lost the race; use the winner's entry. */
				kfree(pkey);
				pkey = tmp_pkey;
				found = true;
				break;
			}
		}

		if (!found) {
			pkey->pkey_index = pp->pkey_index;
			spin_lock_init(&pkey->qp_list_lock);
			INIT_LIST_HEAD(&pkey->qp_list);
			list_add(&pkey->pkey_index_list,
				 &dev->port_pkey_list[port_num].pkey_list);
		}
		spin_unlock(&dev->port_pkey_list[port_num].list_lock);
	}

	spin_lock(&pkey->qp_list_lock);
	list_add(&pp->qp_list, &pkey->qp_list);
	spin_unlock(&pkey->qp_list_lock);

	pp->state = IB_PORT_PKEY_LISTED;

	return ret;
}
  254. /* The caller of this function must hold the QP security
  255. * mutex.
  256. */
  257. static void port_pkey_list_remove(struct ib_port_pkey *pp)
  258. {
  259. struct pkey_index_qp_list *pkey;
  260. if (pp->state != IB_PORT_PKEY_LISTED)
  261. return;
  262. pkey = get_pkey_idx_qp_list(pp);
  263. spin_lock(&pkey->qp_list_lock);
  264. list_del(&pp->qp_list);
  265. spin_unlock(&pkey->qp_list_lock);
  266. /* The setting may still be valid, i.e. after
  267. * a destroy has failed for example.
  268. */
  269. pp->state = IB_PORT_PKEY_VALID;
  270. }
/* Free a QP security structure: release the LSM security blob, the
 * cached port/PKey settings, then the structure itself.  The caller
 * must guarantee it is no longer reachable from any list.
 */
static void destroy_qp_security(struct ib_qp_security *sec)
{
	security_ib_free_security(sec->security);
	kfree(sec->ports_pkeys);
	kfree(sec);
}
  277. /* The caller of this function must hold the QP security
  278. * mutex.
  279. */
  280. static struct ib_ports_pkeys *get_new_pps(const struct ib_qp *qp,
  281. const struct ib_qp_attr *qp_attr,
  282. int qp_attr_mask)
  283. {
  284. struct ib_ports_pkeys *new_pps;
  285. struct ib_ports_pkeys *qp_pps = qp->qp_sec->ports_pkeys;
  286. new_pps = kzalloc(sizeof(*new_pps), GFP_KERNEL);
  287. if (!new_pps)
  288. return NULL;
  289. if (qp_attr_mask & (IB_QP_PKEY_INDEX | IB_QP_PORT)) {
  290. if (!qp_pps) {
  291. new_pps->main.port_num = qp_attr->port_num;
  292. new_pps->main.pkey_index = qp_attr->pkey_index;
  293. } else {
  294. new_pps->main.port_num = (qp_attr_mask & IB_QP_PORT) ?
  295. qp_attr->port_num :
  296. qp_pps->main.port_num;
  297. new_pps->main.pkey_index =
  298. (qp_attr_mask & IB_QP_PKEY_INDEX) ?
  299. qp_attr->pkey_index :
  300. qp_pps->main.pkey_index;
  301. }
  302. new_pps->main.state = IB_PORT_PKEY_VALID;
  303. } else if (qp_pps) {
  304. new_pps->main.port_num = qp_pps->main.port_num;
  305. new_pps->main.pkey_index = qp_pps->main.pkey_index;
  306. if (qp_pps->main.state != IB_PORT_PKEY_NOT_VALID)
  307. new_pps->main.state = IB_PORT_PKEY_VALID;
  308. }
  309. if (qp_attr_mask & IB_QP_ALT_PATH) {
  310. new_pps->alt.port_num = qp_attr->alt_port_num;
  311. new_pps->alt.pkey_index = qp_attr->alt_pkey_index;
  312. new_pps->alt.state = IB_PORT_PKEY_VALID;
  313. } else if (qp_pps) {
  314. new_pps->alt.port_num = qp_pps->alt.port_num;
  315. new_pps->alt.pkey_index = qp_pps->alt.pkey_index;
  316. if (qp_pps->alt.state != IB_PORT_PKEY_NOT_VALID)
  317. new_pps->alt.state = IB_PORT_PKEY_VALID;
  318. }
  319. new_pps->main.sec = qp->qp_sec;
  320. new_pps->alt.sec = qp->qp_sec;
  321. return new_pps;
  322. }
  323. int ib_open_shared_qp_security(struct ib_qp *qp, struct ib_device *dev)
  324. {
  325. struct ib_qp *real_qp = qp->real_qp;
  326. int ret;
  327. ret = ib_create_qp_security(qp, dev);
  328. if (ret)
  329. return ret;
  330. mutex_lock(&real_qp->qp_sec->mutex);
  331. ret = check_qp_port_pkey_settings(real_qp->qp_sec->ports_pkeys,
  332. qp->qp_sec);
  333. if (ret)
  334. goto ret;
  335. if (qp != real_qp)
  336. list_add(&qp->qp_sec->shared_qp_list,
  337. &real_qp->qp_sec->shared_qp_list);
  338. ret:
  339. mutex_unlock(&real_qp->qp_sec->mutex);
  340. if (ret)
  341. destroy_qp_security(qp->qp_sec);
  342. return ret;
  343. }
  344. void ib_close_shared_qp_security(struct ib_qp_security *sec)
  345. {
  346. struct ib_qp *real_qp = sec->qp->real_qp;
  347. mutex_lock(&real_qp->qp_sec->mutex);
  348. list_del(&sec->shared_qp_list);
  349. mutex_unlock(&real_qp->qp_sec->mutex);
  350. destroy_qp_security(sec);
  351. }
  352. int ib_create_qp_security(struct ib_qp *qp, struct ib_device *dev)
  353. {
  354. int ret;
  355. qp->qp_sec = kzalloc(sizeof(*qp->qp_sec), GFP_KERNEL);
  356. if (!qp->qp_sec)
  357. return -ENOMEM;
  358. qp->qp_sec->qp = qp;
  359. qp->qp_sec->dev = dev;
  360. mutex_init(&qp->qp_sec->mutex);
  361. INIT_LIST_HEAD(&qp->qp_sec->shared_qp_list);
  362. atomic_set(&qp->qp_sec->error_list_count, 0);
  363. init_completion(&qp->qp_sec->error_complete);
  364. ret = security_ib_alloc_security(&qp->qp_sec->security);
  365. if (ret)
  366. kfree(qp->qp_sec);
  367. return ret;
  368. }
  369. EXPORT_SYMBOL(ib_create_qp_security);
/* First phase of QP security teardown: delist the port/PKey settings,
 * mark the structure as destroying, and snapshot how many cache-walk
 * error completions must be waited for by the later phases.
 */
void ib_destroy_qp_security_begin(struct ib_qp_security *sec)
{
	mutex_lock(&sec->mutex);

	/* Remove the QP from the lists so it won't get added to
	 * a to_error_list during the destroy process.
	 */
	if (sec->ports_pkeys) {
		port_pkey_list_remove(&sec->ports_pkeys->main);
		port_pkey_list_remove(&sec->ports_pkeys->alt);
	}

	/* If the QP is already in one or more of those lists
	 * the destroying flag will ensure the to error flow
	 * doesn't operate on an undefined QP.
	 */
	sec->destroying = true;

	/* Record the error list count to know how many completions
	 * to wait for.
	 */
	sec->error_comps_pending = atomic_read(&sec->error_list_count);

	mutex_unlock(&sec->mutex);
}
/* Undo ib_destroy_qp_security_begin after a failed QP destroy: wait
 * out any in-flight cache-walk error transitions, relist the settings,
 * and re-check access (sending the QP to error if it no longer
 * passes).
 */
void ib_destroy_qp_security_abort(struct ib_qp_security *sec)
{
	int ret;
	int i;

	/* If a concurrent cache update is in progress this
	 * QP security could be marked for an error state
	 * transition.  Wait for this to complete.
	 */
	for (i = 0; i < sec->error_comps_pending; i++)
		wait_for_completion(&sec->error_complete);

	mutex_lock(&sec->mutex);
	sec->destroying = false;

	/* Restore the position in the lists and verify
	 * access is still allowed in case a cache update
	 * occurred while attempting to destroy.
	 *
	 * Because these setting were listed already
	 * and removed during ib_destroy_qp_security_begin
	 * we know the pkey_index_qp_list for the PKey
	 * already exists so port_pkey_list_insert won't fail.
	 */
	if (sec->ports_pkeys) {
		port_pkey_list_insert(&sec->ports_pkeys->main);
		port_pkey_list_insert(&sec->ports_pkeys->alt);
	}

	ret = check_qp_port_pkey_settings(sec->ports_pkeys, sec);
	if (ret)
		qp_to_error(sec);

	mutex_unlock(&sec->mutex);
}
  421. void ib_destroy_qp_security_end(struct ib_qp_security *sec)
  422. {
  423. int i;
  424. /* If a concurrent cache update is occurring we must
  425. * wait until this QP security structure is processed
  426. * in the QP to error flow before destroying it because
  427. * the to_error_list is in use.
  428. */
  429. for (i = 0; i < sec->error_comps_pending; i++)
  430. wait_for_completion(&sec->error_complete);
  431. destroy_qp_security(sec);
  432. }
/* Called when the PKey cache (or subnet prefix) for @port_num changes:
 * re-check every tracked PKey index's QPs against the new values.
 *
 * NOTE(review): pkey_list is walked here without taking
 * port_pkey_list[port_num].list_lock — presumably safe relative to
 * concurrent insert/remove; confirm against the callers' context.
 */
void ib_security_cache_change(struct ib_device *device,
			      u8 port_num,
			      u64 subnet_prefix)
{
	struct pkey_index_qp_list *pkey;

	list_for_each_entry(pkey,
			    &device->port_pkey_list[port_num].pkey_list,
			    pkey_index_list) {
		check_pkey_qps(pkey,
			       device,
			       port_num,
			       subnet_prefix);
	}
}
  447. void ib_security_destroy_port_pkey_list(struct ib_device *device)
  448. {
  449. struct pkey_index_qp_list *pkey, *tmp_pkey;
  450. int i;
  451. for (i = rdma_start_port(device); i <= rdma_end_port(device); i++) {
  452. spin_lock(&device->port_pkey_list[i].list_lock);
  453. list_for_each_entry_safe(pkey,
  454. tmp_pkey,
  455. &device->port_pkey_list[i].pkey_list,
  456. pkey_index_list) {
  457. list_del(&pkey->pkey_index_list);
  458. kfree(pkey);
  459. }
  460. spin_unlock(&device->port_pkey_list[i].list_lock);
  461. }
  462. }
  463. int ib_security_modify_qp(struct ib_qp *qp,
  464. struct ib_qp_attr *qp_attr,
  465. int qp_attr_mask,
  466. struct ib_udata *udata)
  467. {
  468. int ret = 0;
  469. struct ib_ports_pkeys *tmp_pps;
  470. struct ib_ports_pkeys *new_pps;
  471. bool special_qp = (qp->qp_type == IB_QPT_SMI ||
  472. qp->qp_type == IB_QPT_GSI ||
  473. qp->qp_type >= IB_QPT_RESERVED1);
  474. bool pps_change = ((qp_attr_mask & (IB_QP_PKEY_INDEX | IB_QP_PORT)) ||
  475. (qp_attr_mask & IB_QP_ALT_PATH));
  476. if (pps_change && !special_qp) {
  477. mutex_lock(&qp->qp_sec->mutex);
  478. new_pps = get_new_pps(qp,
  479. qp_attr,
  480. qp_attr_mask);
  481. /* Add this QP to the lists for the new port
  482. * and pkey settings before checking for permission
  483. * in case there is a concurrent cache update
  484. * occurring. Walking the list for a cache change
  485. * doesn't acquire the security mutex unless it's
  486. * sending the QP to error.
  487. */
  488. ret = port_pkey_list_insert(&new_pps->main);
  489. if (!ret)
  490. ret = port_pkey_list_insert(&new_pps->alt);
  491. if (!ret)
  492. ret = check_qp_port_pkey_settings(new_pps,
  493. qp->qp_sec);
  494. }
  495. if (!ret)
  496. ret = qp->device->modify_qp(qp->real_qp,
  497. qp_attr,
  498. qp_attr_mask,
  499. udata);
  500. if (pps_change && !special_qp) {
  501. /* Clean up the lists and free the appropriate
  502. * ports_pkeys structure.
  503. */
  504. if (ret) {
  505. tmp_pps = new_pps;
  506. } else {
  507. tmp_pps = qp->qp_sec->ports_pkeys;
  508. qp->qp_sec->ports_pkeys = new_pps;
  509. }
  510. if (tmp_pps) {
  511. port_pkey_list_remove(&tmp_pps->main);
  512. port_pkey_list_remove(&tmp_pps->alt);
  513. }
  514. kfree(tmp_pps);
  515. mutex_unlock(&qp->qp_sec->mutex);
  516. }
  517. return ret;
  518. }
  519. EXPORT_SYMBOL(ib_security_modify_qp);
  520. int ib_security_pkey_access(struct ib_device *dev,
  521. u8 port_num,
  522. u16 pkey_index,
  523. void *sec)
  524. {
  525. u64 subnet_prefix;
  526. u16 pkey;
  527. int ret;
  528. ret = ib_get_cached_pkey(dev, port_num, pkey_index, &pkey);
  529. if (ret)
  530. return ret;
  531. ret = ib_get_cached_subnet_prefix(dev, port_num, &subnet_prefix);
  532. if (ret)
  533. return ret;
  534. return security_ib_pkey_access(sec, subnet_prefix, pkey);
  535. }
  536. EXPORT_SYMBOL(ib_security_pkey_access);
  537. static int ib_mad_agent_security_change(struct notifier_block *nb,
  538. unsigned long event,
  539. void *data)
  540. {
  541. struct ib_mad_agent *ag = container_of(nb, struct ib_mad_agent, lsm_nb);
  542. if (event != LSM_POLICY_CHANGE)
  543. return NOTIFY_DONE;
  544. ag->smp_allowed = !security_ib_endport_manage_subnet(ag->security,
  545. ag->device->name,
  546. ag->port_num);
  547. return NOTIFY_OK;
  548. }
  549. int ib_mad_agent_security_setup(struct ib_mad_agent *agent,
  550. enum ib_qp_type qp_type)
  551. {
  552. int ret;
  553. ret = security_ib_alloc_security(&agent->security);
  554. if (ret)
  555. return ret;
  556. if (qp_type != IB_QPT_SMI)
  557. return 0;
  558. ret = security_ib_endport_manage_subnet(agent->security,
  559. agent->device->name,
  560. agent->port_num);
  561. if (ret)
  562. return ret;
  563. agent->lsm_nb.notifier_call = ib_mad_agent_security_change;
  564. ret = register_lsm_notifier(&agent->lsm_nb);
  565. if (ret)
  566. return ret;
  567. agent->smp_allowed = true;
  568. agent->lsm_nb_reg = true;
  569. return 0;
  570. }
  571. void ib_mad_agent_security_cleanup(struct ib_mad_agent *agent)
  572. {
  573. security_ib_free_security(agent->security);
  574. if (agent->lsm_nb_reg)
  575. unregister_lsm_notifier(&agent->lsm_nb);
  576. }
  577. int ib_mad_enforce_security(struct ib_mad_agent_private *map, u16 pkey_index)
  578. {
  579. int ret;
  580. if (map->agent.qp->qp_type == IB_QPT_SMI && !map->agent.smp_allowed)
  581. return -EACCES;
  582. ret = ib_security_pkey_access(map->agent.device,
  583. map->agent.port_num,
  584. pkey_index,
  585. map->agent.security);
  586. if (ret)
  587. return ret;
  588. return 0;
  589. }
  590. #endif /* CONFIG_SECURITY_INFINIBAND */