security.c

/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifdef CONFIG_SECURITY_INFINIBAND

#include <linux/security.h>
#include <linux/completion.h>
#include <linux/list.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>
#include "core_priv.h"
#include "mad_priv.h"
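
/* Look up the pkey_index_qp_list for the PKey index described by
 * *pp on its port, or return NULL if no list has been created for
 * that index yet.
 */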
static struct pkey_index_qp_list *get_pkey_idx_qp_list(struct ib_port_pkey *pp)
{
        struct pkey_index_qp_list *pkey = NULL;
        struct pkey_index_qp_list *tmp_pkey;
        struct ib_device *dev = pp->sec->dev;

        spin_lock(&dev->port_pkey_list[pp->port_num].list_lock);
        list_for_each_entry(tmp_pkey,
                            &dev->port_pkey_list[pp->port_num].pkey_list,
                            pkey_index_list) {
                if (tmp_pkey->pkey_index == pp->pkey_index) {
                        pkey = tmp_pkey;
                        break;
                }
        }
        spin_unlock(&dev->port_pkey_list[pp->port_num].list_lock);
        return pkey;
}
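
/* Read the cached PKey value and subnet prefix for the port and
 * PKey index described by *pp.
 */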
static int get_pkey_and_subnet_prefix(struct ib_port_pkey *pp,
                                      u16 *pkey,
                                      u64 *subnet_prefix)
{
        struct ib_device *dev = pp->sec->dev;
        int ret;

        ret = ib_get_cached_pkey(dev, pp->port_num, pp->pkey_index, pkey);
        if (ret)
                return ret;

        ret = ib_get_cached_subnet_prefix(dev, pp->port_num, subnet_prefix);

        return ret;
}
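
/* Ask the LSM whether this QP, and every QP sharing it, may use
 * the given PKey on the given subnet.
 */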
static int enforce_qp_pkey_security(u16 pkey,
                                    u64 subnet_prefix,
                                    struct ib_qp_security *qp_sec)
{
        struct ib_qp_security *shared_qp_sec;
        int ret;

        ret = security_ib_pkey_access(qp_sec->security, subnet_prefix, pkey);
        if (ret)
                return ret;

        list_for_each_entry(shared_qp_sec,
                            &qp_sec->shared_qp_list,
                            shared_qp_list) {
                ret = security_ib_pkey_access(shared_qp_sec->security,
                                              subnet_prefix,
                                              pkey);
                if (ret)
                        return ret;
        }
        return 0;
}

/* The caller of this function must hold the QP security
 * mutex of the QP of the security structure in *pps.
 *
 * It takes separate ports_pkeys and security structures
 * because in some cases the pps will be for new settings,
 * or the pps will be for the real QP while the security
 * structure is for a shared QP.
 */
static int check_qp_port_pkey_settings(struct ib_ports_pkeys *pps,
                                       struct ib_qp_security *sec)
{
        u64 subnet_prefix;
        u16 pkey;
        int ret = 0;

        if (!pps)
                return 0;

        if (pps->main.state != IB_PORT_PKEY_NOT_VALID) {
                ret = get_pkey_and_subnet_prefix(&pps->main,
                                                 &pkey,
                                                 &subnet_prefix);
                if (ret)
                        return ret;

                ret = enforce_qp_pkey_security(pkey,
                                               subnet_prefix,
                                               sec);
                if (ret)
                        return ret;
        }

        if (pps->alt.state != IB_PORT_PKEY_NOT_VALID) {
                ret = get_pkey_and_subnet_prefix(&pps->alt,
                                                 &pkey,
                                                 &subnet_prefix);
                if (ret)
                        return ret;

                ret = enforce_qp_pkey_security(pkey,
                                               subnet_prefix,
                                               sec);
        }

        return ret;
}

/* The caller of this function must hold the QP security
 * mutex.
 */
static void qp_to_error(struct ib_qp_security *sec)
{
        struct ib_qp_security *shared_qp_sec;
        struct ib_qp_attr attr = {
                .qp_state = IB_QPS_ERR
        };
        struct ib_event event = {
                .event = IB_EVENT_QP_FATAL
        };

        /* If the QP is in the process of being destroyed
         * the qp pointer in the security structure is
         * undefined.  It cannot be modified now.
         */
        if (sec->destroying)
                return;

        ib_modify_qp(sec->qp,
                     &attr,
                     IB_QP_STATE);

        if (sec->qp->event_handler && sec->qp->qp_context) {
                event.element.qp = sec->qp;
                sec->qp->event_handler(&event,
                                       sec->qp->qp_context);
        }

        list_for_each_entry(shared_qp_sec,
                            &sec->shared_qp_list,
                            shared_qp_list) {
                struct ib_qp *qp = shared_qp_sec->qp;

                if (qp->event_handler && qp->qp_context) {
                        event.element.qp = qp;
                        event.device = qp->device;
                        qp->event_handler(&event,
                                          qp->qp_context);
                }
        }
}
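
/* Re-check every QP listed under this PKey index after a cache
 * change.  QPs that fail the security check are collected on a
 * local list and moved to the error state outside the spinlock,
 * because qp_to_error() needs the QP security mutex.
 */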
static inline void check_pkey_qps(struct pkey_index_qp_list *pkey,
                                  struct ib_device *device,
                                  u8 port_num,
                                  u64 subnet_prefix)
{
        struct ib_port_pkey *pp, *tmp_pp;
        bool comp;
        LIST_HEAD(to_error_list);
        u16 pkey_val;

        if (!ib_get_cached_pkey(device,
                                port_num,
                                pkey->pkey_index,
                                &pkey_val)) {
                spin_lock(&pkey->qp_list_lock);
                list_for_each_entry(pp, &pkey->qp_list, qp_list) {
                        if (atomic_read(&pp->sec->error_list_count))
                                continue;

                        if (enforce_qp_pkey_security(pkey_val,
                                                     subnet_prefix,
                                                     pp->sec)) {
                                atomic_inc(&pp->sec->error_list_count);
                                list_add(&pp->to_error_list,
                                         &to_error_list);
                        }
                }
                spin_unlock(&pkey->qp_list_lock);
        }

        list_for_each_entry_safe(pp,
                                 tmp_pp,
                                 &to_error_list,
                                 to_error_list) {
                mutex_lock(&pp->sec->mutex);
                qp_to_error(pp->sec);
                list_del(&pp->to_error_list);
                atomic_dec(&pp->sec->error_list_count);
                comp = pp->sec->destroying;
                mutex_unlock(&pp->sec->mutex);

                if (comp)
                        complete(&pp->sec->error_complete);
        }
}

/* The caller of this function must hold the QP security
 * mutex.
 */
static int port_pkey_list_insert(struct ib_port_pkey *pp)
{
        struct pkey_index_qp_list *tmp_pkey;
        struct pkey_index_qp_list *pkey;
        struct ib_device *dev;
        u8 port_num = pp->port_num;
        int ret = 0;

        if (pp->state != IB_PORT_PKEY_VALID)
                return 0;

        dev = pp->sec->dev;

        pkey = get_pkey_idx_qp_list(pp);

        if (!pkey) {
                bool found = false;

                pkey = kzalloc(sizeof(*pkey), GFP_KERNEL);
                if (!pkey)
                        return -ENOMEM;

                spin_lock(&dev->port_pkey_list[port_num].list_lock);
                /* Check for the PKey again.  A racing process may
                 * have created it.
                 */
                list_for_each_entry(tmp_pkey,
                                    &dev->port_pkey_list[port_num].pkey_list,
                                    pkey_index_list) {
                        if (tmp_pkey->pkey_index == pp->pkey_index) {
                                kfree(pkey);
                                pkey = tmp_pkey;
                                found = true;
                                break;
                        }
                }

                if (!found) {
                        pkey->pkey_index = pp->pkey_index;
                        spin_lock_init(&pkey->qp_list_lock);
                        INIT_LIST_HEAD(&pkey->qp_list);
                        list_add(&pkey->pkey_index_list,
                                 &dev->port_pkey_list[port_num].pkey_list);
                }
                spin_unlock(&dev->port_pkey_list[port_num].list_lock);
        }

        spin_lock(&pkey->qp_list_lock);
        list_add(&pp->qp_list, &pkey->qp_list);
        spin_unlock(&pkey->qp_list_lock);

        pp->state = IB_PORT_PKEY_LISTED;

        return ret;
}

/* The caller of this function must hold the QP security
 * mutex.
 */
static void port_pkey_list_remove(struct ib_port_pkey *pp)
{
        struct pkey_index_qp_list *pkey;

        if (pp->state != IB_PORT_PKEY_LISTED)
                return;

        pkey = get_pkey_idx_qp_list(pp);

        spin_lock(&pkey->qp_list_lock);
        list_del(&pp->qp_list);
        spin_unlock(&pkey->qp_list_lock);

        /* The setting may still be valid, i.e. after
         * a destroy has failed for example.
         */
        pp->state = IB_PORT_PKEY_VALID;
}
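
/* Free the LSM blob and the ports/pkeys state attached to a QP
 * security structure.
 */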
static void destroy_qp_security(struct ib_qp_security *sec)
{
        security_ib_free_security(sec->security);
        kfree(sec->ports_pkeys);
        kfree(sec);
}

/* The caller of this function must hold the QP security
 * mutex.
 */
static struct ib_ports_pkeys *get_new_pps(const struct ib_qp *qp,
                                          const struct ib_qp_attr *qp_attr,
                                          int qp_attr_mask)
{
        struct ib_ports_pkeys *new_pps;
        struct ib_ports_pkeys *qp_pps = qp->qp_sec->ports_pkeys;

        new_pps = kzalloc(sizeof(*new_pps), GFP_KERNEL);
        if (!new_pps)
                return NULL;

        if (qp_attr_mask & (IB_QP_PKEY_INDEX | IB_QP_PORT)) {
                if (!qp_pps) {
                        new_pps->main.port_num = qp_attr->port_num;
                        new_pps->main.pkey_index = qp_attr->pkey_index;
                } else {
                        new_pps->main.port_num = (qp_attr_mask & IB_QP_PORT) ?
                                                  qp_attr->port_num :
                                                  qp_pps->main.port_num;

                        new_pps->main.pkey_index =
                                        (qp_attr_mask & IB_QP_PKEY_INDEX) ?
                                         qp_attr->pkey_index :
                                         qp_pps->main.pkey_index;
                }
                new_pps->main.state = IB_PORT_PKEY_VALID;
        } else if (qp_pps) {
                new_pps->main.port_num = qp_pps->main.port_num;
                new_pps->main.pkey_index = qp_pps->main.pkey_index;
                if (qp_pps->main.state != IB_PORT_PKEY_NOT_VALID)
                        new_pps->main.state = IB_PORT_PKEY_VALID;
        }

        if (qp_attr_mask & IB_QP_ALT_PATH) {
                new_pps->alt.port_num = qp_attr->alt_port_num;
                new_pps->alt.pkey_index = qp_attr->alt_pkey_index;
                new_pps->alt.state = IB_PORT_PKEY_VALID;
        } else if (qp_pps) {
                new_pps->alt.port_num = qp_pps->alt.port_num;
                new_pps->alt.pkey_index = qp_pps->alt.pkey_index;
                if (qp_pps->alt.state != IB_PORT_PKEY_NOT_VALID)
                        new_pps->alt.state = IB_PORT_PKEY_VALID;
        }

        new_pps->main.sec = qp->qp_sec;
        new_pps->alt.sec = qp->qp_sec;
        return new_pps;
}
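
/* Attach a new handle on a shared QP to the real QP's security
 * state: allocate its own security structure, verify it may use
 * the real QP's current port/PKey settings, and add it to the
 * real QP's shared_qp_list.
 */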
int ib_open_shared_qp_security(struct ib_qp *qp, struct ib_device *dev)
{
        struct ib_qp *real_qp = qp->real_qp;
        int ret;

        ret = ib_create_qp_security(qp, dev);

        if (ret)
                return ret;

        mutex_lock(&real_qp->qp_sec->mutex);
        ret = check_qp_port_pkey_settings(real_qp->qp_sec->ports_pkeys,
                                          qp->qp_sec);

        if (ret)
                goto ret;

        if (qp != real_qp)
                list_add(&qp->qp_sec->shared_qp_list,
                         &real_qp->qp_sec->shared_qp_list);
ret:
        mutex_unlock(&real_qp->qp_sec->mutex);
        if (ret)
                destroy_qp_security(qp->qp_sec);

        return ret;
}
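
/* Detach a shared QP handle from the real QP's shared_qp_list and
 * free its security structure.
 */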
void ib_close_shared_qp_security(struct ib_qp_security *sec)
{
        struct ib_qp *real_qp = sec->qp->real_qp;

        mutex_lock(&real_qp->qp_sec->mutex);
        list_del(&sec->shared_qp_list);
        mutex_unlock(&real_qp->qp_sec->mutex);

        destroy_qp_security(sec);
}
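
/* Allocate and initialize the security structure for a new QP,
 * including its LSM blob.
 */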
int ib_create_qp_security(struct ib_qp *qp, struct ib_device *dev)
{
        int ret;

        qp->qp_sec = kzalloc(sizeof(*qp->qp_sec), GFP_KERNEL);
        if (!qp->qp_sec)
                return -ENOMEM;

        qp->qp_sec->qp = qp;
        qp->qp_sec->dev = dev;
        mutex_init(&qp->qp_sec->mutex);
        INIT_LIST_HEAD(&qp->qp_sec->shared_qp_list);
        atomic_set(&qp->qp_sec->error_list_count, 0);
        init_completion(&qp->qp_sec->error_complete);
        ret = security_ib_alloc_security(&qp->qp_sec->security);
        if (ret) {
                kfree(qp->qp_sec);
                qp->qp_sec = NULL;
        }

        return ret;
}
EXPORT_SYMBOL(ib_create_qp_security);
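
/* Begin tearing down a QP's security state.  The structure is
 * taken off the per-PKey lists and marked as destroying so that a
 * concurrent cache-change walk will not touch the QP while it is
 * being destroyed.
 */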
void ib_destroy_qp_security_begin(struct ib_qp_security *sec)
{
        mutex_lock(&sec->mutex);

        /* Remove the QP from the lists so it won't get added to
         * a to_error_list during the destroy process.
         */
        if (sec->ports_pkeys) {
                port_pkey_list_remove(&sec->ports_pkeys->main);
                port_pkey_list_remove(&sec->ports_pkeys->alt);
        }

        /* If the QP is already in one or more of those lists
         * the destroying flag will ensure the to error flow
         * doesn't operate on an undefined QP.
         */
        sec->destroying = true;

        /* Record the error list count to know how many completions
         * to wait for.
         */
        sec->error_comps_pending = atomic_read(&sec->error_list_count);

        mutex_unlock(&sec->mutex);
}
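
/* Undo ib_destroy_qp_security_begin() after a failed destroy: wait
 * out any in-flight error-state transitions, relist the port/PKey
 * settings, and re-verify that access is still allowed.
 */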
void ib_destroy_qp_security_abort(struct ib_qp_security *sec)
{
        int ret;
        int i;

        /* If a concurrent cache update is in progress this
         * QP security could be marked for an error state
         * transition.  Wait for this to complete.
         */
        for (i = 0; i < sec->error_comps_pending; i++)
                wait_for_completion(&sec->error_complete);

        mutex_lock(&sec->mutex);
        sec->destroying = false;

        /* Restore the position in the lists and verify
         * access is still allowed in case a cache update
         * occurred while attempting to destroy.
         *
         * Because these settings were listed already
         * and removed during ib_destroy_qp_security_begin
         * we know the pkey_index_qp_list for the PKey
         * already exists so port_pkey_list_insert won't fail.
         */
        if (sec->ports_pkeys) {
                port_pkey_list_insert(&sec->ports_pkeys->main);
                port_pkey_list_insert(&sec->ports_pkeys->alt);
        }

        ret = check_qp_port_pkey_settings(sec->ports_pkeys, sec);
        if (ret)
                qp_to_error(sec);

        mutex_unlock(&sec->mutex);
}
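
/* Finish destroying a QP's security state once the QP itself is
 * gone.
 */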
void ib_destroy_qp_security_end(struct ib_qp_security *sec)
{
        int i;

        /* If a concurrent cache update is occurring we must
         * wait until this QP security structure is processed
         * in the QP to error flow before destroying it because
         * the to_error_list is in use.
         */
        for (i = 0; i < sec->error_comps_pending; i++)
                wait_for_completion(&sec->error_complete);

        destroy_qp_security(sec);
}
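
/* Called when the PKey cache for a port changes: re-check every
 * QP listed under every PKey index on that port.
 */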
void ib_security_cache_change(struct ib_device *device,
                              u8 port_num,
                              u64 subnet_prefix)
{
        struct pkey_index_qp_list *pkey;

        list_for_each_entry(pkey,
                            &device->port_pkey_list[port_num].pkey_list,
                            pkey_index_list) {
                check_pkey_qps(pkey,
                               device,
                               port_num,
                               subnet_prefix);
        }
}
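
/* Free the per-port PKey index lists when a device is being
 * unregistered.
 */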
void ib_security_destroy_port_pkey_list(struct ib_device *device)
{
        struct pkey_index_qp_list *pkey, *tmp_pkey;
        int i;

        for (i = rdma_start_port(device); i <= rdma_end_port(device); i++) {
                spin_lock(&device->port_pkey_list[i].list_lock);
                list_for_each_entry_safe(pkey,
                                         tmp_pkey,
                                         &device->port_pkey_list[i].pkey_list,
                                         pkey_index_list) {
                        list_del(&pkey->pkey_index_list);
                        kfree(pkey);
                }
                spin_unlock(&device->port_pkey_list[i].list_lock);
        }
}
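
/* Security-checking wrapper around the device's modify_qp verb.
 * New port/PKey settings are listed and checked before the modify
 * is attempted; afterwards either the old settings (on success) or
 * the new ones (on failure) are unlisted and freed.
 */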
int ib_security_modify_qp(struct ib_qp *qp,
                          struct ib_qp_attr *qp_attr,
                          int qp_attr_mask,
                          struct ib_udata *udata)
{
        int ret = 0;
        struct ib_ports_pkeys *tmp_pps;
        struct ib_ports_pkeys *new_pps;
        struct ib_qp *real_qp = qp->real_qp;
        bool special_qp = (real_qp->qp_type == IB_QPT_SMI ||
                           real_qp->qp_type == IB_QPT_GSI ||
                           real_qp->qp_type >= IB_QPT_RESERVED1);
        bool pps_change = ((qp_attr_mask & (IB_QP_PKEY_INDEX | IB_QP_PORT)) ||
                           (qp_attr_mask & IB_QP_ALT_PATH));

        /* The port/pkey settings are maintained only for the real QP. Open
         * handles on the real QP will be in the shared_qp_list. When
         * enforcing security on the real QP all the shared QPs will be
         * checked as well.
         */
        if (pps_change && !special_qp) {
                mutex_lock(&real_qp->qp_sec->mutex);
                new_pps = get_new_pps(real_qp,
                                      qp_attr,
                                      qp_attr_mask);
                /* get_new_pps() returns NULL if its allocation fails. */
                if (!new_pps) {
                        mutex_unlock(&real_qp->qp_sec->mutex);
                        return -ENOMEM;
                }
                /* Add this QP to the lists for the new port
                 * and pkey settings before checking for permission
                 * in case there is a concurrent cache update
                 * occurring.  Walking the list for a cache change
                 * doesn't acquire the security mutex unless it's
                 * sending the QP to error.
                 */
                ret = port_pkey_list_insert(&new_pps->main);

                if (!ret)
                        ret = port_pkey_list_insert(&new_pps->alt);

                if (!ret)
                        ret = check_qp_port_pkey_settings(new_pps,
                                                          real_qp->qp_sec);
        }

        if (!ret)
                ret = real_qp->device->modify_qp(real_qp,
                                                 qp_attr,
                                                 qp_attr_mask,
                                                 udata);

        if (pps_change && !special_qp) {
                /* Clean up the lists and free the appropriate
                 * ports_pkeys structure.
                 */
                if (ret) {
                        tmp_pps = new_pps;
                } else {
                        tmp_pps = real_qp->qp_sec->ports_pkeys;
                        real_qp->qp_sec->ports_pkeys = new_pps;
                }

                if (tmp_pps) {
                        port_pkey_list_remove(&tmp_pps->main);
                        port_pkey_list_remove(&tmp_pps->alt);
                }

                kfree(tmp_pps);
                mutex_unlock(&real_qp->qp_sec->mutex);
        }
        return ret;
}
EXPORT_SYMBOL(ib_security_modify_qp);
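
/* Check whether the LSM blob in sec may use the PKey at the given
 * index on this port.
 */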
int ib_security_pkey_access(struct ib_device *dev,
                            u8 port_num,
                            u16 pkey_index,
                            void *sec)
{
        u64 subnet_prefix;
        u16 pkey;
        int ret;

        ret = ib_get_cached_pkey(dev, port_num, pkey_index, &pkey);
        if (ret)
                return ret;

        ret = ib_get_cached_subnet_prefix(dev, port_num, &subnet_prefix);

        if (ret)
                return ret;

        return security_ib_pkey_access(sec, subnet_prefix, pkey);
}
EXPORT_SYMBOL(ib_security_pkey_access);
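
/* LSM notifier callback: re-evaluate whether the MAD agent may
 * still manage the subnet after a policy change.
 */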
static int ib_mad_agent_security_change(struct notifier_block *nb,
                                        unsigned long event,
                                        void *data)
{
        struct ib_mad_agent *ag = container_of(nb, struct ib_mad_agent, lsm_nb);

        if (event != LSM_POLICY_CHANGE)
                return NOTIFY_DONE;

        ag->smp_allowed = !security_ib_endport_manage_subnet(ag->security,
                                                             ag->device->name,
                                                             ag->port_num);

        return NOTIFY_OK;
}
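
/* Allocate the LSM blob for a MAD agent.  SMI agents additionally
 * need permission to manage the subnet, and register for LSM
 * policy-change notifications so that permission can be
 * re-evaluated.
 */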
int ib_mad_agent_security_setup(struct ib_mad_agent *agent,
                                enum ib_qp_type qp_type)
{
        int ret;

        ret = security_ib_alloc_security(&agent->security);
        if (ret)
                return ret;

        if (qp_type != IB_QPT_SMI)
                return 0;

        ret = security_ib_endport_manage_subnet(agent->security,
                                                agent->device->name,
                                                agent->port_num);
        if (ret)
                goto free_security;

        agent->lsm_nb.notifier_call = ib_mad_agent_security_change;
        ret = register_lsm_notifier(&agent->lsm_nb);
        if (ret)
                goto free_security;

        agent->smp_allowed = true;
        agent->lsm_nb_reg = true;
        return 0;

free_security:
        /* Don't leak the LSM blob allocated above on failure. */
        security_ib_free_security(agent->security);
        return ret;
}
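
/* Unregister the policy-change notifier (if one was registered)
 * before freeing the agent's LSM blob, so the callback cannot run
 * against freed memory.
 */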
void ib_mad_agent_security_cleanup(struct ib_mad_agent *agent)
{
        if (agent->lsm_nb_reg)
                unregister_lsm_notifier(&agent->lsm_nb);

        security_ib_free_security(agent->security);
}
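
/* Enforce security on a MAD: SMPs are refused outright when the
 * agent has lost subnet-management permission, and every MAD is
 * subject to a PKey access check.
 */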
int ib_mad_enforce_security(struct ib_mad_agent_private *map, u16 pkey_index)
{
        if (map->agent.qp->qp_type == IB_QPT_SMI && !map->agent.smp_allowed)
                return -EACCES;

        return ib_security_pkey_access(map->agent.device,
                                       map->agent.port_num,
                                       pkey_index,
                                       map->agent.security);
}

#endif /* CONFIG_SECURITY_INFINIBAND */