security.c

/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifdef CONFIG_SECURITY_INFINIBAND

#include <linux/security.h>
#include <linux/completion.h>
#include <linux/list.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>
#include "core_priv.h"
#include "mad_priv.h"
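
/* This file implements the LSM (e.g. SELinux) enforcement points for
 * InfiniBand: it tracks which partition keys (PKeys) each QP uses so
 * access can be rechecked when the security policy or a port's PKey
 * table changes, and it gates MAD agent access to the SMI.
 */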
static struct pkey_index_qp_list *get_pkey_idx_qp_list(struct ib_port_pkey *pp)
{
        struct pkey_index_qp_list *pkey = NULL;
        struct pkey_index_qp_list *tmp_pkey;
        struct ib_device *dev = pp->sec->dev;

        spin_lock(&dev->port_pkey_list[pp->port_num].list_lock);
        list_for_each_entry(tmp_pkey,
                            &dev->port_pkey_list[pp->port_num].pkey_list,
                            pkey_index_list) {
                if (tmp_pkey->pkey_index == pp->pkey_index) {
                        pkey = tmp_pkey;
                        break;
                }
        }
        spin_unlock(&dev->port_pkey_list[pp->port_num].list_lock);
        return pkey;
}
static int get_pkey_and_subnet_prefix(struct ib_port_pkey *pp,
                                      u16 *pkey,
                                      u64 *subnet_prefix)
{
        struct ib_device *dev = pp->sec->dev;
        int ret;

        ret = ib_get_cached_pkey(dev, pp->port_num, pp->pkey_index, pkey);
        if (ret)
                return ret;

        ret = ib_get_cached_subnet_prefix(dev, pp->port_num, subnet_prefix);

        return ret;
}
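
/* Check that the LSM allows this QP, and every QP sharing its
 * security structure, to use the given PKey on the given subnet.
 */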
static int enforce_qp_pkey_security(u16 pkey,
                                    u64 subnet_prefix,
                                    struct ib_qp_security *qp_sec)
{
        struct ib_qp_security *shared_qp_sec;
        int ret;

        ret = security_ib_pkey_access(qp_sec->security, subnet_prefix, pkey);
        if (ret)
                return ret;

        list_for_each_entry(shared_qp_sec,
                            &qp_sec->shared_qp_list,
                            shared_qp_list) {
                ret = security_ib_pkey_access(shared_qp_sec->security,
                                              subnet_prefix,
                                              pkey);
                if (ret)
                        return ret;
        }
        return 0;
}
/* The caller of this function must hold the QP security
 * mutex of the QP of the security structure in *pps.
 *
 * It takes separate ports_pkeys and security structures
 * because in some cases the pps will be for new settings,
 * or the pps will be for the real QP while the security
 * structure is for a shared QP.
 */
static int check_qp_port_pkey_settings(struct ib_ports_pkeys *pps,
                                       struct ib_qp_security *sec)
{
        u64 subnet_prefix;
        u16 pkey;
        int ret = 0;

        if (!pps)
                return 0;

        if (pps->main.state != IB_PORT_PKEY_NOT_VALID) {
                ret = get_pkey_and_subnet_prefix(&pps->main,
                                                 &pkey,
                                                 &subnet_prefix);
                if (ret)
                        return ret;

                ret = enforce_qp_pkey_security(pkey,
                                               subnet_prefix,
                                               sec);
                if (ret)
                        return ret;
        }

        if (pps->alt.state != IB_PORT_PKEY_NOT_VALID) {
                ret = get_pkey_and_subnet_prefix(&pps->alt,
                                                 &pkey,
                                                 &subnet_prefix);
                if (ret)
                        return ret;

                ret = enforce_qp_pkey_security(pkey,
                                               subnet_prefix,
                                               sec);
        }

        return ret;
}
/* The caller of this function must hold the QP security
 * mutex.
 */
static void qp_to_error(struct ib_qp_security *sec)
{
        struct ib_qp_security *shared_qp_sec;
        struct ib_qp_attr attr = {
                .qp_state = IB_QPS_ERR
        };
        struct ib_event event = {
                .event = IB_EVENT_QP_FATAL
        };

        /* If the QP is in the process of being destroyed
         * the qp pointer in the security structure is
         * undefined.  It cannot be modified now.
         */
        if (sec->destroying)
                return;

        ib_modify_qp(sec->qp,
                     &attr,
                     IB_QP_STATE);

        if (sec->qp->event_handler && sec->qp->qp_context) {
                event.element.qp = sec->qp;
                sec->qp->event_handler(&event,
                                       sec->qp->qp_context);
        }

        list_for_each_entry(shared_qp_sec,
                            &sec->shared_qp_list,
                            shared_qp_list) {
                struct ib_qp *qp = shared_qp_sec->qp;

                if (qp->event_handler && qp->qp_context) {
                        event.element.qp = qp;
                        event.device = qp->device;
                        qp->event_handler(&event,
                                          qp->qp_context);
                }
        }
}
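
/* Recheck every QP listed against this PKey index after a cache
 * change.  QPs that no longer pass the LSM check are collected on a
 * local list, then sent to the error state with the QP security mutex
 * held.
 */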
static inline void check_pkey_qps(struct pkey_index_qp_list *pkey,
                                  struct ib_device *device,
                                  u8 port_num,
                                  u64 subnet_prefix)
{
        struct ib_port_pkey *pp, *tmp_pp;
        bool comp;
        LIST_HEAD(to_error_list);
        u16 pkey_val;

        if (!ib_get_cached_pkey(device,
                                port_num,
                                pkey->pkey_index,
                                &pkey_val)) {
                spin_lock(&pkey->qp_list_lock);
                list_for_each_entry(pp, &pkey->qp_list, qp_list) {
                        if (atomic_read(&pp->sec->error_list_count))
                                continue;

                        if (enforce_qp_pkey_security(pkey_val,
                                                     subnet_prefix,
                                                     pp->sec)) {
                                atomic_inc(&pp->sec->error_list_count);
                                list_add(&pp->to_error_list,
                                         &to_error_list);
                        }
                }
                spin_unlock(&pkey->qp_list_lock);
        }

        list_for_each_entry_safe(pp,
                                 tmp_pp,
                                 &to_error_list,
                                 to_error_list) {
                mutex_lock(&pp->sec->mutex);
                qp_to_error(pp->sec);
                list_del(&pp->to_error_list);
                atomic_dec(&pp->sec->error_list_count);
                comp = pp->sec->destroying;
                mutex_unlock(&pp->sec->mutex);

                if (comp)
                        complete(&pp->sec->error_complete);
        }
}
/* The caller of this function must hold the QP security
 * mutex.
 */
static int port_pkey_list_insert(struct ib_port_pkey *pp)
{
        struct pkey_index_qp_list *tmp_pkey;
        struct pkey_index_qp_list *pkey;
        struct ib_device *dev;
        u8 port_num = pp->port_num;
        int ret = 0;

        if (pp->state != IB_PORT_PKEY_VALID)
                return 0;

        dev = pp->sec->dev;

        pkey = get_pkey_idx_qp_list(pp);

        if (!pkey) {
                bool found = false;

                pkey = kzalloc(sizeof(*pkey), GFP_KERNEL);
                if (!pkey)
                        return -ENOMEM;

                spin_lock(&dev->port_pkey_list[port_num].list_lock);
                /* Check for the PKey again.  A racing process may
                 * have created it.
                 */
                list_for_each_entry(tmp_pkey,
                                    &dev->port_pkey_list[port_num].pkey_list,
                                    pkey_index_list) {
                        if (tmp_pkey->pkey_index == pp->pkey_index) {
                                kfree(pkey);
                                pkey = tmp_pkey;
                                found = true;
                                break;
                        }
                }

                if (!found) {
                        pkey->pkey_index = pp->pkey_index;
                        spin_lock_init(&pkey->qp_list_lock);
                        INIT_LIST_HEAD(&pkey->qp_list);
                        list_add(&pkey->pkey_index_list,
                                 &dev->port_pkey_list[port_num].pkey_list);
                }
                spin_unlock(&dev->port_pkey_list[port_num].list_lock);
        }

        spin_lock(&pkey->qp_list_lock);
        list_add(&pp->qp_list, &pkey->qp_list);
        spin_unlock(&pkey->qp_list_lock);

        pp->state = IB_PORT_PKEY_LISTED;

        return ret;
}
/* The caller of this function must hold the QP security
 * mutex.
 */
static void port_pkey_list_remove(struct ib_port_pkey *pp)
{
        struct pkey_index_qp_list *pkey;

        if (pp->state != IB_PORT_PKEY_LISTED)
                return;

        pkey = get_pkey_idx_qp_list(pp);

        spin_lock(&pkey->qp_list_lock);
        list_del(&pp->qp_list);
        spin_unlock(&pkey->qp_list_lock);

        /* The setting may still be valid, i.e. after
         * a destroy has failed for example.
         */
        pp->state = IB_PORT_PKEY_VALID;
}
static void destroy_qp_security(struct ib_qp_security *sec)
{
        security_ib_free_security(sec->security);
        kfree(sec->ports_pkeys);
        kfree(sec);
}
/* The caller of this function must hold the QP security
 * mutex.
 */
static struct ib_ports_pkeys *get_new_pps(const struct ib_qp *qp,
                                          const struct ib_qp_attr *qp_attr,
                                          int qp_attr_mask)
{
        struct ib_ports_pkeys *new_pps;
        struct ib_ports_pkeys *qp_pps = qp->qp_sec->ports_pkeys;

        new_pps = kzalloc(sizeof(*new_pps), GFP_KERNEL);
        if (!new_pps)
                return NULL;

        if (qp_attr_mask & (IB_QP_PKEY_INDEX | IB_QP_PORT)) {
                if (!qp_pps) {
                        new_pps->main.port_num = qp_attr->port_num;
                        new_pps->main.pkey_index = qp_attr->pkey_index;
                } else {
                        new_pps->main.port_num = (qp_attr_mask & IB_QP_PORT) ?
                                                  qp_attr->port_num :
                                                  qp_pps->main.port_num;

                        new_pps->main.pkey_index =
                                        (qp_attr_mask & IB_QP_PKEY_INDEX) ?
                                         qp_attr->pkey_index :
                                         qp_pps->main.pkey_index;
                }
                new_pps->main.state = IB_PORT_PKEY_VALID;
        } else if (qp_pps) {
                new_pps->main.port_num = qp_pps->main.port_num;
                new_pps->main.pkey_index = qp_pps->main.pkey_index;
                if (qp_pps->main.state != IB_PORT_PKEY_NOT_VALID)
                        new_pps->main.state = IB_PORT_PKEY_VALID;
        }

        if (qp_attr_mask & IB_QP_ALT_PATH) {
                new_pps->alt.port_num = qp_attr->alt_port_num;
                new_pps->alt.pkey_index = qp_attr->alt_pkey_index;
                new_pps->alt.state = IB_PORT_PKEY_VALID;
        } else if (qp_pps) {
                new_pps->alt.port_num = qp_pps->alt.port_num;
                new_pps->alt.pkey_index = qp_pps->alt.pkey_index;
                if (qp_pps->alt.state != IB_PORT_PKEY_NOT_VALID)
                        new_pps->alt.state = IB_PORT_PKEY_VALID;
        }

        new_pps->main.sec = qp->qp_sec;
        new_pps->alt.sec = qp->qp_sec;
        return new_pps;
}
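
/* Attach a shared QP handle to the security state of its real QP.
 * The new handle is checked against the real QP's current port/PKey
 * settings and, if allowed, is added to the real QP's shared_qp_list.
 */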
int ib_open_shared_qp_security(struct ib_qp *qp, struct ib_device *dev)
{
        struct ib_qp *real_qp = qp->real_qp;
        int ret;

        ret = ib_create_qp_security(qp, dev);
        if (ret)
                return ret;

        if (!qp->qp_sec)
                return 0;

        mutex_lock(&real_qp->qp_sec->mutex);
        ret = check_qp_port_pkey_settings(real_qp->qp_sec->ports_pkeys,
                                          qp->qp_sec);

        if (ret)
                goto ret;

        if (qp != real_qp)
                list_add(&qp->qp_sec->shared_qp_list,
                         &real_qp->qp_sec->shared_qp_list);
ret:
        mutex_unlock(&real_qp->qp_sec->mutex);
        if (ret)
                destroy_qp_security(qp->qp_sec);

        return ret;
}
void ib_close_shared_qp_security(struct ib_qp_security *sec)
{
        struct ib_qp *real_qp = sec->qp->real_qp;

        mutex_lock(&real_qp->qp_sec->mutex);
        list_del(&sec->shared_qp_list);
        mutex_unlock(&real_qp->qp_sec->mutex);

        destroy_qp_security(sec);
}
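
/* Allocate and initialize the security structure for a new QP.  This
 * is a no-op (qp->qp_sec stays NULL) on devices with no IB ports,
 * since only IB uses PKeys.
 */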
int ib_create_qp_security(struct ib_qp *qp, struct ib_device *dev)
{
        u8 i = rdma_start_port(dev);
        bool is_ib = false;
        int ret;

        while (i <= rdma_end_port(dev) && !is_ib)
                is_ib = rdma_protocol_ib(dev, i++);

        /* If this isn't an IB device don't create the security context */
        if (!is_ib)
                return 0;

        qp->qp_sec = kzalloc(sizeof(*qp->qp_sec), GFP_KERNEL);
        if (!qp->qp_sec)
                return -ENOMEM;

        qp->qp_sec->qp = qp;
        qp->qp_sec->dev = dev;
        mutex_init(&qp->qp_sec->mutex);
        INIT_LIST_HEAD(&qp->qp_sec->shared_qp_list);
        atomic_set(&qp->qp_sec->error_list_count, 0);
        init_completion(&qp->qp_sec->error_complete);
        ret = security_ib_alloc_security(&qp->qp_sec->security);
        if (ret) {
                kfree(qp->qp_sec);
                qp->qp_sec = NULL;
        }

        return ret;
}
EXPORT_SYMBOL(ib_create_qp_security);
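
/* QP destruction is split into three phases so a concurrent
 * cache-change walk never operates on a half-destroyed QP.  A sketch
 * of the intended call sequence, as driven by the verbs layer
 * (ib_destroy_qp() in verbs.c):
 *
 *        ib_destroy_qp_security_begin(sec);
 *        ret = qp->device->destroy_qp(qp);
 *        if (ret)
 *                ib_destroy_qp_security_abort(sec);  destroy failed
 *        else
 *                ib_destroy_qp_security_end(sec);    destroy succeeded
 */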
void ib_destroy_qp_security_begin(struct ib_qp_security *sec)
{
        /* Return if not IB */
        if (!sec)
                return;

        mutex_lock(&sec->mutex);

        /* Remove the QP from the lists so it won't get added to
         * a to_error_list during the destroy process.
         */
        if (sec->ports_pkeys) {
                port_pkey_list_remove(&sec->ports_pkeys->main);
                port_pkey_list_remove(&sec->ports_pkeys->alt);
        }

        /* If the QP is already in one or more of those lists
         * the destroying flag will ensure the to error flow
         * doesn't operate on an undefined QP.
         */
        sec->destroying = true;

        /* Record the error list count to know how many completions
         * to wait for.
         */
        sec->error_comps_pending = atomic_read(&sec->error_list_count);

        mutex_unlock(&sec->mutex);
}
void ib_destroy_qp_security_abort(struct ib_qp_security *sec)
{
        int ret;
        int i;

        /* Return if not IB */
        if (!sec)
                return;

        /* If a concurrent cache update is in progress this
         * QP security could be marked for an error state
         * transition.  Wait for this to complete.
         */
        for (i = 0; i < sec->error_comps_pending; i++)
                wait_for_completion(&sec->error_complete);

        mutex_lock(&sec->mutex);
        sec->destroying = false;

        /* Restore the position in the lists and verify
         * access is still allowed in case a cache update
         * occurred while attempting to destroy.
         *
         * Because these settings were already listed
         * and removed during ib_destroy_qp_security_begin
         * we know the pkey_index_qp_list for the PKey
         * already exists so port_pkey_list_insert won't fail.
         */
        if (sec->ports_pkeys) {
                port_pkey_list_insert(&sec->ports_pkeys->main);
                port_pkey_list_insert(&sec->ports_pkeys->alt);
        }

        ret = check_qp_port_pkey_settings(sec->ports_pkeys, sec);
        if (ret)
                qp_to_error(sec);

        mutex_unlock(&sec->mutex);
}
void ib_destroy_qp_security_end(struct ib_qp_security *sec)
{
        int i;

        /* Return if not IB */
        if (!sec)
                return;

        /* If a concurrent cache update is occurring we must
         * wait until this QP security structure is processed
         * in the QP to error flow before destroying it because
         * the to_error_list is in use.
         */
        for (i = 0; i < sec->error_comps_pending; i++)
                wait_for_completion(&sec->error_complete);

        destroy_qp_security(sec);
}
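
/* Called when a port's PKey table has changed.  Every PKey index
 * tracked on the port is rechecked against the QPs that use it.
 */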
void ib_security_cache_change(struct ib_device *device,
                              u8 port_num,
                              u64 subnet_prefix)
{
        struct pkey_index_qp_list *pkey;

        list_for_each_entry(pkey,
                            &device->port_pkey_list[port_num].pkey_list,
                            pkey_index_list) {
                check_pkey_qps(pkey,
                               device,
                               port_num,
                               subnet_prefix);
        }
}
void ib_security_destroy_port_pkey_list(struct ib_device *device)
{
        struct pkey_index_qp_list *pkey, *tmp_pkey;
        int i;

        for (i = rdma_start_port(device); i <= rdma_end_port(device); i++) {
                spin_lock(&device->port_pkey_list[i].list_lock);
                list_for_each_entry_safe(pkey,
                                         tmp_pkey,
                                         &device->port_pkey_list[i].pkey_list,
                                         pkey_index_list) {
                        list_del(&pkey->pkey_index_list);
                        kfree(pkey);
                }
                spin_unlock(&device->port_pkey_list[i].list_lock);
        }
}
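
/* Security-aware wrapper around the driver's modify_qp.  When a
 * modify changes the port, PKey index, or alternate path, the new
 * settings are listed and checked before the hardware QP is touched;
 * whichever ports_pkeys structure loses (the old one on success, the
 * new one on failure) is unlisted and freed.
 */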
int ib_security_modify_qp(struct ib_qp *qp,
                          struct ib_qp_attr *qp_attr,
                          int qp_attr_mask,
                          struct ib_udata *udata)
{
        int ret = 0;
        struct ib_ports_pkeys *tmp_pps;
        struct ib_ports_pkeys *new_pps = NULL;
        struct ib_qp *real_qp = qp->real_qp;
        bool special_qp = (real_qp->qp_type == IB_QPT_SMI ||
                           real_qp->qp_type == IB_QPT_GSI ||
                           real_qp->qp_type >= IB_QPT_RESERVED1);
        bool pps_change = ((qp_attr_mask & (IB_QP_PKEY_INDEX | IB_QP_PORT)) ||
                           (qp_attr_mask & IB_QP_ALT_PATH));

        WARN_ONCE((qp_attr_mask & IB_QP_PORT &&
                   rdma_protocol_ib(real_qp->device, qp_attr->port_num) &&
                   !real_qp->qp_sec),
                  "%s: QP security is not initialized for IB QP: %d\n",
                  __func__, real_qp->qp_num);

        /* The port/pkey settings are maintained only for the real QP.  Open
         * handles on the real QP will be in the shared_qp_list.  When
         * enforcing security on the real QP all the shared QPs will be
         * checked as well.
         */
        if (pps_change && !special_qp && real_qp->qp_sec) {
                mutex_lock(&real_qp->qp_sec->mutex);
                new_pps = get_new_pps(real_qp,
                                      qp_attr,
                                      qp_attr_mask);
                if (!new_pps) {
                        mutex_unlock(&real_qp->qp_sec->mutex);
                        return -ENOMEM;
                }
                /* Add this QP to the lists for the new port
                 * and pkey settings before checking for permission
                 * in case there is a concurrent cache update
                 * occurring.  Walking the list for a cache change
                 * doesn't acquire the security mutex unless it's
                 * sending the QP to error.
                 */
                ret = port_pkey_list_insert(&new_pps->main);

                if (!ret)
                        ret = port_pkey_list_insert(&new_pps->alt);

                if (!ret)
                        ret = check_qp_port_pkey_settings(new_pps,
                                                          real_qp->qp_sec);
        }

        if (!ret)
                ret = real_qp->device->modify_qp(real_qp,
                                                 qp_attr,
                                                 qp_attr_mask,
                                                 udata);

        if (new_pps) {
                /* Clean up the lists and free the appropriate
                 * ports_pkeys structure.
                 */
                if (ret) {
                        tmp_pps = new_pps;
                } else {
                        tmp_pps = real_qp->qp_sec->ports_pkeys;
                        real_qp->qp_sec->ports_pkeys = new_pps;
                }

                if (tmp_pps) {
                        port_pkey_list_remove(&tmp_pps->main);
                        port_pkey_list_remove(&tmp_pps->alt);
                }

                kfree(tmp_pps);
                mutex_unlock(&real_qp->qp_sec->mutex);
        }
        return ret;
}
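
/* Resolve a PKey index to its PKey value and subnet prefix, then ask
 * the LSM whether the given security context may use that PKey.
 */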
static int ib_security_pkey_access(struct ib_device *dev,
                                   u8 port_num,
                                   u16 pkey_index,
                                   void *sec)
{
        u64 subnet_prefix;
        u16 pkey;
        int ret;

        if (!rdma_protocol_ib(dev, port_num))
                return 0;

        ret = ib_get_cached_pkey(dev, port_num, pkey_index, &pkey);
        if (ret)
                return ret;

        ret = ib_get_cached_subnet_prefix(dev, port_num, &subnet_prefix);
        if (ret)
                return ret;

        return security_ib_pkey_access(sec, subnet_prefix, pkey);
}
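
/* LSM notifier callback: when the security policy changes,
 * re-evaluate whether this MAD agent may still manage the subnet.
 */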
static int ib_mad_agent_security_change(struct notifier_block *nb,
                                        unsigned long event,
                                        void *data)
{
        struct ib_mad_agent *ag = container_of(nb, struct ib_mad_agent, lsm_nb);

        if (event != LSM_POLICY_CHANGE)
                return NOTIFY_DONE;

        ag->smp_allowed = !security_ib_endport_manage_subnet(ag->security,
                                                             ag->device->name,
                                                             ag->port_num);

        return NOTIFY_OK;
}
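
/* Allocate the LSM security context for a new MAD agent.  SMI agents
 * additionally need the endport-manage-subnet permission and register
 * for policy-change notifications so smp_allowed stays current.
 */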
int ib_mad_agent_security_setup(struct ib_mad_agent *agent,
                                enum ib_qp_type qp_type)
{
        int ret;

        if (!rdma_protocol_ib(agent->device, agent->port_num))
                return 0;

        ret = security_ib_alloc_security(&agent->security);
        if (ret)
                return ret;

        if (qp_type != IB_QPT_SMI)
                return 0;

        ret = security_ib_endport_manage_subnet(agent->security,
                                                agent->device->name,
                                                agent->port_num);
        if (ret)
                goto free_security;

        agent->lsm_nb.notifier_call = ib_mad_agent_security_change;
        ret = register_lsm_notifier(&agent->lsm_nb);
        if (ret)
                goto free_security;

        agent->smp_allowed = true;
        agent->lsm_nb_reg = true;
        return 0;

free_security:
        /* Don't leak the security context allocated above. */
        security_ib_free_security(agent->security);
        return ret;
}
void ib_mad_agent_security_cleanup(struct ib_mad_agent *agent)
{
        if (!rdma_protocol_ib(agent->device, agent->port_num))
                return;

        /* Unregister the notifier first so a policy-change callback
         * can't run against a freed security context.
         */
        if (agent->lsm_nb_reg)
                unregister_lsm_notifier(&agent->lsm_nb);

        security_ib_free_security(agent->security);
}
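
/* Per-MAD access check.  SMI traffic is gated by the cached
 * smp_allowed flag; everything else is checked against the PKey the
 * MAD is sent or received with.
 */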
int ib_mad_enforce_security(struct ib_mad_agent_private *map, u16 pkey_index)
{
        if (!rdma_protocol_ib(map->agent.device, map->agent.port_num))
                return 0;

        if (map->agent.qp->qp_type == IB_QPT_SMI) {
                if (!map->agent.smp_allowed)
                        return -EACCES;
                return 0;
        }

        return ib_security_pkey_access(map->agent.device,
                                       map->agent.port_num,
                                       pkey_index,
                                       map->agent.security);
}

#endif /* CONFIG_SECURITY_INFINIBAND */