qib_keys.c

/*
 * Copyright (c) 2006, 2007, 2009 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "qib.h"

/**
 * qib_alloc_lkey - allocate an lkey
 * @mr: memory region that this lkey protects
 * @dma_region: 0->normal key, 1->restricted DMA key
 *
 * Returns 0 if successful, otherwise returns -errno.
 *
 * Increments the mr reference count as required.
 *
 * Sets the lkey field of mr for non-dma regions.
 */
int qib_alloc_lkey(struct qib_mregion *mr, int dma_region)
{
	unsigned long flags;
	u32 r;
	u32 n;
	int ret = 0;
	struct qib_ibdev *dev = to_idev(mr->pd->device);
	struct qib_lkey_table *rkt = &dev->lk_table;

	spin_lock_irqsave(&rkt->lock, flags);

	/* special case for dma_mr lkey == 0 */
	if (dma_region) {
		struct qib_mregion *tmr;

		tmr = rcu_access_pointer(dev->dma_mr);
		if (!tmr) {
			qib_get_mr(mr);
			rcu_assign_pointer(dev->dma_mr, mr);
			mr->lkey_published = 1;
		}
		goto success;
	}

	/* Find the next available LKEY */
	r = rkt->next;
	n = r;
	for (;;) {
		if (rkt->table[r] == NULL)
			break;
		r = (r + 1) & (rkt->max - 1);
		if (r == n)
			goto bail;
	}
	rkt->next = (r + 1) & (rkt->max - 1);
	/*
	 * Make sure lkey is never zero which is reserved to indicate an
	 * unrestricted LKEY.
	 */
	rkt->gen++;
	/*
	 * bits are capped in qib_verbs.c to ensure enough bits
	 * for the generation number
	 */
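	/*
	 * Resulting lkey layout (illustrative; e.g. with
	 * ib_qib_lkey_table_size == 16): table index in bits 31:16,
	 * generation number in bits 15:8, bits 7:0 zero.  Bumping the
	 * generation on every allocation makes a stale key that reuses
	 * a table slot fail the mr->lkey != sge->lkey check in
	 * qib_lkey_ok().
	 */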
	mr->lkey = (r << (32 - ib_qib_lkey_table_size)) |
		((((1 << (24 - ib_qib_lkey_table_size)) - 1) & rkt->gen)
		 << 8);
	if (mr->lkey == 0) {
		mr->lkey |= 1 << 8;
		rkt->gen++;
	}
	qib_get_mr(mr);
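	/*
	 * rcu_assign_pointer() orders the mregion initialization above
	 * before the pointer store, so lockless readers that find this
	 * entry under rcu_read_lock() see a fully initialized mr.
	 */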
	rcu_assign_pointer(rkt->table[r], mr);
	mr->lkey_published = 1;
success:
	spin_unlock_irqrestore(&rkt->lock, flags);
out:
	return ret;
bail:
	spin_unlock_irqrestore(&rkt->lock, flags);
	ret = -ENOMEM;
	goto out;
}

/**
 * qib_free_lkey - free an lkey
 * @mr: mr to free from tables
 */
void qib_free_lkey(struct qib_mregion *mr)
{
	unsigned long flags;
	u32 lkey = mr->lkey;
	u32 r;
	struct qib_ibdev *dev = to_idev(mr->pd->device);
	struct qib_lkey_table *rkt = &dev->lk_table;

	spin_lock_irqsave(&rkt->lock, flags);
	if (!mr->lkey_published)
		goto out;
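	/*
	 * NULLing a published pointer needs no memory-ordering guarantee,
	 * so RCU_INIT_POINTER() suffices here; readers still inside an
	 * RCU read-side critical section may keep using the old mr until
	 * they drop the reference they took.
	 */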
	if (lkey == 0) {
		RCU_INIT_POINTER(dev->dma_mr, NULL);
	} else {
		r = lkey >> (32 - ib_qib_lkey_table_size);
		RCU_INIT_POINTER(rkt->table[r], NULL);
	}
	qib_put_mr(mr);
	mr->lkey_published = 0;
out:
	spin_unlock_irqrestore(&rkt->lock, flags);
}

/**
 * qib_lkey_ok - check IB SGE for validity and initialize
 * @rkt: table containing the lkey to check the SGE against
 * @pd: protection domain
 * @isge: outgoing internal SGE
 * @sge: SGE to check
 * @acc: access flags
 *
 * Returns 1 if valid and successful, otherwise returns 0.
 *
 * Increments the reference count upon success.
 *
 * Check the IB SGE for validity and initialize our internal version
 * of it.
 */
int qib_lkey_ok(struct qib_lkey_table *rkt, struct qib_pd *pd,
		struct qib_sge *isge, struct ib_sge *sge, int acc)
{
	struct qib_mregion *mr;
	unsigned n, m;
	size_t off;

	/*
	 * We use LKEY == zero for kernel virtual addresses
	 * (see qib_get_dma_mr and qib_dma.c).
	 */
	rcu_read_lock();
	if (sge->lkey == 0) {
		struct qib_ibdev *dev = to_idev(pd->ibpd.device);

		if (pd->user)
			goto bail;
		mr = rcu_dereference(dev->dma_mr);
		if (!mr)
			goto bail;
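		/*
		 * Take a reference only if the mr is still live; a zero
		 * refcount means teardown has already begun and the
		 * region must not be reused.
		 */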
		if (unlikely(!atomic_inc_not_zero(&mr->refcount)))
			goto bail;
		rcu_read_unlock();

		isge->mr = mr;
		isge->vaddr = (void *) sge->addr;
		isge->length = sge->length;
		isge->sge_length = sge->length;
		isge->m = 0;
		isge->n = 0;
		goto ok;
	}
	mr = rcu_dereference(
		rkt->table[(sge->lkey >> (32 - ib_qib_lkey_table_size))]);
	if (unlikely(!mr || mr->lkey != sge->lkey || mr->pd != &pd->ibpd))
		goto bail;

	off = sge->addr - mr->user_base;
	if (unlikely(sge->addr < mr->user_base ||
		     off + sge->length > mr->length ||
		     (mr->access_flags & acc) != acc))
		goto bail;
	if (unlikely(!atomic_inc_not_zero(&mr->refcount)))
		goto bail;
	rcu_read_unlock();

	off += mr->offset;
	if (mr->page_shift) {
		/*
		 * Page sizes are a uniform power of 2, so no loop is
		 * necessary; entries_spanned_by_off is the number of
		 * times the loop below would have executed.
		 */
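		/*
		 * For example (hypothetical values), with page_shift == 12
		 * and off == 0x6234: entries_spanned_by_off == 6 and the
		 * remaining off == 0x234, the byte offset within the
		 * seventh page-sized segment.
		 */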
		size_t entries_spanned_by_off;

		entries_spanned_by_off = off >> mr->page_shift;
		off -= (entries_spanned_by_off << mr->page_shift);
		m = entries_spanned_by_off / QIB_SEGSZ;
		n = entries_spanned_by_off % QIB_SEGSZ;
	} else {
		m = 0;
		n = 0;
		while (off >= mr->map[m]->segs[n].length) {
			off -= mr->map[m]->segs[n].length;
			n++;
			if (n >= QIB_SEGSZ) {
				m++;
				n = 0;
			}
		}
	}
	isge->mr = mr;
	isge->vaddr = mr->map[m]->segs[n].vaddr + off;
	isge->length = mr->map[m]->segs[n].length - off;
	isge->sge_length = sge->length;
	isge->m = m;
	isge->n = n;
ok:
	return 1;
bail:
	rcu_read_unlock();
	return 0;
}

/**
 * qib_rkey_ok - check the IB virtual address, length, and RKEY
 * @qp: qp for validation
 * @sge: SGE state
 * @len: length of data
 * @vaddr: virtual address to place data
 * @rkey: rkey to check
 * @acc: access flags
 *
 * Returns 1 if successful, otherwise returns 0.
 *
 * Increments the reference count upon success.
 */
int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge,
		u32 len, u64 vaddr, u32 rkey, int acc)
{
	struct qib_lkey_table *rkt = &to_idev(qp->ibqp.device)->lk_table;
	struct qib_mregion *mr;
	unsigned n, m;
	size_t off;

	/*
	 * We use RKEY == zero for kernel virtual addresses
	 * (see qib_get_dma_mr and qib_dma.c).
	 */
	rcu_read_lock();
	if (rkey == 0) {
		struct qib_pd *pd = to_ipd(qp->ibqp.pd);
		struct qib_ibdev *dev = to_idev(pd->ibpd.device);

		if (pd->user)
			goto bail;
		mr = rcu_dereference(dev->dma_mr);
		if (!mr)
			goto bail;
		if (unlikely(!atomic_inc_not_zero(&mr->refcount)))
			goto bail;
		rcu_read_unlock();

		sge->mr = mr;
		sge->vaddr = (void *) vaddr;
		sge->length = len;
		sge->sge_length = len;
		sge->m = 0;
		sge->n = 0;
		goto ok;
	}
	mr = rcu_dereference(
		rkt->table[(rkey >> (32 - ib_qib_lkey_table_size))]);
	if (unlikely(!mr || mr->lkey != rkey || qp->ibqp.pd != mr->pd))
		goto bail;

	off = vaddr - mr->iova;
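	/*
	 * Note: unlike qib_lkey_ok(), which requires all requested access
	 * bits, this check only requires that at least one of the
	 * requested bits be present in mr->access_flags.
	 */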
	if (unlikely(vaddr < mr->iova || off + len > mr->length ||
		     (mr->access_flags & acc) == 0))
		goto bail;
	if (unlikely(!atomic_inc_not_zero(&mr->refcount)))
		goto bail;
	rcu_read_unlock();

	off += mr->offset;
	if (mr->page_shift) {
		/*
		 * Page sizes are a uniform power of 2, so no loop is
		 * necessary; entries_spanned_by_off is the number of
		 * times the loop below would have executed.
		 */
		size_t entries_spanned_by_off;

		entries_spanned_by_off = off >> mr->page_shift;
		off -= (entries_spanned_by_off << mr->page_shift);
		m = entries_spanned_by_off / QIB_SEGSZ;
		n = entries_spanned_by_off % QIB_SEGSZ;
	} else {
		m = 0;
		n = 0;
		while (off >= mr->map[m]->segs[n].length) {
			off -= mr->map[m]->segs[n].length;
			n++;
			if (n >= QIB_SEGSZ) {
				m++;
				n = 0;
			}
		}
	}
	sge->mr = mr;
	sge->vaddr = mr->map[m]->segs[n].vaddr + off;
	sge->length = mr->map[m]->segs[n].length - off;
	sge->sge_length = len;
	sge->m = m;
	sge->n = n;
ok:
	return 1;
bail:
	rcu_read_unlock();
	return 0;
}

/*
 * Initialize the memory region specified by the work request.
 */
int qib_fast_reg_mr(struct qib_qp *qp, struct ib_send_wr *wr)
{
	struct qib_lkey_table *rkt = &to_idev(qp->ibqp.device)->lk_table;
	struct qib_pd *pd = to_ipd(qp->ibqp.pd);
	struct qib_mregion *mr;
	u32 rkey = wr->wr.fast_reg.rkey;
	unsigned i, n, m;
	int ret = -EINVAL;
	unsigned long flags;
	u64 *page_list;
	size_t ps;

	spin_lock_irqsave(&rkt->lock, flags);
	if (pd->user || rkey == 0)
		goto bail;
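	/*
	 * The update side holds rkt->lock, so rcu_dereference_protected()
	 * is safe here without rcu_read_lock(); lockdep verifies the
	 * claim.
	 */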
	mr = rcu_dereference_protected(
		rkt->table[(rkey >> (32 - ib_qib_lkey_table_size))],
		lockdep_is_held(&rkt->lock));
	if (unlikely(mr == NULL || qp->ibqp.pd != mr->pd))
		goto bail;

	if (wr->wr.fast_reg.page_list_len > mr->max_segs)
		goto bail;

	ps = 1UL << wr->wr.fast_reg.page_shift;
	if (wr->wr.fast_reg.length > ps * wr->wr.fast_reg.page_list_len)
		goto bail;

	mr->user_base = wr->wr.fast_reg.iova_start;
	mr->iova = wr->wr.fast_reg.iova_start;
	mr->lkey = rkey;
	mr->length = wr->wr.fast_reg.length;
	mr->access_flags = wr->wr.fast_reg.access_flags;
	page_list = wr->wr.fast_reg.page_list->page_list;
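	/*
	 * Copy the page list into the mr's two-level segment arrays,
	 * QIB_SEGSZ entries per map chunk, each segment covering one
	 * page of size ps.
	 */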
	m = 0;
	n = 0;
	for (i = 0; i < wr->wr.fast_reg.page_list_len; i++) {
		mr->map[m]->segs[n].vaddr = (void *) page_list[i];
		mr->map[m]->segs[n].length = ps;
		if (++n == QIB_SEGSZ) {
			m++;
			n = 0;
		}
	}

	ret = 0;
bail:
	spin_unlock_irqrestore(&rkt->lock, flags);
	return ret;
}