scif_rma_list.c

/*
 * Intel MIC Platform Software Stack (MPSS)
 *
 * Copyright(c) 2015 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * Intel SCIF driver.
 *
 */
#include "scif_main.h"
#include <linux/mmu_notifier.h>
#include <linux/highmem.h>

/*
 * scif_insert_tcw:
 *
 * Insert a temp window into the temp registration list sorted by
 * va_for_temp. RMA lock must be held.
 */
void scif_insert_tcw(struct scif_window *window, struct list_head *head)
{
	struct scif_window *curr = NULL;
	struct scif_window *prev = list_entry(head, struct scif_window, list);
	struct list_head *item;

	INIT_LIST_HEAD(&window->list);
	/* Compare with the tail; if the entry is the new tail, append it */
	if (!list_empty(head)) {
		curr = list_entry(head->prev, struct scif_window, list);
		if (curr->va_for_temp < window->va_for_temp) {
			list_add_tail(&window->list, head);
			return;
		}
	}
	list_for_each(item, head) {
		curr = list_entry(item, struct scif_window, list);
		if (curr->va_for_temp > window->va_for_temp)
			break;
		prev = curr;
	}
	list_add(&window->list, &prev->list);
}
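
/*
 * Illustrative sketch (not part of the driver): calls to
 * scif_insert_tcw() in arbitrary order leave the list sorted in
 * ascending va_for_temp order, with the tail comparison short-circuiting
 * the common append case. The helper name and the va_for_temp values
 * below are made up; window setup is assumed done by the caller, which
 * must hold the RMA lock.
 */
static __maybe_unused void scif_insert_tcw_sketch(struct list_head *head,
						  struct scif_window *w1k,
						  struct scif_window *w2k,
						  struct scif_window *w3k)
{
	/* Assume va_for_temp is 0x1000, 0x2000 and 0x3000 respectively. */
	scif_insert_tcw(w1k, head); /* empty list: linked right after head */
	scif_insert_tcw(w3k, head); /* 0x3000 > tail 0x1000: fast append */
	scif_insert_tcw(w2k, head); /* scan stops at 0x3000: placed after w1k */
	/* Resulting order: 0x1000 -> 0x2000 -> 0x3000 */
}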

/*
 * scif_insert_window:
 *
 * Insert a window into the self registration list sorted by offset.
 * RMA lock must be held.
 */
void scif_insert_window(struct scif_window *window, struct list_head *head)
{
	struct scif_window *curr = NULL, *prev = NULL;
	struct list_head *item;

	INIT_LIST_HEAD(&window->list);
	list_for_each(item, head) {
		curr = list_entry(item, struct scif_window, list);
		if (curr->offset > window->offset)
			break;
		prev = curr;
	}
	if (!prev)
		list_add(&window->list, head);
	else
		list_add(&window->list, &prev->list);
	scif_set_window_ref(window, window->nr_pages);
}
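
/*
 * Illustrative sketch (not part of the driver): a typical insertion
 * into the endpoint's self registration list. The helper name is made
 * up; the window is assumed to be fully initialized and the RMA lock
 * held by the caller.
 */
static __maybe_unused void scif_insert_window_sketch(struct scif_endpt *ep,
						     struct scif_window *window)
{
	scif_insert_window(window, &ep->rma_info.reg_list);
	/*
	 * The window is now linked before the first entry whose offset
	 * exceeds window->offset, and scif_insert_window() has accounted
	 * one reference per page via scif_set_window_ref().
	 */
}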

/*
 * scif_query_tcw:
 *
 * Query the temp cached registration list of the endpoint for an
 * overlapping window. In case of a permission mismatch, destroy the
 * previous window. If the permissions match and the overlap is partial,
 * destroy the window but return the new range.
 * RMA lock must be held.
 */
int scif_query_tcw(struct scif_endpt *ep, struct scif_rma_req *req)
{
	struct list_head *item, *temp, *head = req->head;
	struct scif_window *window;
	u64 start_va_window, start_va_req = req->va_for_temp;
	u64 end_va_window, end_va_req = start_va_req + req->nr_bytes;

	if (!req->nr_bytes)
		return -EINVAL;
	/*
	 * Avoid traversing the entire list to find out that there
	 * is no entry that matches.
	 */
	if (!list_empty(head)) {
		window = list_last_entry(head, struct scif_window, list);
		end_va_window = window->va_for_temp +
				(window->nr_pages << PAGE_SHIFT);
		if (start_va_req > end_va_window)
			return -ENXIO;
	}
	list_for_each_safe(item, temp, head) {
		window = list_entry(item, struct scif_window, list);
		start_va_window = window->va_for_temp;
		end_va_window = window->va_for_temp +
				(window->nr_pages << PAGE_SHIFT);
		if (start_va_req < start_va_window &&
		    end_va_req < start_va_window)
			break;
		if (start_va_req >= end_va_window)
			continue;
		if ((window->prot & req->prot) == req->prot) {
			if (start_va_req >= start_va_window &&
			    end_va_req <= end_va_window) {
				*req->out_window = window;
				return 0;
			}
			/* expand window */
			if (start_va_req < start_va_window) {
				req->nr_bytes +=
					start_va_window - start_va_req;
				req->va_for_temp = start_va_window;
			}
			if (end_va_req >= end_va_window)
				req->nr_bytes +=
					end_va_window - end_va_req;
		}
		/* Destroy the old window to create a new one */
		__scif_rma_destroy_tcw_helper(window);
		break;
	}
	return -ENXIO;
}
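
/*
 * Worked example for scif_query_tcw() (illustrative; the addresses and
 * protections are made up). Assume one cached window covering
 * [0x2000, 0x5000) with prot VM_READ | VM_WRITE:
 *
 * 1) req [0x3000, 0x4000), prot VM_READ: the request lies fully inside
 *    the window and asks for a subset of its permissions, so
 *    *req->out_window is set and 0 is returned; the cached window is
 *    reused as-is.
 * 2) req [0x3000, 0x4000), prot mismatch: the overlapping window is
 *    destroyed via __scif_rma_destroy_tcw_helper() and -ENXIO tells the
 *    caller to register a fresh window.
 * 3) req [0x6000, 0x7000): the request starts past the end of the last
 *    window, so the early tail check returns -ENXIO without walking
 *    the list.
 */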

/*
 * scif_query_window:
 *
 * Query the registration list and check whether a valid contiguous
 * range of windows exists.
 * RMA lock must be held.
 */
int scif_query_window(struct scif_rma_req *req)
{
	struct list_head *item;
	struct scif_window *window;
	s64 end_offset, offset = req->offset;
	u64 tmp_min, nr_bytes_left = req->nr_bytes;

	if (!req->nr_bytes)
		return -EINVAL;
	list_for_each(item, req->head) {
		window = list_entry(item, struct scif_window, list);
		end_offset = window->offset +
			(window->nr_pages << PAGE_SHIFT);
		if (offset < window->offset)
			/* Offset not found! */
			return -ENXIO;
		if (offset >= end_offset)
			continue;
		/* Check read/write protections. */
		if ((window->prot & req->prot) != req->prot)
			return -EPERM;
		if (nr_bytes_left == req->nr_bytes)
			/* Store the first window */
			*req->out_window = window;
		tmp_min = min((u64)end_offset - offset, nr_bytes_left);
		nr_bytes_left -= tmp_min;
		offset += tmp_min;
		/*
		 * Range requested encompasses
		 * multiple windows contiguously.
		 */
		if (!nr_bytes_left) {
			/* Done for partial window */
			if (req->type == SCIF_WINDOW_PARTIAL ||
			    req->type == SCIF_WINDOW_SINGLE)
				return 0;
			/* Extra logic for full windows */
			if (offset == end_offset)
				/* Spanning multiple whole windows */
				return 0;
			/* Not spanning multiple whole windows */
			return -ENXIO;
		}
		if (req->type == SCIF_WINDOW_SINGLE)
			break;
	}
	dev_err(scif_info.mdev.this_device,
		"%s %d ENXIO\n", __func__, __LINE__);
	return -ENXIO;
}
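
/*
 * Worked example for scif_query_window() (illustrative; offsets are
 * made up). Assume windows A [0x0, 0x4000) and B [0x4000, 0x8000) on
 * the list, both with the requested protections:
 *
 * - offset 0x2000, nr_bytes 0x1000, SCIF_WINDOW_PARTIAL: satisfied
 *   entirely inside A; *req->out_window = A, returns 0.
 * - offset 0x2000, nr_bytes 0x4000, SCIF_WINDOW_PARTIAL: consumes the
 *   tail of A and the head of B contiguously; *req->out_window = A,
 *   returns 0.
 * - offset 0x2000, nr_bytes 0x4000 with a full-window request type
 *   (neither PARTIAL nor SINGLE): the range ends at 0x6000, in the
 *   middle of B, so the whole-window check fails and -ENXIO is
 *   returned.
 */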

/*
 * scif_rma_list_unregister:
 *
 * Traverse the self registration list starting from window:
 * 1) Call scif_unregister_window(..)
 * RMA lock must be held.
 */
int scif_rma_list_unregister(struct scif_window *window,
			     s64 offset, int nr_pages)
{
	struct scif_endpt *ep = (struct scif_endpt *)window->ep;
	struct list_head *head = &ep->rma_info.reg_list;
	s64 end_offset;
	int err = 0;
	int loop_nr_pages;
	struct scif_window *_window;

	list_for_each_entry_safe_from(window, _window, head, list) {
		end_offset = window->offset + (window->nr_pages << PAGE_SHIFT);
		loop_nr_pages = min((int)((end_offset - offset) >> PAGE_SHIFT),
				    nr_pages);
		err = scif_unregister_window(window);
		if (err)
			return err;
		nr_pages -= loop_nr_pages;
		offset += (loop_nr_pages << PAGE_SHIFT);
		if (!nr_pages)
			break;
	}
	return 0;
}
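
/*
 * Worked example for scif_rma_list_unregister() (illustrative; values
 * are made up and PAGE_SHIFT is assumed to be 12). Windows: A at
 * offset 0x0 (4 pages) and B at 0x4000 (4 pages); call with
 * window = A, offset = 0x2000, nr_pages = 4:
 *
 * - A: end_offset = 0x4000, so loop_nr_pages =
 *   min((0x4000 - 0x2000) >> 12, 4) = 2; A is unregistered whole,
 *   nr_pages drops to 2 and offset advances to 0x4000.
 * - B: loop_nr_pages = min(4, 2) = 2; B is unregistered and the loop
 *   stops once nr_pages reaches 0.
 *
 * Note that scif_unregister_window() always acts on the whole window,
 * even when only part of its page range fell inside the request.
 */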

/*
 * scif_unmap_all_windows:
 *
 * Traverse all the windows in the self registration list and:
 * 1) Delete any DMA mappings created
 */
void scif_unmap_all_windows(scif_epd_t epd)
{
	struct list_head *item, *tmp;
	struct scif_window *window;
	struct scif_endpt *ep = (struct scif_endpt *)epd;
	struct list_head *head = &ep->rma_info.reg_list;

	mutex_lock(&ep->rma_info.rma_lock);
	list_for_each_safe(item, tmp, head) {
		window = list_entry(item, struct scif_window, list);
		scif_unmap_window(ep->remote_dev, window);
	}
	mutex_unlock(&ep->rma_info.rma_lock);
}

/*
 * scif_unregister_all_windows:
 *
 * Traverse all the windows in the self registration list and:
 * 1) Call scif_unregister_window(..)
 * The RMA lock is acquired internally; the caller must not hold it.
 */
int scif_unregister_all_windows(scif_epd_t epd)
{
	struct list_head *item, *tmp;
	struct scif_window *window;
	struct scif_endpt *ep = (struct scif_endpt *)epd;
	struct list_head *head = &ep->rma_info.reg_list;
	int err = 0;

	mutex_lock(&ep->rma_info.rma_lock);
retry:
	item = NULL;
	tmp = NULL;
	list_for_each_safe(item, tmp, head) {
		window = list_entry(item, struct scif_window, list);
		ep->rma_info.async_list_del = 0;
		err = scif_unregister_window(window);
		if (err)
			dev_err(scif_info.mdev.this_device,
				"%s %d err %d\n",
				__func__, __LINE__, err);
		/*
		 * Need to restart list traversal if there has been
		 * an asynchronous list entry deletion.
		 */
		if (READ_ONCE(ep->rma_info.async_list_del))
			goto retry;
	}
	mutex_unlock(&ep->rma_info.rma_lock);
	if (!list_empty(&ep->rma_info.mmn_list)) {
		spin_lock(&scif_info.rmalock);
		list_add_tail(&ep->mmu_list, &scif_info.mmu_notif_cleanup);
		spin_unlock(&scif_info.rmalock);
		schedule_work(&scif_info.mmu_notif_work);
	}
	return err;
}
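
/*
 * Note on the retry pattern above (illustrative commentary): while a
 * window is being unregistered, another context may delete entries
 * from this list and flag that by setting async_list_del, which leaves
 * the next pointer cached by list_for_each_safe() potentially stale.
 * The flag is therefore cleared before each unregister and re-checked
 * after it; if it was raised in between, traversal restarts from the
 * list head instead of following a possibly freed ->next pointer.
 */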