xen_drm_front_shbuf.c

// SPDX-License-Identifier: GPL-2.0 OR MIT

/*
 * Xen para-virtual DRM device
 *
 * Copyright (C) 2016-2018 EPAM Systems Inc.
 *
 * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
 */

#include <drm/drmP.h>
#if defined(CONFIG_X86)
#include <drm/drm_cache.h>
#endif
#include <linux/errno.h>
#include <linux/mm.h>

#include <asm/xen/hypervisor.h>
#include <xen/balloon.h>
#include <xen/xen.h>
#include <xen/xenbus.h>
#include <xen/interface/io/ring.h>
#include <xen/interface/io/displif.h>

#include "xen_drm_front.h"
#include "xen_drm_front_shbuf.h"

struct xen_drm_front_shbuf_ops {
        /*
         * Calculate number of grefs required to handle this buffer,
         * e.g. if grefs are required for page directory only or the buffer
         * pages as well.
         */
        void (*calc_num_grefs)(struct xen_drm_front_shbuf *buf);
        /* Fill page directory according to para-virtual display protocol. */
        void (*fill_page_dir)(struct xen_drm_front_shbuf *buf);
        /* Claim grant references for the pages of the buffer. */
        int (*grant_refs_for_buffer)(struct xen_drm_front_shbuf *buf,
                                     grant_ref_t *priv_gref_head, int gref_idx);
        /* Map grant references of the buffer. */
        int (*map)(struct xen_drm_front_shbuf *buf);
        /* Unmap grant references of the buffer. */
        int (*unmap)(struct xen_drm_front_shbuf *buf);
};

grant_ref_t xen_drm_front_shbuf_get_dir_start(struct xen_drm_front_shbuf *buf)
{
        if (!buf->grefs)
                return GRANT_INVALID_REF;

        return buf->grefs[0];
}

int xen_drm_front_shbuf_map(struct xen_drm_front_shbuf *buf)
{
        if (buf->ops->map)
                return buf->ops->map(buf);

        /* no need to map own grant references */
        return 0;
}

int xen_drm_front_shbuf_unmap(struct xen_drm_front_shbuf *buf)
{
        if (buf->ops->unmap)
                return buf->ops->unmap(buf);

        /* no need to unmap own grant references */
        return 0;
}

void xen_drm_front_shbuf_flush(struct xen_drm_front_shbuf *buf)
{
#if defined(CONFIG_X86)
        drm_clflush_pages(buf->pages, buf->num_pages);
#endif
}

void xen_drm_front_shbuf_free(struct xen_drm_front_shbuf *buf)
{
        if (buf->grefs) {
                int i;

                for (i = 0; i < buf->num_grefs; i++)
                        if (buf->grefs[i] != GRANT_INVALID_REF)
                                gnttab_end_foreign_access(buf->grefs[i],
                                                          0, 0UL);
        }
        kfree(buf->grefs);
        kfree(buf->directory);
        kfree(buf);
}

/*
 * number of grefs a page can hold with respect to the
 * struct xendispl_page_directory header
 */
#define XEN_DRM_NUM_GREFS_PER_PAGE ((PAGE_SIZE - \
                offsetof(struct xendispl_page_directory, gref)) / \
                sizeof(grant_ref_t))

static int get_num_pages_dir(struct xen_drm_front_shbuf *buf)
{
        /* number of pages the page directory consumes itself */
        return DIV_ROUND_UP(buf->num_pages, XEN_DRM_NUM_GREFS_PER_PAGE);
}
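
/*
 * Illustrative sizing only (not part of the protocol definition):
 * assuming 4 KiB pages, 32-bit grant references and that gref[]
 * immediately follows the 4-byte gref_dir_next_page field, each
 * directory page carries (4096 - 4) / 4 = 1023 grefs, so e.g. a
 * 16 MiB buffer (4096 pages) needs DIV_ROUND_UP(4096, 1023) = 5
 * directory pages.
 */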

static void backend_calc_num_grefs(struct xen_drm_front_shbuf *buf)
{
        /* only for pages the page directory consumes itself */
        buf->num_grefs = get_num_pages_dir(buf);
}

static void guest_calc_num_grefs(struct xen_drm_front_shbuf *buf)
{
        /*
         * number of pages the page directory consumes itself
         * plus grefs for the buffer pages
         */
        buf->num_grefs = get_num_pages_dir(buf) + buf->num_pages;
}

#define xen_page_to_vaddr(page) \
                ((uintptr_t)pfn_to_kaddr(page_to_xen_pfn(page)))

static int backend_unmap(struct xen_drm_front_shbuf *buf)
{
        struct gnttab_unmap_grant_ref *unmap_ops;
        int i, ret;

        if (!buf->pages || !buf->backend_map_handles || !buf->grefs)
                return 0;

        unmap_ops = kcalloc(buf->num_pages, sizeof(*unmap_ops),
                            GFP_KERNEL);
        if (!unmap_ops) {
                DRM_ERROR("Failed to get memory while unmapping\n");
                return -ENOMEM;
        }

        for (i = 0; i < buf->num_pages; i++) {
                phys_addr_t addr;

                addr = xen_page_to_vaddr(buf->pages[i]);
                gnttab_set_unmap_op(&unmap_ops[i], addr, GNTMAP_host_map,
                                    buf->backend_map_handles[i]);
        }

        ret = gnttab_unmap_refs(unmap_ops, NULL, buf->pages,
                                buf->num_pages);

        for (i = 0; i < buf->num_pages; i++) {
                if (unlikely(unmap_ops[i].status != GNTST_okay))
                        DRM_ERROR("Failed to unmap page %d: %d\n",
                                  i, unmap_ops[i].status);
        }

        if (ret)
                DRM_ERROR("Failed to unmap grant references, ret %d", ret);

        kfree(unmap_ops);
        kfree(buf->backend_map_handles);
        buf->backend_map_handles = NULL;
        return ret;
}

static int backend_map(struct xen_drm_front_shbuf *buf)
{
        struct gnttab_map_grant_ref *map_ops = NULL;
        unsigned char *ptr;
        int ret, cur_gref, cur_dir_page, cur_page, grefs_left;

        map_ops = kcalloc(buf->num_pages, sizeof(*map_ops), GFP_KERNEL);
        if (!map_ops)
                return -ENOMEM;

        buf->backend_map_handles = kcalloc(buf->num_pages,
                                           sizeof(*buf->backend_map_handles),
                                           GFP_KERNEL);
        if (!buf->backend_map_handles) {
                kfree(map_ops);
                return -ENOMEM;
        }

        /*
         * read page directory to get grefs from the backend: for external
         * buffer we only allocate buf->grefs for the page directory,
         * so buf->num_grefs has number of pages in the page directory itself
         */
        ptr = buf->directory;
        grefs_left = buf->num_pages;
        cur_page = 0;
        for (cur_dir_page = 0; cur_dir_page < buf->num_grefs; cur_dir_page++) {
                struct xendispl_page_directory *page_dir =
                                (struct xendispl_page_directory *)ptr;
                int to_copy = XEN_DRM_NUM_GREFS_PER_PAGE;

                if (to_copy > grefs_left)
                        to_copy = grefs_left;

                for (cur_gref = 0; cur_gref < to_copy; cur_gref++) {
                        phys_addr_t addr;

                        addr = xen_page_to_vaddr(buf->pages[cur_page]);
                        gnttab_set_map_op(&map_ops[cur_page], addr,
                                          GNTMAP_host_map,
                                          page_dir->gref[cur_gref],
                                          buf->xb_dev->otherend_id);
                        cur_page++;
                }

                grefs_left -= to_copy;
                ptr += PAGE_SIZE;
        }
        ret = gnttab_map_refs(map_ops, NULL, buf->pages, buf->num_pages);

        /* save handles even if error, so we can unmap */
        for (cur_page = 0; cur_page < buf->num_pages; cur_page++) {
                buf->backend_map_handles[cur_page] = map_ops[cur_page].handle;
                if (unlikely(map_ops[cur_page].status != GNTST_okay))
                        DRM_ERROR("Failed to map page %d: %d\n",
                                  cur_page, map_ops[cur_page].status);
        }

        if (ret) {
                DRM_ERROR("Failed to map grant references, ret %d", ret);
                backend_unmap(buf);
        }

        kfree(map_ops);
        return ret;
}
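
/*
 * Note on the layout produced by the fill_page_dir variants below:
 * directory pages form a singly linked list, each page storing the
 * grant reference of the next directory page in gref_dir_next_page
 * and the last page terminating the chain with GRANT_INVALID_REF.
 * For backend-allocated buffers the gref[] entries of each directory
 * page are filled in by the backend; for locally allocated buffers
 * they are filled in by guest_fill_page_dir().
 */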

static void backend_fill_page_dir(struct xen_drm_front_shbuf *buf)
{
        struct xendispl_page_directory *page_dir;
        unsigned char *ptr;
        int i, num_pages_dir;

        ptr = buf->directory;
        num_pages_dir = get_num_pages_dir(buf);

        /* fill only grefs for the page directory itself */
        for (i = 0; i < num_pages_dir - 1; i++) {
                page_dir = (struct xendispl_page_directory *)ptr;

                page_dir->gref_dir_next_page = buf->grefs[i + 1];
                ptr += PAGE_SIZE;
        }
        /* the last page must say there are no more pages */
        page_dir = (struct xendispl_page_directory *)ptr;
        page_dir->gref_dir_next_page = GRANT_INVALID_REF;
}

static void guest_fill_page_dir(struct xen_drm_front_shbuf *buf)
{
        unsigned char *ptr;
        int cur_gref, grefs_left, to_copy, i, num_pages_dir;

        ptr = buf->directory;
        num_pages_dir = get_num_pages_dir(buf);

        /*
         * while copying, skip grefs at start, they are for pages
         * granted for the page directory itself
         */
        cur_gref = num_pages_dir;
        grefs_left = buf->num_pages;
        for (i = 0; i < num_pages_dir; i++) {
                struct xendispl_page_directory *page_dir =
                                (struct xendispl_page_directory *)ptr;

                if (grefs_left <= XEN_DRM_NUM_GREFS_PER_PAGE) {
                        to_copy = grefs_left;
                        page_dir->gref_dir_next_page = GRANT_INVALID_REF;
                } else {
                        to_copy = XEN_DRM_NUM_GREFS_PER_PAGE;
                        page_dir->gref_dir_next_page = buf->grefs[i + 1];
                }
                memcpy(&page_dir->gref, &buf->grefs[cur_gref],
                       to_copy * sizeof(grant_ref_t));
                ptr += PAGE_SIZE;
                grefs_left -= to_copy;
                cur_gref += to_copy;
        }
}
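
/*
 * For locally allocated buffers buf->grefs holds the directory page
 * grefs first and the buffer page grefs after them: grant_references()
 * below fills entries [0 .. get_num_pages_dir() - 1] for the directory
 * pages, then guest_grant_refs_for_buffer() fills the remaining
 * buf->num_pages entries starting at the gref_idx it is given.
 */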

static int guest_grant_refs_for_buffer(struct xen_drm_front_shbuf *buf,
                                       grant_ref_t *priv_gref_head,
                                       int gref_idx)
{
        int i, cur_ref, otherend_id;

        otherend_id = buf->xb_dev->otherend_id;
        for (i = 0; i < buf->num_pages; i++) {
                cur_ref = gnttab_claim_grant_reference(priv_gref_head);
                if (cur_ref < 0)
                        return cur_ref;

                gnttab_grant_foreign_access_ref(cur_ref, otherend_id,
                                                xen_page_to_gfn(buf->pages[i]),
                                                0);
                buf->grefs[gref_idx++] = cur_ref;
        }
        return 0;
}

static int grant_references(struct xen_drm_front_shbuf *buf)
{
        grant_ref_t priv_gref_head;
        int ret, i, j, cur_ref;
        int otherend_id, num_pages_dir;

        ret = gnttab_alloc_grant_references(buf->num_grefs, &priv_gref_head);
        if (ret < 0) {
                DRM_ERROR("Cannot allocate grant references\n");
                return ret;
        }

        otherend_id = buf->xb_dev->otherend_id;
        j = 0;
        num_pages_dir = get_num_pages_dir(buf);
        for (i = 0; i < num_pages_dir; i++) {
                unsigned long frame;

                cur_ref = gnttab_claim_grant_reference(&priv_gref_head);
                if (cur_ref < 0)
                        return cur_ref;

                frame = xen_page_to_gfn(virt_to_page(buf->directory +
                                                     PAGE_SIZE * i));
                gnttab_grant_foreign_access_ref(cur_ref, otherend_id, frame, 0);
                buf->grefs[j++] = cur_ref;
        }

        if (buf->ops->grant_refs_for_buffer) {
                ret = buf->ops->grant_refs_for_buffer(buf, &priv_gref_head, j);
                if (ret)
                        return ret;
        }

        gnttab_free_grant_references(priv_gref_head);
        return 0;
}

static int alloc_storage(struct xen_drm_front_shbuf *buf)
{
        buf->grefs = kcalloc(buf->num_grefs, sizeof(*buf->grefs), GFP_KERNEL);
        if (!buf->grefs)
                return -ENOMEM;

        buf->directory = kcalloc(get_num_pages_dir(buf), PAGE_SIZE, GFP_KERNEL);
        if (!buf->directory)
                return -ENOMEM;

        return 0;
}

/*
 * For backend-allocated buffers we don't need grant_refs_for_buffer,
 * as those grant references are allocated on the backend side.
 */
static const struct xen_drm_front_shbuf_ops backend_ops = {
        .calc_num_grefs = backend_calc_num_grefs,
        .fill_page_dir = backend_fill_page_dir,
        .map = backend_map,
        .unmap = backend_unmap
};

/* For locally granted references we do not need to map/unmap the references */
static const struct xen_drm_front_shbuf_ops local_ops = {
        .calc_num_grefs = guest_calc_num_grefs,
        .fill_page_dir = guest_fill_page_dir,
        .grant_refs_for_buffer = guest_grant_refs_for_buffer,
};

struct xen_drm_front_shbuf *
xen_drm_front_shbuf_alloc(struct xen_drm_front_shbuf_cfg *cfg)
{
        struct xen_drm_front_shbuf *buf;
        int ret;

        buf = kzalloc(sizeof(*buf), GFP_KERNEL);
        if (!buf)
                return ERR_PTR(-ENOMEM);

        if (cfg->be_alloc)
                buf->ops = &backend_ops;
        else
                buf->ops = &local_ops;

        buf->xb_dev = cfg->xb_dev;
        buf->num_pages = DIV_ROUND_UP(cfg->size, PAGE_SIZE);
        buf->pages = cfg->pages;

        buf->ops->calc_num_grefs(buf);

        ret = alloc_storage(buf);
        if (ret)
                goto fail;

        ret = grant_references(buf);
        if (ret)
                goto fail;

        buf->ops->fill_page_dir(buf);

        return buf;

fail:
        xen_drm_front_shbuf_free(buf);
        return ERR_PTR(ret);
}
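
/*
 * Illustrative call sequence for the helpers in this file (a sketch
 * only; the local names "cfg", "shbuf" and "dir_start" are hypothetical
 * and not taken from the driver). The frontend fills in a
 * struct xen_drm_front_shbuf_cfg with xb_dev, size, pages and be_alloc,
 * then:
 *
 *      shbuf = xen_drm_front_shbuf_alloc(&cfg);
 *      if (IS_ERR(shbuf))
 *              return PTR_ERR(shbuf);
 *
 *      dir_start = xen_drm_front_shbuf_get_dir_start(shbuf);
 *      // pass dir_start to the backend in the displif request
 *
 *      ret = xen_drm_front_shbuf_map(shbuf);   // no-op for locally granted buffers
 *      ...
 *      xen_drm_front_shbuf_flush(shbuf);       // CPU cache flush, x86 only
 *      xen_drm_front_shbuf_unmap(shbuf);
 *      xen_drm_front_shbuf_free(shbuf);
 */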