vmwgfx_buffer.c

// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_page_alloc.h>

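/*
 * Single-entry struct ttm_place definitions used to build the
 * struct ttm_placement lists below. Each entry names one memory type
 * (system, VRAM, GMR or MOB), always cached, optionally non-evictable.
 */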
static const struct ttm_place vram_placement_flags = {
        .fpfn = 0,
        .lpfn = 0,
        .flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
};

static const struct ttm_place vram_ne_placement_flags = {
        .fpfn = 0,
        .lpfn = 0,
        .flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
};

static const struct ttm_place sys_placement_flags = {
        .fpfn = 0,
        .lpfn = 0,
        .flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED
};

static const struct ttm_place sys_ne_placement_flags = {
        .fpfn = 0,
        .lpfn = 0,
        .flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
};

static const struct ttm_place gmr_placement_flags = {
        .fpfn = 0,
        .lpfn = 0,
        .flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
};

static const struct ttm_place gmr_ne_placement_flags = {
        .fpfn = 0,
        .lpfn = 0,
        .flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
};

static const struct ttm_place mob_placement_flags = {
        .fpfn = 0,
        .lpfn = 0,
        .flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED
};

static const struct ttm_place mob_ne_placement_flags = {
        .fpfn = 0,
        .lpfn = 0,
        .flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
};

struct ttm_placement vmw_vram_placement = {
        .num_placement = 1,
        .placement = &vram_placement_flags,
        .num_busy_placement = 1,
        .busy_placement = &vram_placement_flags
};

static const struct ttm_place vram_gmr_placement_flags[] = {
        {
                .fpfn = 0,
                .lpfn = 0,
                .flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
        }, {
                .fpfn = 0,
                .lpfn = 0,
                .flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
        }
};

static const struct ttm_place gmr_vram_placement_flags[] = {
        {
                .fpfn = 0,
                .lpfn = 0,
                .flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
        }, {
                .fpfn = 0,
                .lpfn = 0,
                .flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
        }
};

struct ttm_placement vmw_vram_gmr_placement = {
        .num_placement = 2,
        .placement = vram_gmr_placement_flags,
        .num_busy_placement = 1,
        .busy_placement = &gmr_placement_flags
};

static const struct ttm_place vram_gmr_ne_placement_flags[] = {
        {
                .fpfn = 0,
                .lpfn = 0,
                .flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED |
                         TTM_PL_FLAG_NO_EVICT
        }, {
                .fpfn = 0,
                .lpfn = 0,
                .flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED |
                         TTM_PL_FLAG_NO_EVICT
        }
};

struct ttm_placement vmw_vram_gmr_ne_placement = {
        .num_placement = 2,
        .placement = vram_gmr_ne_placement_flags,
        .num_busy_placement = 1,
        .busy_placement = &gmr_ne_placement_flags
};

struct ttm_placement vmw_vram_sys_placement = {
        .num_placement = 1,
        .placement = &vram_placement_flags,
        .num_busy_placement = 1,
        .busy_placement = &sys_placement_flags
};

struct ttm_placement vmw_vram_ne_placement = {
        .num_placement = 1,
        .placement = &vram_ne_placement_flags,
        .num_busy_placement = 1,
        .busy_placement = &vram_ne_placement_flags
};

struct ttm_placement vmw_sys_placement = {
        .num_placement = 1,
        .placement = &sys_placement_flags,
        .num_busy_placement = 1,
        .busy_placement = &sys_placement_flags
};

struct ttm_placement vmw_sys_ne_placement = {
        .num_placement = 1,
        .placement = &sys_ne_placement_flags,
        .num_busy_placement = 1,
        .busy_placement = &sys_ne_placement_flags
};

static const struct ttm_place evictable_placement_flags[] = {
        {
                .fpfn = 0,
                .lpfn = 0,
                .flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED
        }, {
                .fpfn = 0,
                .lpfn = 0,
                .flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
        }, {
                .fpfn = 0,
                .lpfn = 0,
                .flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
        }, {
                .fpfn = 0,
                .lpfn = 0,
                .flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED
        }
};

static const struct ttm_place nonfixed_placement_flags[] = {
        {
                .fpfn = 0,
                .lpfn = 0,
                .flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED
        }, {
                .fpfn = 0,
                .lpfn = 0,
                .flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
        }, {
                .fpfn = 0,
                .lpfn = 0,
                .flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED
        }
};

struct ttm_placement vmw_evictable_placement = {
        .num_placement = 4,
        .placement = evictable_placement_flags,
        .num_busy_placement = 1,
        .busy_placement = &sys_placement_flags
};

struct ttm_placement vmw_srf_placement = {
        .num_placement = 1,
        .num_busy_placement = 2,
        .placement = &gmr_placement_flags,
        .busy_placement = gmr_vram_placement_flags
};

struct ttm_placement vmw_mob_placement = {
        .num_placement = 1,
        .num_busy_placement = 1,
        .placement = &mob_placement_flags,
        .busy_placement = &mob_placement_flags
};

struct ttm_placement vmw_mob_ne_placement = {
        .num_placement = 1,
        .num_busy_placement = 1,
        .placement = &mob_ne_placement_flags,
        .busy_placement = &mob_ne_placement_flags
};

struct ttm_placement vmw_nonfixed_placement = {
        .num_placement = 3,
        .placement = nonfixed_placement_flags,
        .num_busy_placement = 1,
        .busy_placement = &sys_placement_flags
};

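/**
 * struct vmw_ttm_tt - The vmwgfx TTM backend state for one buffer object.
 *
 * @dma_ttm: The underlying TTM, possibly with coherent DMA page storage.
 * @dev_priv: The device private this TT belongs to.
 * @gmr_id: GMR or MOB id assigned at bind time.
 * @mob: Memory object structure, allocated on first MOB bind.
 * @mem_type: The memory type this TT is currently bound to.
 * @sgt: Scatter-gather table used in the DMA-API mapping modes.
 * @vsgt: Device-address view of the pages, handed to GMR/MOB binding.
 * @sg_alloc_size: Size accounted to the TTM memory global for @sgt.
 * @mapped: Whether a device mapping is currently set up.
 */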
struct vmw_ttm_tt {
        struct ttm_dma_tt dma_ttm;
        struct vmw_private *dev_priv;
        int gmr_id;
        struct vmw_mob *mob;
        int mem_type;
        struct sg_table sgt;
        struct vmw_sg_table vsgt;
        uint64_t sg_alloc_size;
        bool mapped;
};

const size_t vmw_tt_size = sizeof(struct vmw_ttm_tt);

/**
 * Helper functions to advance a struct vmw_piter iterator.
 *
 * @viter: Pointer to the iterator.
 *
 * These functions return false if past the end of the list,
 * true otherwise. Functions are selected depending on the current
 * DMA mapping mode.
 */
static bool __vmw_piter_non_sg_next(struct vmw_piter *viter)
{
        return ++(viter->i) < viter->num_pages;
}

static bool __vmw_piter_sg_next(struct vmw_piter *viter)
{
        return __sg_page_iter_next(&viter->iter);
}

/**
 * Helper functions to return a pointer to the current page.
 *
 * @viter: Pointer to the iterator
 *
 * These functions return a pointer to the page currently
 * pointed to by @viter. Functions are selected depending on the
 * current mapping mode.
 */
static struct page *__vmw_piter_non_sg_page(struct vmw_piter *viter)
{
        return viter->pages[viter->i];
}

static struct page *__vmw_piter_sg_page(struct vmw_piter *viter)
{
        return sg_page_iter_page(&viter->iter);
}

/**
 * Helper functions to return the DMA address of the current page.
 *
 * @viter: Pointer to the iterator
 *
 * These functions return the DMA address of the page currently
 * pointed to by @viter. Functions are selected depending on the
 * current mapping mode.
 */
static dma_addr_t __vmw_piter_phys_addr(struct vmw_piter *viter)
{
        return page_to_phys(viter->pages[viter->i]);
}

static dma_addr_t __vmw_piter_dma_addr(struct vmw_piter *viter)
{
        return viter->addrs[viter->i];
}

static dma_addr_t __vmw_piter_sg_addr(struct vmw_piter *viter)
{
        return sg_page_iter_dma_address(&viter->iter);
}

/**
 * vmw_piter_start - Initialize a struct vmw_piter.
 *
 * @viter: Pointer to the iterator to initialize
 * @vsgt: Pointer to a struct vmw_sg_table to initialize from
 * @p_offset: Page offset into @vsgt at which to start iterating
 *
 * Note that we're following the convention of __sg_page_iter_start, so that
 * the iterator doesn't point to a valid page after initialization; it has
 * to be advanced one step first.
 */
void vmw_piter_start(struct vmw_piter *viter, const struct vmw_sg_table *vsgt,
                     unsigned long p_offset)
{
        viter->i = p_offset - 1;
        viter->num_pages = vsgt->num_pages;
        switch (vsgt->mode) {
        case vmw_dma_phys:
                viter->next = &__vmw_piter_non_sg_next;
                viter->dma_address = &__vmw_piter_phys_addr;
                viter->page = &__vmw_piter_non_sg_page;
                viter->pages = vsgt->pages;
                break;
        case vmw_dma_alloc_coherent:
                viter->next = &__vmw_piter_non_sg_next;
                viter->dma_address = &__vmw_piter_dma_addr;
                viter->page = &__vmw_piter_non_sg_page;
                viter->addrs = vsgt->addrs;
                viter->pages = vsgt->pages;
                break;
        case vmw_dma_map_populate:
        case vmw_dma_map_bind:
                viter->next = &__vmw_piter_sg_next;
                viter->dma_address = &__vmw_piter_sg_addr;
                viter->page = &__vmw_piter_sg_page;
                __sg_page_iter_start(&viter->iter, vsgt->sgt->sgl,
                                     vsgt->sgt->orig_nents, p_offset);
                break;
        default:
                BUG();
        }
}

/**
 * vmw_ttm_unmap_from_dma - unmap device addresses previously mapped for
 * TTM pages
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * Used to free dma mappings previously mapped by vmw_ttm_map_for_dma.
 */
static void vmw_ttm_unmap_from_dma(struct vmw_ttm_tt *vmw_tt)
{
        struct device *dev = vmw_tt->dev_priv->dev->dev;

        dma_unmap_sg(dev, vmw_tt->sgt.sgl, vmw_tt->sgt.nents,
                     DMA_BIDIRECTIONAL);
        vmw_tt->sgt.nents = vmw_tt->sgt.orig_nents;
}

/**
 * vmw_ttm_map_for_dma - map TTM pages to get device addresses
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * This function is used to get device addresses from the kernel DMA layer.
 * However, it's violating the DMA API in that when this operation has been
 * performed, it's illegal for the CPU to write to the pages without first
 * unmapping the DMA mappings, or calling dma_sync_sg_for_cpu(). It is
 * therefore only legal to call this function if we know that the function
 * dma_sync_sg_for_cpu() is a NOP, and dma_sync_sg_for_device() is at most
 * a CPU write buffer flush.
 */
static int vmw_ttm_map_for_dma(struct vmw_ttm_tt *vmw_tt)
{
        struct device *dev = vmw_tt->dev_priv->dev->dev;
        int ret;

        ret = dma_map_sg(dev, vmw_tt->sgt.sgl, vmw_tt->sgt.orig_nents,
                         DMA_BIDIRECTIONAL);
        if (unlikely(ret == 0))
                return -ENOMEM;

        vmw_tt->sgt.nents = ret;
        return 0;
}

/**
 * vmw_ttm_map_dma - Make sure TTM pages are visible to the device
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * Select the mapping function matching the current DMA mapping mode and
 * make sure the TTM pages are visible to the device. Allocate storage
 * for the device mappings. If a mapping has already been performed,
 * indicated by the storage pointer being non NULL, the function returns
 * success.
 */
static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
{
        struct vmw_private *dev_priv = vmw_tt->dev_priv;
        struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
        struct vmw_sg_table *vsgt = &vmw_tt->vsgt;
        struct ttm_operation_ctx ctx = {
                .interruptible = true,
                .no_wait_gpu = false
        };
        struct vmw_piter iter;
        dma_addr_t old;
        int ret = 0;
        static size_t sgl_size;
        static size_t sgt_size;

        if (vmw_tt->mapped)
                return 0;

        vsgt->mode = dev_priv->map_mode;
        vsgt->pages = vmw_tt->dma_ttm.ttm.pages;
        vsgt->num_pages = vmw_tt->dma_ttm.ttm.num_pages;
        vsgt->addrs = vmw_tt->dma_ttm.dma_address;
        vsgt->sgt = &vmw_tt->sgt;

        switch (dev_priv->map_mode) {
        case vmw_dma_map_bind:
        case vmw_dma_map_populate:
                if (unlikely(!sgl_size)) {
                        sgl_size = ttm_round_pot(sizeof(struct scatterlist));
                        sgt_size = ttm_round_pot(sizeof(struct sg_table));
                }
                vmw_tt->sg_alloc_size = sgt_size + sgl_size * vsgt->num_pages;
                ret = ttm_mem_global_alloc(glob, vmw_tt->sg_alloc_size, &ctx);
                if (unlikely(ret != 0))
                        return ret;

                ret = sg_alloc_table_from_pages(&vmw_tt->sgt, vsgt->pages,
                                                vsgt->num_pages, 0,
                                                (unsigned long)
                                                vsgt->num_pages << PAGE_SHIFT,
                                                GFP_KERNEL);
                if (unlikely(ret != 0))
                        goto out_sg_alloc_fail;

                if (vsgt->num_pages > vmw_tt->sgt.nents) {
                        uint64_t over_alloc =
                                sgl_size * (vsgt->num_pages -
                                            vmw_tt->sgt.nents);

                        ttm_mem_global_free(glob, over_alloc);
                        vmw_tt->sg_alloc_size -= over_alloc;
                }

                ret = vmw_ttm_map_for_dma(vmw_tt);
                if (unlikely(ret != 0))
                        goto out_map_fail;

                break;
        default:
                break;
        }

        /* Count the number of contiguous DMA address ranges in the mapping. */
        old = ~((dma_addr_t) 0);
        vmw_tt->vsgt.num_regions = 0;
        for (vmw_piter_start(&iter, vsgt, 0); vmw_piter_next(&iter);) {
                dma_addr_t cur = vmw_piter_dma_addr(&iter);

                if (cur != old + PAGE_SIZE)
                        vmw_tt->vsgt.num_regions++;
                old = cur;
        }

        vmw_tt->mapped = true;
        return 0;

out_map_fail:
        sg_free_table(vmw_tt->vsgt.sgt);
        vmw_tt->vsgt.sgt = NULL;
out_sg_alloc_fail:
        ttm_mem_global_free(glob, vmw_tt->sg_alloc_size);
        return ret;
}

/**
 * vmw_ttm_unmap_dma - Tear down any TTM page device mappings
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * Tear down any previously set up device DMA mappings and free
 * any storage space allocated for them. If there are no mappings set up,
 * this function is a NOP.
 */
static void vmw_ttm_unmap_dma(struct vmw_ttm_tt *vmw_tt)
{
        struct vmw_private *dev_priv = vmw_tt->dev_priv;

        if (!vmw_tt->vsgt.sgt)
                return;

        switch (dev_priv->map_mode) {
        case vmw_dma_map_bind:
        case vmw_dma_map_populate:
                vmw_ttm_unmap_from_dma(vmw_tt);
                sg_free_table(vmw_tt->vsgt.sgt);
                vmw_tt->vsgt.sgt = NULL;
                ttm_mem_global_free(vmw_mem_glob(dev_priv),
                                    vmw_tt->sg_alloc_size);
                break;
        default:
                break;
        }
        vmw_tt->mapped = false;
}

/**
 * vmw_bo_map_dma - Make sure buffer object pages are visible to the device
 *
 * @bo: Pointer to a struct ttm_buffer_object
 *
 * Wrapper around vmw_ttm_map_dma, that takes a TTM buffer object pointer
 * instead of a pointer to a struct vmw_ttm_tt as argument.
 * Note that the buffer object must be either pinned or reserved before
 * calling this function.
 */
int vmw_bo_map_dma(struct ttm_buffer_object *bo)
{
        struct vmw_ttm_tt *vmw_tt =
                container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);

        return vmw_ttm_map_dma(vmw_tt);
}

/**
 * vmw_bo_unmap_dma - Tear down any device mappings of buffer object pages
 *
 * @bo: Pointer to a struct ttm_buffer_object
 *
 * Wrapper around vmw_ttm_unmap_dma, that takes a TTM buffer object pointer
 * instead of a pointer to a struct vmw_ttm_tt as argument.
 */
void vmw_bo_unmap_dma(struct ttm_buffer_object *bo)
{
        struct vmw_ttm_tt *vmw_tt =
                container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);

        vmw_ttm_unmap_dma(vmw_tt);
}

/**
 * vmw_bo_sg_table - Return a struct vmw_sg_table object for a
 * TTM buffer object
 *
 * @bo: Pointer to a struct ttm_buffer_object
 *
 * Returns a pointer to a struct vmw_sg_table object. The object should
 * not be freed after use.
 * Note that for the device addresses to be valid, the buffer object must
 * either be reserved or pinned.
 */
const struct vmw_sg_table *vmw_bo_sg_table(struct ttm_buffer_object *bo)
{
        struct vmw_ttm_tt *vmw_tt =
                container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);

        return &vmw_tt->vsgt;
}

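/**
 * vmw_ttm_bind - TTM backend bind callback
 *
 * @ttm: The TTM to bind.
 * @bo_mem: The memory region being bound to; its start is used as the
 * GMR or MOB id.
 *
 * Makes sure the pages are DMA mapped, then binds them to the device
 * either as a Guest Memory Region or through a Memory OBject, creating
 * the mob on first use.
 */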
static int vmw_ttm_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
{
        struct vmw_ttm_tt *vmw_be =
                container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
        int ret;

        ret = vmw_ttm_map_dma(vmw_be);
        if (unlikely(ret != 0))
                return ret;

        vmw_be->gmr_id = bo_mem->start;
        vmw_be->mem_type = bo_mem->mem_type;

        switch (bo_mem->mem_type) {
        case VMW_PL_GMR:
                return vmw_gmr_bind(vmw_be->dev_priv, &vmw_be->vsgt,
                                    ttm->num_pages, vmw_be->gmr_id);
        case VMW_PL_MOB:
                if (unlikely(vmw_be->mob == NULL)) {
                        vmw_be->mob =
                                vmw_mob_create(ttm->num_pages);
                        if (unlikely(vmw_be->mob == NULL))
                                return -ENOMEM;
                }

                return vmw_mob_bind(vmw_be->dev_priv, vmw_be->mob,
                                    &vmw_be->vsgt, ttm->num_pages,
                                    vmw_be->gmr_id);
        default:
                BUG();
        }
        return 0;
}

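/**
 * vmw_ttm_unbind - TTM backend unbind callback
 *
 * @ttm: The TTM to unbind.
 *
 * Unbinds the GMR or MOB binding set up by vmw_ttm_bind. In the
 * vmw_dma_map_bind mode the DMA mappings are torn down as well, since
 * they are only needed while the TTM is bound.
 */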
static int vmw_ttm_unbind(struct ttm_tt *ttm)
{
        struct vmw_ttm_tt *vmw_be =
                container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);

        switch (vmw_be->mem_type) {
        case VMW_PL_GMR:
                vmw_gmr_unbind(vmw_be->dev_priv, vmw_be->gmr_id);
                break;
        case VMW_PL_MOB:
                vmw_mob_unbind(vmw_be->dev_priv, vmw_be->mob);
                break;
        default:
                BUG();
        }

        if (vmw_be->dev_priv->map_mode == vmw_dma_map_bind)
                vmw_ttm_unmap_dma(vmw_be);

        return 0;
}

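/**
 * vmw_ttm_destroy - TTM backend destroy callback
 *
 * @ttm: The TTM to destroy.
 *
 * Tears down any remaining DMA mappings, finalizes the TTM (using the
 * DMA variant when pages were allocated coherently), destroys any
 * associated mob and frees the backend structure.
 */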
static void vmw_ttm_destroy(struct ttm_tt *ttm)
{
        struct vmw_ttm_tt *vmw_be =
                container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);

        vmw_ttm_unmap_dma(vmw_be);
        if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
                ttm_dma_tt_fini(&vmw_be->dma_ttm);
        else
                ttm_tt_fini(ttm);

        if (vmw_be->mob)
                vmw_mob_destroy(vmw_be->mob);

        kfree(vmw_be);
}

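/**
 * vmw_ttm_populate - TTM backend populate callback
 *
 * @ttm: The TTM to populate with pages.
 * @ctx: The TTM operation context.
 *
 * Allocates backing pages, either coherently through the DMA page pool
 * (accounting the dma_addr_t array with the TTM memory global) or from
 * the regular TTM page pool.
 */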
static int vmw_ttm_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
        struct vmw_ttm_tt *vmw_tt =
                container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
        struct vmw_private *dev_priv = vmw_tt->dev_priv;
        struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
        int ret;

        if (ttm->state != tt_unpopulated)
                return 0;

        if (dev_priv->map_mode == vmw_dma_alloc_coherent) {
                size_t size =
                        ttm_round_pot(ttm->num_pages * sizeof(dma_addr_t));
                ret = ttm_mem_global_alloc(glob, size, ctx);
                if (unlikely(ret != 0))
                        return ret;

                ret = ttm_dma_populate(&vmw_tt->dma_ttm, dev_priv->dev->dev,
                                       ctx);
                if (unlikely(ret != 0))
                        ttm_mem_global_free(glob, size);
        } else
                ret = ttm_pool_populate(ttm, ctx);

        return ret;
}

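/**
 * vmw_ttm_unpopulate - TTM backend unpopulate callback
 *
 * @ttm: The TTM to free pages from.
 *
 * Destroys any mob still attached to the TTM, tears down DMA mappings
 * and releases the pages back to the pool they were allocated from,
 * undoing the memory-global accounting done in vmw_ttm_populate.
 */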
static void vmw_ttm_unpopulate(struct ttm_tt *ttm)
{
        struct vmw_ttm_tt *vmw_tt = container_of(ttm, struct vmw_ttm_tt,
                                                 dma_ttm.ttm);
        struct vmw_private *dev_priv = vmw_tt->dev_priv;
        struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);

        if (vmw_tt->mob) {
                vmw_mob_destroy(vmw_tt->mob);
                vmw_tt->mob = NULL;
        }

        vmw_ttm_unmap_dma(vmw_tt);
        if (dev_priv->map_mode == vmw_dma_alloc_coherent) {
                size_t size =
                        ttm_round_pot(ttm->num_pages * sizeof(dma_addr_t));

                ttm_dma_unpopulate(&vmw_tt->dma_ttm, dev_priv->dev->dev);
                ttm_mem_global_free(glob, size);
        } else
                ttm_pool_unpopulate(ttm);
}

static struct ttm_backend_func vmw_ttm_func = {
        .bind = vmw_ttm_bind,
        .unbind = vmw_ttm_unbind,
        .destroy = vmw_ttm_destroy,
};

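/**
 * vmw_ttm_tt_create - TTM device ttm_tt_create callback
 *
 * @bo: The buffer object the TTM is created for.
 * @page_flags: TTM page flags.
 *
 * Allocates and initializes a struct vmw_ttm_tt, using the coherent-DMA
 * TTM initializer when the device is in vmw_dma_alloc_coherent mode.
 * Returns NULL on failure.
 */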
static struct ttm_tt *vmw_ttm_tt_create(struct ttm_buffer_object *bo,
                                        uint32_t page_flags)
{
        struct vmw_ttm_tt *vmw_be;
        int ret;

        vmw_be = kzalloc(sizeof(*vmw_be), GFP_KERNEL);
        if (!vmw_be)
                return NULL;

        vmw_be->dma_ttm.ttm.func = &vmw_ttm_func;
        vmw_be->dev_priv = container_of(bo->bdev, struct vmw_private, bdev);
        vmw_be->mob = NULL;

        if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
                ret = ttm_dma_tt_init(&vmw_be->dma_ttm, bo, page_flags);
        else
                ret = ttm_tt_init(&vmw_be->dma_ttm.ttm, bo, page_flags);
        if (unlikely(ret != 0))
                goto out_no_init;

        return &vmw_be->dma_ttm.ttm;
out_no_init:
        kfree(vmw_be);
        return NULL;
}

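/* No device caches to invalidate; this callback is a stub. */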
static int vmw_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
        return 0;
}

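/**
 * vmw_init_mem_type - TTM memory type initialization callback
 *
 * @bdev: The TTM bo device.
 * @type: The memory type to initialize.
 * @man: The memory type manager to fill in.
 *
 * Sets up the system, VRAM, GMR and MOB memory types: VRAM uses the
 * generic range manager, while GMR and MOB ids come from the vmwgfx
 * gmrid manager.
 */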
static int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
                             struct ttm_mem_type_manager *man)
{
        switch (type) {
        case TTM_PL_SYSTEM:
                /* System memory */
                man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
                man->available_caching = TTM_PL_FLAG_CACHED;
                man->default_caching = TTM_PL_FLAG_CACHED;
                break;
        case TTM_PL_VRAM:
                /* "On-card" video ram */
                man->func = &ttm_bo_manager_func;
                man->gpu_offset = 0;
                man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE;
                man->available_caching = TTM_PL_FLAG_CACHED;
                man->default_caching = TTM_PL_FLAG_CACHED;
                break;
        case VMW_PL_GMR:
        case VMW_PL_MOB:
                /*
                 * "Guest Memory Regions" is an aperture-like feature with
                 * one slot per bo. There is an upper limit on both the
                 * number of slots and the bo size.
                 */
                man->func = &vmw_gmrid_manager_func;
                man->gpu_offset = 0;
                man->flags = TTM_MEMTYPE_FLAG_CMA | TTM_MEMTYPE_FLAG_MAPPABLE;
                man->available_caching = TTM_PL_FLAG_CACHED;
                man->default_caching = TTM_PL_FLAG_CACHED;
                break;
        default:
                DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
                return -EINVAL;
        }
        return 0;
}

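/*
 * vmw_evict_flags - On eviction, always fall back to cached system memory.
 */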
static void vmw_evict_flags(struct ttm_buffer_object *bo,
                            struct ttm_placement *placement)
{
        *placement = vmw_sys_placement;
}

static int vmw_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
        struct ttm_object_file *tfile =
                vmw_fpriv((struct drm_file *)filp->private_data)->tfile;

        return vmw_user_dmabuf_verify_access(bo, tfile);
}

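/**
 * vmw_ttm_io_mem_reserve - TTM io_mem_reserve callback
 *
 * @bdev: The TTM bo device.
 * @mem: The memory region to reserve I/O space for.
 *
 * System, GMR and MOB memory is not iomem; only VRAM gets an I/O
 * mapping, offset from the device's VRAM base address.
 */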
static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
        struct vmw_private *dev_priv = container_of(bdev, struct vmw_private, bdev);

        mem->bus.addr = NULL;
        mem->bus.is_iomem = false;
        mem->bus.offset = 0;
        mem->bus.size = mem->num_pages << PAGE_SHIFT;
        mem->bus.base = 0;
        if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
                return -EINVAL;
        switch (mem->mem_type) {
        case TTM_PL_SYSTEM:
        case VMW_PL_GMR:
        case VMW_PL_MOB:
                return 0;
        case TTM_PL_VRAM:
                mem->bus.offset = mem->start << PAGE_SHIFT;
                mem->bus.base = dev_priv->vram_start;
                mem->bus.is_iomem = true;
                break;
        default:
                return -EINVAL;
        }
        return 0;
}

static void vmw_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
}

static int vmw_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
{
        return 0;
}

/**
 * vmw_move_notify - TTM move_notify callback
 *
 * @bo: The TTM buffer object about to move.
 * @evict: Whether this move is an eviction.
 * @mem: The struct ttm_mem_reg indicating the memory region the buffer
 * is moving to.
 *
 * Calls move_notify for all subsystems needing it
 * (currently only resources).
 */
static void vmw_move_notify(struct ttm_buffer_object *bo,
                            bool evict,
                            struct ttm_mem_reg *mem)
{
        vmw_resource_move_notify(bo, mem);
        vmw_query_move_notify(bo, mem);
}

/**
 * vmw_swap_notify - TTM swap_notify callback
 *
 * @bo: The TTM buffer object about to be swapped out.
 *
 * Notifies the resource code and waits for the buffer to be idle
 * before it is swapped out.
 */
static void vmw_swap_notify(struct ttm_buffer_object *bo)
{
        vmw_resource_swap_notify(bo);
        (void) ttm_bo_wait(bo, false, false);
}

struct ttm_bo_driver vmw_bo_driver = {
        .ttm_tt_create = &vmw_ttm_tt_create,
        .ttm_tt_populate = &vmw_ttm_populate,
        .ttm_tt_unpopulate = &vmw_ttm_unpopulate,
        .invalidate_caches = vmw_invalidate_caches,
        .init_mem_type = vmw_init_mem_type,
        .eviction_valuable = ttm_bo_eviction_valuable,
        .evict_flags = vmw_evict_flags,
        .move = NULL,
        .verify_access = vmw_verify_access,
        .move_notify = vmw_move_notify,
        .swap_notify = vmw_swap_notify,
        .fault_reserve_notify = &vmw_ttm_fault_reserve_notify,
        .io_mem_reserve = &vmw_ttm_io_mem_reserve,
        .io_mem_free = &vmw_ttm_io_mem_free,
};