vmwgfx_mob.c

/**************************************************************************
 *
 * Copyright © 2012 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
/*
 * If we set up the screen target otable, screen objects stop working.
 */
#define VMW_OTABLE_SETUP_SUB ((VMWGFX_ENABLE_SCREEN_TARGET_OTABLE) ? 0 : 1)
#ifdef CONFIG_64BIT
#define VMW_PPN_SIZE 8
#define VMW_MOBFMT_PTDEPTH_0 SVGA3D_MOBFMT_PTDEPTH64_0
#define VMW_MOBFMT_PTDEPTH_1 SVGA3D_MOBFMT_PTDEPTH64_1
#define VMW_MOBFMT_PTDEPTH_2 SVGA3D_MOBFMT_PTDEPTH64_2
#else
#define VMW_PPN_SIZE 4
#define VMW_MOBFMT_PTDEPTH_0 SVGA3D_MOBFMT_PTDEPTH_0
#define VMW_MOBFMT_PTDEPTH_1 SVGA3D_MOBFMT_PTDEPTH_1
#define VMW_MOBFMT_PTDEPTH_2 SVGA3D_MOBFMT_PTDEPTH_2
#endif
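
/*
 * Each page table entry holds a page frame number (PPN). 64-bit
 * configurations hand the device 8-byte entries and so use the
 * SVGA3D_MOBFMT_PTDEPTH64_* formats; 32-bit configurations use 4-byte
 * entries with the plain SVGA3D_MOBFMT_PTDEPTH_* formats.
 */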
/*
 * struct vmw_mob - Structure containing page table and metadata for a
 * Guest Memory OBject.
 *
 * @pt_bo: Buffer object holding the page table pages.
 * @num_pages: Number of pages that make up the page table.
 * @pt_level: The indirection level of the page table. 0-2.
 * @pt_root_page: DMA address of the level 0 page of the page table.
 * @id: Device id of the mob.
 */
struct vmw_mob {
	struct ttm_buffer_object *pt_bo;
	unsigned long num_pages;
	unsigned pt_level;
	dma_addr_t pt_root_page;
	uint32_t id;
};
/*
 * struct vmw_otable - Guest Memory OBject table metadata
 *
 * @size: Size of the table (page-aligned).
 * @page_table: Pointer to a struct vmw_mob holding the page table.
 */
struct vmw_otable {
	unsigned long size;
	struct vmw_mob *page_table;
};
static int vmw_mob_pt_populate(struct vmw_private *dev_priv,
			       struct vmw_mob *mob);
static void vmw_mob_pt_setup(struct vmw_mob *mob,
			     struct vmw_piter data_iter,
			     unsigned long num_data_pages);
/*
 * vmw_setup_otable_base - Issue an object table base setup command to
 * the device
 *
 * @dev_priv: Pointer to a device private structure
 * @type: Type of object table base
 * @offset: Start offset of the table within dev_priv::otable_bo
 * @otable: Pointer to otable metadata
 *
 * This function returns -ENOMEM if it fails to reserve fifo space,
 * and may block waiting for fifo space.
 */
static int vmw_setup_otable_base(struct vmw_private *dev_priv,
				 SVGAOTableType type,
				 unsigned long offset,
				 struct vmw_otable *otable)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetOTableBase64 body;
	} *cmd;
	struct vmw_mob *mob;
	const struct vmw_sg_table *vsgt;
	struct vmw_piter iter;
	int ret;

	BUG_ON(otable->page_table != NULL);

	vsgt = vmw_bo_sg_table(dev_priv->otable_bo);
	vmw_piter_start(&iter, vsgt, offset >> PAGE_SHIFT);
	WARN_ON(!vmw_piter_next(&iter));

	mob = vmw_mob_create(otable->size >> PAGE_SHIFT);
	if (unlikely(mob == NULL)) {
		DRM_ERROR("Failed creating OTable page table.\n");
		return -ENOMEM;
	}
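
	/*
	 * Pick the cheapest MOB format that fits: a table of at most one
	 * page needs no page table (the root "page" is the data page
	 * itself), a physically contiguous table can be described as a
	 * flat range, and anything else gets a real multi-level page
	 * table.
	 */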
	if (otable->size <= PAGE_SIZE) {
		mob->pt_level = VMW_MOBFMT_PTDEPTH_0;
		mob->pt_root_page = vmw_piter_dma_addr(&iter);
	} else if (vsgt->num_regions == 1) {
		mob->pt_level = SVGA3D_MOBFMT_RANGE;
		mob->pt_root_page = vmw_piter_dma_addr(&iter);
	} else {
		ret = vmw_mob_pt_populate(dev_priv, mob);
		if (unlikely(ret != 0))
			goto out_no_populate;

		vmw_mob_pt_setup(mob, iter, otable->size >> PAGE_SHIFT);
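		/*
		 * vmw_mob_pt_setup() left a numeric depth (1 or 2) in
		 * pt_level; adding the offset between the two enum
		 * families maps it onto the PTDEPTH64 formats on 64-bit
		 * builds, and is a no-op on 32-bit ones.
		 */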
		mob->pt_level += VMW_MOBFMT_PTDEPTH_1 - SVGA3D_MOBFMT_PTDEPTH_1;
	}
	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for OTable setup.\n");
		ret = -ENOMEM;
		goto out_no_fifo;
	}

	memset(cmd, 0, sizeof(*cmd));
	cmd->header.id = SVGA_3D_CMD_SET_OTABLE_BASE64;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.type = type;
	cmd->body.baseAddress = cpu_to_le64(mob->pt_root_page >> PAGE_SHIFT);
	cmd->body.sizeInBytes = otable->size;
	cmd->body.validSizeInBytes = 0;
	cmd->body.ptDepth = mob->pt_level;

	/*
	 * The device doesn't support this, but the otable size is
	 * determined at compile-time, so this BUG shouldn't trigger
	 * randomly.
	 */
	BUG_ON(mob->pt_level == VMW_MOBFMT_PTDEPTH_2);

	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	otable->page_table = mob;

	return 0;

out_no_fifo:
out_no_populate:
	vmw_mob_destroy(mob);
	return ret;
}
/*
 * vmw_takedown_otable_base - Issue an object table base takedown command
 * to the device
 *
 * @dev_priv: Pointer to a device private structure
 * @type: Type of object table base
 * @otable: Pointer to otable metadata
 */
static void vmw_takedown_otable_base(struct vmw_private *dev_priv,
				     SVGAOTableType type,
				     struct vmw_otable *otable)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetOTableBase body;
	} *cmd;
	struct ttm_buffer_object *bo;

	if (otable->page_table == NULL)
		return;

	bo = otable->page_table->pt_bo;
	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for OTable "
			  "takedown.\n");
	} else {
		memset(cmd, 0, sizeof(*cmd));
		cmd->header.id = SVGA_3D_CMD_SET_OTABLE_BASE;
		cmd->header.size = sizeof(cmd->body);
		cmd->body.type = type;
		cmd->body.baseAddress = 0;
		cmd->body.sizeInBytes = 0;
		cmd->body.validSizeInBytes = 0;
		cmd->body.ptDepth = SVGA3D_MOBFMT_INVALID;
		vmw_fifo_commit(dev_priv, sizeof(*cmd));
	}
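
	/*
	 * Fence the page-table BO so its pages can't be released while
	 * the device may still be accessing them.
	 */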
	if (bo) {
		int ret;

		ret = ttm_bo_reserve(bo, false, true, false, NULL);
		BUG_ON(ret != 0);

		vmw_fence_single_bo(bo, NULL);
		ttm_bo_unreserve(bo);
	}

	vmw_mob_destroy(otable->page_table);
	otable->page_table = NULL;
}
/*
 * vmw_otables_setup - Set up guest backed memory object tables
 *
 * @dev_priv: Pointer to a device private structure
 *
 * Takes care of the device guest backed surface
 * initialization, by setting up the guest backed memory object tables.
 * Returns 0 on success and various error codes on failure. A successful
 * return means the object tables can be taken down using the
 * vmw_otables_takedown function.
 */
int vmw_otables_setup(struct vmw_private *dev_priv)
{
	unsigned long offset;
	unsigned long bo_size;
	struct vmw_otable *otables;
	SVGAOTableType i;
	int ret;

	otables = kzalloc(SVGA_OTABLE_DX9_MAX * sizeof(*otables),
			  GFP_KERNEL);
	if (unlikely(otables == NULL)) {
		DRM_ERROR("Failed to allocate space for otable "
			  "metadata.\n");
		return -ENOMEM;
	}

	otables[SVGA_OTABLE_MOB].size =
		VMWGFX_NUM_MOB * SVGA3D_OTABLE_MOB_ENTRY_SIZE;
	otables[SVGA_OTABLE_SURFACE].size =
		VMWGFX_NUM_GB_SURFACE * SVGA3D_OTABLE_SURFACE_ENTRY_SIZE;
	otables[SVGA_OTABLE_CONTEXT].size =
		VMWGFX_NUM_GB_CONTEXT * SVGA3D_OTABLE_CONTEXT_ENTRY_SIZE;
	otables[SVGA_OTABLE_SHADER].size =
		VMWGFX_NUM_GB_SHADER * SVGA3D_OTABLE_SHADER_ENTRY_SIZE;
	otables[SVGA_OTABLE_SCREEN_TARGET].size =
		VMWGFX_NUM_GB_SCREEN_TARGET *
		SVGA3D_OTABLE_SCREEN_TARGET_ENTRY_SIZE;
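
	/*
	 * All otables are packed back to back into a single
	 * system-memory BO, with each table rounded up to a page
	 * boundary so it starts on a page of its own.
	 */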
	bo_size = 0;
	for (i = 0; i < SVGA_OTABLE_DX9_MAX; ++i) {
		otables[i].size =
			(otables[i].size + PAGE_SIZE - 1) & PAGE_MASK;
		bo_size += otables[i].size;
	}

	ret = ttm_bo_create(&dev_priv->bdev, bo_size,
			    ttm_bo_type_device,
			    &vmw_sys_ne_placement,
			    0, false, NULL,
			    &dev_priv->otable_bo);
	if (unlikely(ret != 0))
		goto out_no_bo;

	ret = ttm_bo_reserve(dev_priv->otable_bo, false, true, false, NULL);
	BUG_ON(ret != 0);
	ret = vmw_bo_driver.ttm_tt_populate(dev_priv->otable_bo->ttm);
	if (unlikely(ret != 0))
		goto out_unreserve;
	ret = vmw_bo_map_dma(dev_priv->otable_bo);
	if (unlikely(ret != 0))
		goto out_unreserve;
	ttm_bo_unreserve(dev_priv->otable_bo);

	offset = 0;
	for (i = 0; i < SVGA_OTABLE_DX9_MAX - VMW_OTABLE_SETUP_SUB; ++i) {
		ret = vmw_setup_otable_base(dev_priv, i, offset,
					    &otables[i]);
		if (unlikely(ret != 0))
			goto out_no_setup;
		offset += otables[i].size;
	}

	dev_priv->otables = otables;
	return 0;

out_unreserve:
	ttm_bo_unreserve(dev_priv->otable_bo);
out_no_setup:
	for (i = 0; i < SVGA_OTABLE_DX9_MAX - VMW_OTABLE_SETUP_SUB; ++i)
		vmw_takedown_otable_base(dev_priv, i, &otables[i]);

	ttm_bo_unref(&dev_priv->otable_bo);
out_no_bo:
	kfree(otables);
	return ret;
}
/*
 * vmw_otables_takedown - Take down guest backed memory object tables
 *
 * @dev_priv: Pointer to a device private structure
 *
 * Take down the Guest Memory Object tables.
 */
void vmw_otables_takedown(struct vmw_private *dev_priv)
{
	SVGAOTableType i;
	struct ttm_buffer_object *bo = dev_priv->otable_bo;
	int ret;

	for (i = 0; i < SVGA_OTABLE_DX9_MAX - VMW_OTABLE_SETUP_SUB; ++i)
		vmw_takedown_otable_base(dev_priv, i,
					 &dev_priv->otables[i]);

	ret = ttm_bo_reserve(bo, false, true, false, NULL);
	BUG_ON(ret != 0);

	vmw_fence_single_bo(bo, NULL);
	ttm_bo_unreserve(bo);

	ttm_bo_unref(&dev_priv->otable_bo);
	kfree(dev_priv->otables);
	dev_priv->otables = NULL;
}
/*
 * vmw_mob_calculate_pt_pages - Calculate the number of page table pages
 * needed for a guest backed memory object.
 *
 * @data_pages: Number of data pages in the memory object buffer.
 */
static unsigned long vmw_mob_calculate_pt_pages(unsigned long data_pages)
{
	unsigned long data_size = data_pages * PAGE_SIZE;
	unsigned long tot_size = 0;
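
	/*
	 * Each pass computes the page-aligned size of the next page
	 * table level: e.g. with VMW_PPN_SIZE == 8 and 4 KiB pages, a
	 * 2 GiB buffer (524288 data pages) needs 1024 + 2 + 1 = 1027
	 * page table pages.
	 */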
	while (likely(data_size > PAGE_SIZE)) {
		data_size = DIV_ROUND_UP(data_size, PAGE_SIZE);
		data_size *= VMW_PPN_SIZE;
		tot_size += (data_size + PAGE_SIZE - 1) & PAGE_MASK;
	}

	return tot_size >> PAGE_SHIFT;
}
/*
 * vmw_mob_create - Create a mob, but don't populate it.
 *
 * @data_pages: Number of data pages of the underlying buffer object.
 */
struct vmw_mob *vmw_mob_create(unsigned long data_pages)
{
	struct vmw_mob *mob = kzalloc(sizeof(*mob), GFP_KERNEL);

	if (unlikely(mob == NULL))
		return NULL;

	mob->num_pages = vmw_mob_calculate_pt_pages(data_pages);
	return mob;
}
/*
 * vmw_mob_pt_populate - Populate the mob pagetable
 *
 * @dev_priv: Pointer to a device private structure
 * @mob: Pointer to the mob whose pagetable we want to populate.
 *
 * This function allocates memory to be used for the pagetable, and
 * adjusts TTM memory accounting accordingly. Returns -ENOMEM if
 * memory resources aren't sufficient and may cause TTM buffer objects
 * to be swapped out by using the TTM memory accounting function.
 */
static int vmw_mob_pt_populate(struct vmw_private *dev_priv,
			       struct vmw_mob *mob)
{
	int ret;

	BUG_ON(mob->pt_bo != NULL);

	ret = ttm_bo_create(&dev_priv->bdev, mob->num_pages * PAGE_SIZE,
			    ttm_bo_type_device,
			    &vmw_sys_ne_placement,
			    0, false, NULL, &mob->pt_bo);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_bo_reserve(mob->pt_bo, false, true, false, NULL);
	BUG_ON(ret != 0);
	ret = vmw_bo_driver.ttm_tt_populate(mob->pt_bo->ttm);
	if (unlikely(ret != 0))
		goto out_unreserve;
	ret = vmw_bo_map_dma(mob->pt_bo);
	if (unlikely(ret != 0))
		goto out_unreserve;
	ttm_bo_unreserve(mob->pt_bo);

	return 0;

out_unreserve:
	ttm_bo_unreserve(mob->pt_bo);
	ttm_bo_unref(&mob->pt_bo);
	return ret;
}
/**
 * vmw_mob_assign_ppn - Assign a value to a page table entry
 *
 * @addr: Pointer to pointer to page table entry.
 * @val: The page table entry
 *
 * Assigns a value to a page table entry pointed to by *@addr and increments
 * *@addr according to the page table entry size.
 */
#if (VMW_PPN_SIZE == 8)
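/* 64-bit entries: store one __le64 PPN and step past two __le32 slots. */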
static void vmw_mob_assign_ppn(__le32 **addr, dma_addr_t val)
{
	*((__le64 *) *addr) = cpu_to_le64(val >> PAGE_SHIFT);
	*addr += 2;
}
#else
static void vmw_mob_assign_ppn(__le32 **addr, dma_addr_t val)
{
	*(*addr)++ = cpu_to_le32(val >> PAGE_SHIFT);
}
#endif
/*
 * vmw_mob_build_pt - Build a pagetable
 *
 * @data_iter: Page iterator over the underlying buffer object's data pages.
 * @num_data_pages: Number of buffer object data pages.
 * @pt_iter: Page iterator over the page table pages.
 *
 * Returns the number of page table pages actually used.
 * Uses atomic kmaps of highmem pages to avoid TLB thrashing.
 */
static unsigned long vmw_mob_build_pt(struct vmw_piter *data_iter,
				      unsigned long num_data_pages,
				      struct vmw_piter *pt_iter)
{
	unsigned long pt_size = num_data_pages * VMW_PPN_SIZE;
	unsigned long num_pt_pages = DIV_ROUND_UP(pt_size, PAGE_SIZE);
	unsigned long pt_page;
	__le32 *addr, *save_addr;
	unsigned long i;
	struct page *page;
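
	/*
	 * Map one page table page at a time and fill it with the PPNs
	 * of the next PAGE_SIZE / VMW_PPN_SIZE data pages.
	 */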
	for (pt_page = 0; pt_page < num_pt_pages; ++pt_page) {
		page = vmw_piter_page(pt_iter);

		save_addr = addr = kmap_atomic(page);

		for (i = 0; i < PAGE_SIZE / VMW_PPN_SIZE; ++i) {
			vmw_mob_assign_ppn(&addr,
					   vmw_piter_dma_addr(data_iter));
			if (unlikely(--num_data_pages == 0))
				break;
			WARN_ON(!vmw_piter_next(data_iter));
		}
		kunmap_atomic(save_addr);
		vmw_piter_next(pt_iter);
	}

	return num_pt_pages;
}
/*
 * vmw_mob_pt_setup - Set up a multilevel mob pagetable
 *
 * @mob: Pointer to a mob whose page table needs setting up.
 * @data_iter: Page iterator over the buffer object's data pages.
 * @num_data_pages: Number of buffer object data pages.
 *
 * Iteratively sets up a multilevel mob page table.
 */
static void vmw_mob_pt_setup(struct vmw_mob *mob,
			     struct vmw_piter data_iter,
			     unsigned long num_data_pages)
{
	unsigned long num_pt_pages = 0;
	struct ttm_buffer_object *bo = mob->pt_bo;
	struct vmw_piter save_pt_iter;
	struct vmw_piter pt_iter;
	const struct vmw_sg_table *vsgt;
	int ret;

	ret = ttm_bo_reserve(bo, false, true, false, NULL);
	BUG_ON(ret != 0);

	vsgt = vmw_bo_sg_table(bo);
	vmw_piter_start(&pt_iter, vsgt, 0);
	BUG_ON(!vmw_piter_next(&pt_iter));
	mob->pt_level = 0;
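
	/*
	 * Build one level per iteration, bottom-up: the page table
	 * pages written for this level become the data pages of the
	 * level above, until a single root page remains.
	 */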
	while (likely(num_data_pages > 1)) {
		++mob->pt_level;
		BUG_ON(mob->pt_level > 2);
		save_pt_iter = pt_iter;
		num_pt_pages = vmw_mob_build_pt(&data_iter, num_data_pages,
						&pt_iter);
		data_iter = save_pt_iter;
		num_data_pages = num_pt_pages;
	}

	mob->pt_root_page = vmw_piter_dma_addr(&save_pt_iter);
	ttm_bo_unreserve(bo);
}
/*
 * vmw_mob_destroy - Destroy a mob, unpopulating first if necessary.
 *
 * @mob: Pointer to a mob to destroy.
 */
void vmw_mob_destroy(struct vmw_mob *mob)
{
	if (mob->pt_bo)
		ttm_bo_unref(&mob->pt_bo);
	kfree(mob);
}
/*
 * vmw_mob_unbind - Hide a mob from the device.
 *
 * @dev_priv: Pointer to a device private.
 * @mob: Pointer to the mob to unbind.
 */
void vmw_mob_unbind(struct vmw_private *dev_priv,
		    struct vmw_mob *mob)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroyGBMob body;
	} *cmd;
	int ret;
	struct ttm_buffer_object *bo = mob->pt_bo;

	if (bo) {
		ret = ttm_bo_reserve(bo, false, true, false, NULL);
		/*
		 * No one else should be using this buffer.
		 */
		BUG_ON(ret != 0);
	}

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for Memory "
			  "Object unbinding.\n");
	} else {
		cmd->header.id = SVGA_3D_CMD_DESTROY_GB_MOB;
		cmd->header.size = sizeof(cmd->body);
		cmd->body.mobid = mob->id;
		vmw_fifo_commit(dev_priv, sizeof(*cmd));
	}

	if (bo) {
		vmw_fence_single_bo(bo, NULL);
		ttm_bo_unreserve(bo);
	}
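
	/* Drop the 3D resource reference taken in vmw_mob_bind(). */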
	vmw_3d_resource_dec(dev_priv, false);
}
/*
 * vmw_mob_bind - Make a mob visible to the device after first
 * populating it if necessary.
 *
 * @dev_priv: Pointer to a device private.
 * @mob: Pointer to the mob we're making visible.
 * @vsgt: Pointer to a struct vmw_sg_table describing the data pages of
 * the underlying buffer object.
 * @num_data_pages: Number of data pages of the underlying buffer
 * object.
 * @mob_id: Device id of the mob to bind
 *
 * This function is intended to be interfaced with the ttm_tt backend
 * code.
 */
int vmw_mob_bind(struct vmw_private *dev_priv,
		 struct vmw_mob *mob,
		 const struct vmw_sg_table *vsgt,
		 unsigned long num_data_pages,
		 int32_t mob_id)
{
	int ret;
	bool pt_set_up = false;
	struct vmw_piter data_iter;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineGBMob64 body;
	} *cmd;

	mob->id = mob_id;
	vmw_piter_start(&data_iter, vsgt, 0);
	if (unlikely(!vmw_piter_next(&data_iter)))
		return 0;
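
	/*
	 * Same format selection as in vmw_setup_otable_base(): a single
	 * page or a physically contiguous region needs no page table;
	 * otherwise build one, unless a previous bind already left it
	 * in place.
	 */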
	if (likely(num_data_pages == 1)) {
		mob->pt_level = VMW_MOBFMT_PTDEPTH_0;
		mob->pt_root_page = vmw_piter_dma_addr(&data_iter);
	} else if (vsgt->num_regions == 1) {
		mob->pt_level = SVGA3D_MOBFMT_RANGE;
		mob->pt_root_page = vmw_piter_dma_addr(&data_iter);
	} else if (unlikely(mob->pt_bo == NULL)) {
		ret = vmw_mob_pt_populate(dev_priv, mob);
		if (unlikely(ret != 0))
			return ret;

		vmw_mob_pt_setup(mob, data_iter, num_data_pages);
		pt_set_up = true;
		mob->pt_level += VMW_MOBFMT_PTDEPTH_1 - SVGA3D_MOBFMT_PTDEPTH_1;
	}

	(void) vmw_3d_resource_inc(dev_priv, false);

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for Memory "
			  "Object binding.\n");
		goto out_no_cmd_space;
	}

	cmd->header.id = SVGA_3D_CMD_DEFINE_GB_MOB64;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.mobid = mob_id;
	cmd->body.ptDepth = mob->pt_level;
	cmd->body.base = cpu_to_le64(mob->pt_root_page >> PAGE_SHIFT);
	cmd->body.sizeInBytes = num_data_pages * PAGE_SIZE;

	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	return 0;

out_no_cmd_space:
	vmw_3d_resource_dec(dev_priv, false);
	if (pt_set_up)
		ttm_bo_unref(&mob->pt_bo);

	return -ENOMEM;
}