vmwgfx_cotable.c
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2014-2015 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Treat context OTables as resources to make use of the resource
 * backing MOB eviction mechanism, which is used to read back the COTable
 * whenever the backing MOB is evicted.
 */

#include <drm/ttm/ttm_placement.h>

#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"
#include "vmwgfx_so.h"

/**
 * struct vmw_cotable - Context Object Table resource
 *
 * @res: struct vmw_resource we are deriving from.
 * @ctx: non-refcounted pointer to the owning context.
 * @size_read_back: Size of data read back during eviction.
 * @seen_entries: Seen entries in command stream for this cotable.
 * @type: The cotable type.
 * @scrubbed: Whether the cotable has been scrubbed.
 * @resource_list: List of resources in the cotable.
 */
struct vmw_cotable {
        struct vmw_resource res;
        struct vmw_resource *ctx;
        size_t size_read_back;
        int seen_entries;
        u32 type;
        bool scrubbed;
        struct list_head resource_list;
};

/**
 * struct vmw_cotable_info - Static info about cotable types
 *
 * @min_initial_entries: Min number of initial entries at cotable allocation
 * for this cotable type.
 * @size: Size of each entry.
 * @unbind_func: Called at cotable scrub time to scrub or destroy the
 * resources on the cotable's resource list, optionally with readback.
 */
struct vmw_cotable_info {
        u32 min_initial_entries;
        u32 size;
        void (*unbind_func)(struct vmw_private *, struct list_head *,
                            bool);
};
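
/*
 * co_info is indexed by cotable type (SVGACOTableType). Entries whose
 * contents reference other resources (views and DX shaders) provide an
 * unbind_func so those resources can be scrubbed or destroyed before the
 * cotable itself is scrubbed.
 */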
static const struct vmw_cotable_info co_info[] = {
        {1, sizeof(SVGACOTableDXRTViewEntry), &vmw_view_cotable_list_destroy},
        {1, sizeof(SVGACOTableDXDSViewEntry), &vmw_view_cotable_list_destroy},
        {1, sizeof(SVGACOTableDXSRViewEntry), &vmw_view_cotable_list_destroy},
        {1, sizeof(SVGACOTableDXElementLayoutEntry), NULL},
        {1, sizeof(SVGACOTableDXBlendStateEntry), NULL},
        {1, sizeof(SVGACOTableDXDepthStencilEntry), NULL},
        {1, sizeof(SVGACOTableDXRasterizerStateEntry), NULL},
        {1, sizeof(SVGACOTableDXSamplerEntry), NULL},
        {1, sizeof(SVGACOTableDXStreamOutputEntry), NULL},
        {1, sizeof(SVGACOTableDXQueryEntry), NULL},
        {1, sizeof(SVGACOTableDXShaderEntry), &vmw_dx_shader_cotable_list_scrub}
};

/*
 * Cotables with bindings that we remove must be scrubbed first;
 * otherwise the device will swap in an invalid context when we remove
 * bindings before scrubbing a cotable...
 */
const SVGACOTableType vmw_cotable_scrub_order[] = {
        SVGA_COTABLE_RTVIEW,
        SVGA_COTABLE_DSVIEW,
        SVGA_COTABLE_SRVIEW,
        SVGA_COTABLE_DXSHADER,
        SVGA_COTABLE_ELEMENTLAYOUT,
        SVGA_COTABLE_BLENDSTATE,
        SVGA_COTABLE_DEPTHSTENCIL,
        SVGA_COTABLE_RASTERIZERSTATE,
        SVGA_COTABLE_SAMPLER,
        SVGA_COTABLE_STREAMOUTPUT,
        SVGA_COTABLE_DXQUERY,
};

static int vmw_cotable_bind(struct vmw_resource *res,
                            struct ttm_validate_buffer *val_buf);
static int vmw_cotable_unbind(struct vmw_resource *res,
                              bool readback,
                              struct ttm_validate_buffer *val_buf);
static int vmw_cotable_create(struct vmw_resource *res);
static int vmw_cotable_destroy(struct vmw_resource *res);

static const struct vmw_res_func vmw_cotable_func = {
        .res_type = vmw_res_cotable,
        .needs_backup = true,
        .may_evict = true,
        .type_name = "context guest backed object tables",
        .backup_placement = &vmw_mob_placement,
        .create = vmw_cotable_create,
        .destroy = vmw_cotable_destroy,
        .bind = vmw_cotable_bind,
        .unbind = vmw_cotable_unbind,
};

/**
 * vmw_cotable - Convert a struct vmw_resource pointer to a struct
 * vmw_cotable pointer
 *
 * @res: Pointer to the resource.
 */
static struct vmw_cotable *vmw_cotable(struct vmw_resource *res)
{
        return container_of(res, struct vmw_cotable, res);
}

/**
 * vmw_cotable_destroy - Cotable resource destroy callback
 *
 * @res: Pointer to the cotable resource.
 *
 * There is no device cotable destroy command, so this function only
 * makes sure that the resource id is set to invalid.
 */
static int vmw_cotable_destroy(struct vmw_resource *res)
{
        res->id = -1;
        return 0;
}

/**
 * vmw_cotable_unscrub - Undo a cotable scrub operation
 *
 * @res: Pointer to the cotable resource
 *
 * This function issues commands to (re)bind the cotable to
 * its backing mob, which needs to be validated and reserved at this point.
 * This is identical to bind() except the function interface looks different.
 */
static int vmw_cotable_unscrub(struct vmw_resource *res)
{
        struct vmw_cotable *vcotbl = vmw_cotable(res);
        struct vmw_private *dev_priv = res->dev_priv;
        struct ttm_buffer_object *bo = &res->backup->base;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDXSetCOTable body;
        } *cmd;

        WARN_ON_ONCE(bo->mem.mem_type != VMW_PL_MOB);
        lockdep_assert_held(&bo->resv->lock.base);

        cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), SVGA3D_INVALID_ID);
        if (!cmd) {
                DRM_ERROR("Failed reserving FIFO space for cotable "
                          "binding.\n");
                return -ENOMEM;
        }

        WARN_ON(vcotbl->ctx->id == SVGA3D_INVALID_ID);
        WARN_ON(bo->mem.mem_type != VMW_PL_MOB);
        cmd->header.id = SVGA_3D_CMD_DX_SET_COTABLE;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.cid = vcotbl->ctx->id;
        cmd->body.type = vcotbl->type;
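        /*
         * For buffers placed in VMW_PL_MOB, bo->mem.start holds the MOB id
         * handed out by the MOB id manager, which is the id the device
         * expects here.
         */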
        cmd->body.mobid = bo->mem.start;
        cmd->body.validSizeInBytes = vcotbl->size_read_back;

        vmw_fifo_commit_flush(dev_priv, sizeof(*cmd));
        vcotbl->scrubbed = false;

        return 0;
}

/**
 * vmw_cotable_bind - Cotable resource bind callback
 *
 * @res: Pointer to the cotable resource
 * @val_buf: Pointer to a struct ttm_validate_buffer prepared by the caller
 * for convenience / fencing.
 *
 * This function issues commands to (re)bind the cotable to
 * its backing mob, which needs to be validated and reserved at this point.
 */
static int vmw_cotable_bind(struct vmw_resource *res,
                            struct ttm_validate_buffer *val_buf)
{
        /*
         * The create() callback may have changed @res->backup without
         * the caller noticing, and with val_buf->bo still pointing to
         * the old backup buffer. Although hackish, and not used currently,
         * take the opportunity to correct the value here so that it's not
         * misused in the future.
         */
        val_buf->bo = &res->backup->base;

        return vmw_cotable_unscrub(res);
}

/**
 * vmw_cotable_scrub - Scrub the cotable from the device.
 *
 * @res: Pointer to the cotable resource.
 * @readback: Whether to initiate a readback of the cotable data to the
 * backup buffer.
 *
 * In some situations (context swapouts) it might be desirable to make the
 * device forget about the cotable without performing a full unbind. A full
 * unbind requires reserved backup buffers and it might not be possible to
 * reserve them due to locking order violation issues. The vmw_cotable_scrub
 * function implements a partial unbind() without that requirement but with
 * the following restrictions.
 * 1) Before the cotable is again used by the GPU, vmw_cotable_unscrub() must
 *    be called.
 * 2) Before the cotable backing buffer is used by the CPU, or during the
 *    resource destruction, vmw_cotable_unbind() must be called.
 */
int vmw_cotable_scrub(struct vmw_resource *res, bool readback)
{
        struct vmw_cotable *vcotbl = vmw_cotable(res);
        struct vmw_private *dev_priv = res->dev_priv;
        size_t submit_size;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDXReadbackCOTable body;
        } *cmd0;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDXSetCOTable body;
        } *cmd1;

        if (vcotbl->scrubbed)
                return 0;

        if (co_info[vcotbl->type].unbind_func)
                co_info[vcotbl->type].unbind_func(dev_priv,
                                                  &vcotbl->resource_list,
                                                  readback);

        submit_size = sizeof(*cmd1);
        if (readback)
                submit_size += sizeof(*cmd0);

        cmd1 = vmw_fifo_reserve_dx(dev_priv, submit_size, SVGA3D_INVALID_ID);
        if (!cmd1) {
                DRM_ERROR("Failed reserving FIFO space for cotable "
                          "unbinding.\n");
                return -ENOMEM;
        }

        vcotbl->size_read_back = 0;
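
        /*
         * With readback, a DX_READBACK_COTABLE command precedes the
         * DX_SET_COTABLE unbind within the same FIFO reservation, so the
         * device writes the current cotable contents back to the MOB before
         * it is detached.
         */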
        if (readback) {
                cmd0 = (void *) cmd1;
                cmd0->header.id = SVGA_3D_CMD_DX_READBACK_COTABLE;
                cmd0->header.size = sizeof(cmd0->body);
                cmd0->body.cid = vcotbl->ctx->id;
                cmd0->body.type = vcotbl->type;
                cmd1 = (void *) &cmd0[1];
                vcotbl->size_read_back = res->backup_size;
        }
        cmd1->header.id = SVGA_3D_CMD_DX_SET_COTABLE;
        cmd1->header.size = sizeof(cmd1->body);
        cmd1->body.cid = vcotbl->ctx->id;
        cmd1->body.type = vcotbl->type;
        cmd1->body.mobid = SVGA3D_INVALID_ID;
        cmd1->body.validSizeInBytes = 0;
        vmw_fifo_commit_flush(dev_priv, submit_size);
        vcotbl->scrubbed = true;

        /* Trigger a create() on next validate. */
        res->id = -1;

        return 0;
}

/**
 * vmw_cotable_unbind - Cotable resource unbind callback
 *
 * @res: Pointer to the cotable resource.
 * @readback: Whether to read back cotable data to the backup buffer.
 * @val_buf: Pointer to a struct ttm_validate_buffer prepared by the caller
 * for convenience / fencing.
 *
 * Unbinds the cotable from the device and fences the backup buffer.
 */
static int vmw_cotable_unbind(struct vmw_resource *res,
                              bool readback,
                              struct ttm_validate_buffer *val_buf)
{
        struct vmw_cotable *vcotbl = vmw_cotable(res);
        struct vmw_private *dev_priv = res->dev_priv;
        struct ttm_buffer_object *bo = val_buf->bo;
        struct vmw_fence_obj *fence;

        if (list_empty(&res->mob_head))
                return 0;

        WARN_ON_ONCE(bo->mem.mem_type != VMW_PL_MOB);
        lockdep_assert_held(&bo->resv->lock.base);

        mutex_lock(&dev_priv->binding_mutex);
        if (!vcotbl->scrubbed)
                vmw_dx_context_scrub_cotables(vcotbl->ctx, readback);
        mutex_unlock(&dev_priv->binding_mutex);
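
        /*
         * Fence the backup buffer so buffer moves and CPU accesses wait for
         * any device readback triggered by the scrub above to complete.
         */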
        (void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
        vmw_bo_fence_single(bo, fence);
        if (likely(fence != NULL))
                vmw_fence_obj_unreference(&fence);

        return 0;
}

/**
 * vmw_cotable_readback - Read back a cotable without unbinding.
 *
 * @res: The cotable resource.
 *
 * Reads back a cotable to its backing mob without scrubbing the MOB from
 * the cotable. The MOB is fenced for subsequent CPU access.
 */
static int vmw_cotable_readback(struct vmw_resource *res)
{
        struct vmw_cotable *vcotbl = vmw_cotable(res);
        struct vmw_private *dev_priv = res->dev_priv;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDXReadbackCOTable body;
        } *cmd;
        struct vmw_fence_obj *fence;

        if (!vcotbl->scrubbed) {
                cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd),
                                          SVGA3D_INVALID_ID);
                if (!cmd) {
                        DRM_ERROR("Failed reserving FIFO space for cotable "
                                  "readback.\n");
                        return -ENOMEM;
                }
                cmd->header.id = SVGA_3D_CMD_DX_READBACK_COTABLE;
                cmd->header.size = sizeof(cmd->body);
                cmd->body.cid = vcotbl->ctx->id;
                cmd->body.type = vcotbl->type;
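                /*
                 * size_read_back is handed to the device as validSizeInBytes
                 * on the next unscrub; after a full readback the whole backup
                 * buffer holds valid data.
                 */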
                vcotbl->size_read_back = res->backup_size;
                vmw_fifo_commit(dev_priv, sizeof(*cmd));
        }

        (void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
        vmw_bo_fence_single(&res->backup->base, fence);
        vmw_fence_obj_unreference(&fence);

        return 0;
}

/**
 * vmw_cotable_resize - Resize a cotable.
 *
 * @res: The cotable resource.
 * @new_size: The new size.
 *
 * Resizes a cotable and binds the new backup buffer.
 * On failure the cotable is left intact.
 * Important! This function may not fail once the MOB switch has been
 * committed to hardware. That would put the device context in an
 * invalid state which we can't currently recover from.
 */
static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
{
        struct ttm_operation_ctx ctx = { false, false };
        struct vmw_private *dev_priv = res->dev_priv;
        struct vmw_cotable *vcotbl = vmw_cotable(res);
        struct vmw_buffer_object *buf, *old_buf = res->backup;
        struct ttm_buffer_object *bo, *old_bo = &res->backup->base;
        size_t old_size = res->backup_size;
        size_t old_size_read_back = vcotbl->size_read_back;
        size_t cur_size_read_back;
        struct ttm_bo_kmap_obj old_map, new_map;
        int ret;
        size_t i;

        ret = vmw_cotable_readback(res);
        if (ret)
                return ret;

        cur_size_read_back = vcotbl->size_read_back;
        vcotbl->size_read_back = old_size_read_back;
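
        /*
         * vmw_cotable_readback() may have set size_read_back to the full old
         * backup size. Stash that value (cur_size_read_back) for use with the
         * new buffer and restore the old value, so the bookkeeping stays
         * consistent with the old buffer if any step below fails before the
         * switch is made.
         */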

        /*
         * While the device is processing, allocate and reserve a buffer
         * object for the new COTable. Initially pin the buffer object to make
         * sure we can use tryreserve without failure.
         */
        buf = kzalloc(sizeof(*buf), GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        ret = vmw_bo_init(dev_priv, buf, new_size, &vmw_mob_ne_placement,
                          true, vmw_bo_bo_free);
        if (ret) {
                DRM_ERROR("Failed initializing new cotable MOB.\n");
                return ret;
        }

        bo = &buf->base;
        WARN_ON_ONCE(ttm_bo_reserve(bo, false, true, NULL));

        ret = ttm_bo_wait(old_bo, false, false);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Failed waiting for cotable unbind.\n");
                goto out_wait;
        }

        /*
         * Do a page by page copy of COTables. This eliminates slow vmap()s.
         * This should really be a TTM utility.
         */
        for (i = 0; i < old_bo->num_pages; ++i) {
                bool dummy;

                ret = ttm_bo_kmap(old_bo, i, 1, &old_map);
                if (unlikely(ret != 0)) {
                        DRM_ERROR("Failed mapping old COTable on resize.\n");
                        goto out_wait;
                }
                ret = ttm_bo_kmap(bo, i, 1, &new_map);
                if (unlikely(ret != 0)) {
                        DRM_ERROR("Failed mapping new COTable on resize.\n");
                        goto out_map_new;
                }
                memcpy(ttm_kmap_obj_virtual(&new_map, &dummy),
                       ttm_kmap_obj_virtual(&old_map, &dummy),
                       PAGE_SIZE);
                ttm_bo_kunmap(&new_map);
                ttm_bo_kunmap(&old_map);
        }

        /* Unpin new buffer, and switch backup buffers. */
        ret = ttm_bo_validate(bo, &vmw_mob_placement, &ctx);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Failed validating new COTable backup buffer.\n");
                goto out_wait;
        }
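
        /*
         * Make the resource point at the new buffer before unscrubbing, so
         * that the DX_SET_COTABLE command issued by vmw_cotable_unscrub()
         * below binds the new MOB.
         */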
        res->backup = buf;
        res->backup_size = new_size;
        vcotbl->size_read_back = cur_size_read_back;

        /*
         * Now tell the device to switch. If this fails, then we need to
         * revert the full resize.
         */
        ret = vmw_cotable_unscrub(res);
        if (ret) {
                DRM_ERROR("Failed switching COTable backup buffer.\n");
                res->backup = old_buf;
                res->backup_size = old_size;
                vcotbl->size_read_back = old_size_read_back;
                goto out_wait;
        }

        /* Let go of the old mob. */
        list_del(&res->mob_head);
        list_add_tail(&res->mob_head, &buf->res_list);
        vmw_bo_unreference(&old_buf);
        res->id = vcotbl->type;

        return 0;

out_map_new:
        ttm_bo_kunmap(&old_map);
out_wait:
        ttm_bo_unreserve(bo);
        vmw_bo_unreference(&buf);

        return ret;
}

/**
 * vmw_cotable_create - Cotable resource create callback
 *
 * @res: Pointer to a cotable resource.
 *
 * There is no separate create command for cotables, so this callback, which
 * is called before bind() in the validation sequence, is instead used for
 * two things:
 * 1) Unscrub the cotable if it is scrubbed and still attached to a backup
 *    buffer, that is, if @res->mob_head is non-empty.
 * 2) Resize the cotable if needed.
 */
static int vmw_cotable_create(struct vmw_resource *res)
{
        struct vmw_cotable *vcotbl = vmw_cotable(res);
        size_t new_size = res->backup_size;
        size_t needed_size;
        int ret;

        /* Check whether we need to resize the cotable */
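        /*
         * seen_entries is the highest entry id seen in the command stream, so
         * seen_entries + 1 entries must fit; the backup size is doubled until
         * they do.
         */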
        needed_size = (vcotbl->seen_entries + 1) * co_info[vcotbl->type].size;
        while (needed_size > new_size)
                new_size *= 2;

        if (likely(new_size <= res->backup_size)) {
                if (vcotbl->scrubbed && !list_empty(&res->mob_head)) {
                        ret = vmw_cotable_unscrub(res);
                        if (ret)
                                return ret;
                }
                res->id = vcotbl->type;
                return 0;
        }

        return vmw_cotable_resize(res, new_size);
}

/**
 * vmw_hw_cotable_destroy - Cotable hw_destroy callback
 *
 * @res: Pointer to a cotable resource.
 *
 * The final (part of resource destruction) destroy callback.
 */
static void vmw_hw_cotable_destroy(struct vmw_resource *res)
{
        (void) vmw_cotable_destroy(res);
}
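
/*
 * Size charged against the TTM global memory accounting for each cotable
 * resource, computed once on first allocation.
 */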
static size_t cotable_acc_size;

/**
 * vmw_cotable_free - Cotable resource destructor
 *
 * @res: Pointer to a cotable resource.
 */
static void vmw_cotable_free(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;

        kfree(res);
        ttm_mem_global_free(vmw_mem_glob(dev_priv), cotable_acc_size);
}

/**
 * vmw_cotable_alloc - Create a cotable resource
 *
 * @dev_priv: Pointer to a device private struct.
 * @ctx: Pointer to the context resource.
 * The cotable resource will not take a refcount on @ctx.
 * @type: The cotable type.
 */
struct vmw_resource *vmw_cotable_alloc(struct vmw_private *dev_priv,
                                       struct vmw_resource *ctx,
                                       u32 type)
{
        struct vmw_cotable *vcotbl;
        struct ttm_operation_ctx ttm_opt_ctx = {
                .interruptible = true,
                .no_wait_gpu = false
        };
        int ret;
        u32 num_entries;

        if (unlikely(cotable_acc_size == 0))
                cotable_acc_size = ttm_round_pot(sizeof(struct vmw_cotable));

        ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
                                   cotable_acc_size, &ttm_opt_ctx);
        if (unlikely(ret))
                return ERR_PTR(ret);

        vcotbl = kzalloc(sizeof(*vcotbl), GFP_KERNEL);
        if (unlikely(!vcotbl)) {
                ret = -ENOMEM;
                goto out_no_alloc;
        }

        ret = vmw_resource_init(dev_priv, &vcotbl->res, true,
                                vmw_cotable_free, &vmw_cotable_func);
        if (unlikely(ret != 0))
                goto out_no_init;

        INIT_LIST_HEAD(&vcotbl->resource_list);
        vcotbl->res.id = type;
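
        /*
         * Start with a single-page backup buffer; if the minimum number of
         * initial entries for this type does not fit in one page, round the
         * size up to a whole number of pages.
         */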
        vcotbl->res.backup_size = PAGE_SIZE;
        num_entries = PAGE_SIZE / co_info[type].size;
        if (num_entries < co_info[type].min_initial_entries) {
                vcotbl->res.backup_size = co_info[type].min_initial_entries *
                        co_info[type].size;
                vcotbl->res.backup_size =
                        (vcotbl->res.backup_size + PAGE_SIZE - 1) & PAGE_MASK;
        }

        vcotbl->scrubbed = true;
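        /* seen_entries == -1 means no cotable entries have been seen yet. */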
        vcotbl->seen_entries = -1;
        vcotbl->type = type;
        vcotbl->ctx = ctx;
        vcotbl->res.hw_destroy = vmw_hw_cotable_destroy;

        return &vcotbl->res;

out_no_init:
        kfree(vcotbl);
out_no_alloc:
        ttm_mem_global_free(vmw_mem_glob(dev_priv), cotable_acc_size);
        return ERR_PTR(ret);
}

/**
 * vmw_cotable_notify - Notify the cotable about an item creation
 *
 * @res: Pointer to a cotable resource.
 * @id: Item id.
 */
int vmw_cotable_notify(struct vmw_resource *res, int id)
{
        struct vmw_cotable *vcotbl = vmw_cotable(res);

        if (id < 0 || id >= SVGA_COTABLE_MAX_IDS) {
                DRM_ERROR("Illegal COTable id. Type is %u. Id is %d\n",
                          (unsigned) vcotbl->type, id);
                return -EINVAL;
        }

        if (vcotbl->seen_entries < id) {
                /* Trigger a call to create() on next validate */
                res->id = -1;
                vcotbl->seen_entries = id;
        }

        return 0;
}

/**
 * vmw_cotable_add_resource - add a resource to the cotable's list of active
 * resources.
 *
 * @res: pointer to struct vmw_resource representing the cotable.
 * @head: pointer to the struct list_head member of the resource, dedicated
 * to the cotable active resource list.
 */
void vmw_cotable_add_resource(struct vmw_resource *res, struct list_head *head)
{
        struct vmw_cotable *vcotbl =
                container_of(res, struct vmw_cotable, res);

        list_add_tail(head, &vcotbl->resource_list);
}