// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright © 2018 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include <linux/slab.h>

#include "vmwgfx_validation.h"
#include "vmwgfx_drv.h"

/**
 * struct vmw_validation_bo_node - Buffer object validation metadata.
 * @base: Metadata used for TTM reservation and validation.
 * @hash: A hash entry used for the duplicate detection hash table.
 * @as_mob: Validate as mob.
 * @cpu_blit: Validate for cpu blit access.
 *
 * Bit fields are used since these structures are allocated and freed in
 * large numbers and space conservation is desired.
 */
struct vmw_validation_bo_node {
	struct ttm_validate_buffer base;
	struct drm_hash_item hash;
	u32 as_mob : 1;
	u32 cpu_blit : 1;
};

/**
 * struct vmw_validation_res_node - Resource validation metadata.
 * @head: List head for the resource validation list.
 * @hash: A hash entry used for the duplicate detection hash table.
 * @res: Reference counted resource pointer.
 * @new_backup: Non ref-counted pointer to new backup buffer to be assigned
 * to a resource.
 * @new_backup_offset: Offset into the new backup mob for resources that can
 * share MOBs.
 * @no_buffer_needed: Kernel does not need to allocate a MOB during validation,
 * the command stream provides a mob bind operation.
 * @switching_backup: The validation process is switching backup MOB.
 * @first_usage: True iff the resource has been seen only once in the current
 * validation batch.
 * @reserved: Whether the resource is currently reserved by this process.
 * @private: Optional additional memory for caller-private data.
 *
 * Bit fields are used since these structures are allocated and freed in
 * large numbers and space conservation is desired.
 */
struct vmw_validation_res_node {
	struct list_head head;
	struct drm_hash_item hash;
	struct vmw_resource *res;
	struct vmw_buffer_object *new_backup;
	unsigned long new_backup_offset;
	u32 no_buffer_needed : 1;
	u32 switching_backup : 1;
	u32 first_usage : 1;
	u32 reserved : 1;
	unsigned long private[0];
};

/**
 * vmw_validation_mem_alloc - Allocate kernel memory from the validation
 * context-based allocator
 * @ctx: The validation context
 * @size: The number of bytes to allocate.
 *
 * The memory allocated may not exceed PAGE_SIZE, and the returned
 * address is aligned to sizeof(long). All memory allocated this way is
 * reclaimed after validation when calling any of the exported functions:
 * vmw_validation_unref_lists()
 * vmw_validation_revert()
 * vmw_validation_done()
 *
 * Return: Pointer to the allocated memory on success. NULL on failure.
 */
void *vmw_validation_mem_alloc(struct vmw_validation_context *ctx,
			       unsigned int size)
{
	void *addr;

	size = vmw_validation_align(size);
	if (size > PAGE_SIZE)
		return NULL;

	if (ctx->mem_size_left < size) {
		struct page *page;

		if (ctx->vm && ctx->vm_size_left < PAGE_SIZE) {
			int ret = ctx->vm->reserve_mem(ctx->vm, ctx->vm->gran);

			if (ret)
				return NULL;

			ctx->vm_size_left += ctx->vm->gran;
			ctx->total_mem += ctx->vm->gran;
		}

		page = alloc_page(GFP_KERNEL | __GFP_ZERO);
		if (!page)
			return NULL;

		if (ctx->vm)
			ctx->vm_size_left -= PAGE_SIZE;

		list_add_tail(&page->lru, &ctx->page_list);
		ctx->page_address = page_address(page);
		ctx->mem_size_left = PAGE_SIZE;
	}

	addr = (void *) (ctx->page_address + (PAGE_SIZE - ctx->mem_size_left));
	ctx->mem_size_left -= size;

	return addr;
}
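
/*
 * Example (an illustrative sketch only, not used by the driver): allocating
 * short-lived, caller-private data from the context-based allocator. The
 * struct name "hypothetical_reloc" is made up for this example.
 *
 *	struct hypothetical_reloc *reloc;
 *
 *	reloc = vmw_validation_mem_alloc(ctx, sizeof(*reloc));
 *	if (!reloc)
 *		return -ENOMEM;
 *
 * Note that no explicit free is needed (or possible); the memory is
 * reclaimed in bulk by vmw_validation_done(), vmw_validation_revert() or
 * vmw_validation_unref_lists().
 */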

/**
 * vmw_validation_mem_free - Free all memory allocated using
 * vmw_validation_mem_alloc()
 * @ctx: The validation context
 *
 * All memory previously allocated for this context using
 * vmw_validation_mem_alloc() is freed.
 */
static void vmw_validation_mem_free(struct vmw_validation_context *ctx)
{
	struct page *entry, *next;

	list_for_each_entry_safe(entry, next, &ctx->page_list, lru) {
		list_del_init(&entry->lru);
		__free_page(entry);
	}

	ctx->mem_size_left = 0;
	if (ctx->vm && ctx->total_mem) {
		ctx->vm->unreserve_mem(ctx->vm, ctx->total_mem);
		ctx->total_mem = 0;
		ctx->vm_size_left = 0;
	}
}

/**
 * vmw_validation_find_bo_dup - Find a duplicate buffer object entry in the
 * validation context's lists.
 * @ctx: The validation context to search.
 * @vbo: The buffer object to search for.
 *
 * Return: Pointer to the struct vmw_validation_bo_node referencing the
 * duplicate, or NULL if none found.
 */
static struct vmw_validation_bo_node *
vmw_validation_find_bo_dup(struct vmw_validation_context *ctx,
			   struct vmw_buffer_object *vbo)
{
	struct vmw_validation_bo_node *bo_node = NULL;

	if (!ctx->merge_dups)
		return NULL;

	if (ctx->ht) {
		struct drm_hash_item *hash;

		if (!drm_ht_find_item(ctx->ht, (unsigned long) vbo, &hash))
			bo_node = container_of(hash, typeof(*bo_node), hash);
	} else {
		struct vmw_validation_bo_node *entry;

		list_for_each_entry(entry, &ctx->bo_list, base.head) {
			if (entry->base.bo == &vbo->base) {
				bo_node = entry;
				break;
			}
		}
	}

	return bo_node;
}

/**
 * vmw_validation_find_res_dup - Find a duplicate resource entry in the
 * validation context's lists.
 * @ctx: The validation context to search.
 * @res: The resource to search for.
 *
 * Return: Pointer to the struct vmw_validation_res_node referencing the
 * duplicate, or NULL if none found.
 */
static struct vmw_validation_res_node *
vmw_validation_find_res_dup(struct vmw_validation_context *ctx,
			    struct vmw_resource *res)
{
	struct vmw_validation_res_node *res_node = NULL;

	if (!ctx->merge_dups)
		return NULL;

	if (ctx->ht) {
		struct drm_hash_item *hash;

		if (!drm_ht_find_item(ctx->ht, (unsigned long) res, &hash))
			res_node = container_of(hash, typeof(*res_node), hash);
	} else {
		struct vmw_validation_res_node *entry;

		list_for_each_entry(entry, &ctx->resource_ctx_list, head) {
			if (entry->res == res) {
				res_node = entry;
				goto out;
			}
		}

		list_for_each_entry(entry, &ctx->resource_list, head) {
			if (entry->res == res) {
				res_node = entry;
				break;
			}
		}
	}
out:
	return res_node;
}

/**
 * vmw_validation_add_bo - Add a buffer object to the validation context.
 * @ctx: The validation context.
 * @vbo: The buffer object.
 * @as_mob: Validate as mob, otherwise suitable for GMR operations.
 * @cpu_blit: Validate in a page-mappable location.
 *
 * Return: Zero on success, negative error code otherwise.
 */
int vmw_validation_add_bo(struct vmw_validation_context *ctx,
			  struct vmw_buffer_object *vbo,
			  bool as_mob,
			  bool cpu_blit)
{
	struct vmw_validation_bo_node *bo_node;

	bo_node = vmw_validation_find_bo_dup(ctx, vbo);
	if (bo_node) {
		if (bo_node->as_mob != as_mob ||
		    bo_node->cpu_blit != cpu_blit) {
			DRM_ERROR("Inconsistent buffer usage.\n");
			return -EINVAL;
		}
	} else {
		struct ttm_validate_buffer *val_buf;
		int ret;

		bo_node = vmw_validation_mem_alloc(ctx, sizeof(*bo_node));
		if (!bo_node)
			return -ENOMEM;

		if (ctx->ht) {
			bo_node->hash.key = (unsigned long) vbo;
			ret = drm_ht_insert_item(ctx->ht, &bo_node->hash);
			if (ret) {
				DRM_ERROR("Failed to initialize a buffer "
					  "validation entry.\n");
				return ret;
			}
		}
		val_buf = &bo_node->base;
		val_buf->bo = ttm_bo_get_unless_zero(&vbo->base);
		if (!val_buf->bo)
			return -ESRCH;
		val_buf->shared = false;
		list_add_tail(&val_buf->head, &ctx->bo_list);
		bo_node->as_mob = as_mob;
		bo_node->cpu_blit = cpu_blit;
	}

	return 0;
}
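
/*
 * Example (an illustrative sketch only), assuming a context created with
 * duplicate merging enabled (merge_dups): registering the same buffer
 * object twice with identical flags merges into a single entry, while
 * conflicting flags are rejected.
 *
 *	ret = vmw_validation_add_bo(ctx, vbo, true, false);
 *	if (ret)
 *		return ret;
 *	ret = vmw_validation_add_bo(ctx, vbo, true, false);
 *	(Merged with the first entry; ret == 0.)
 *	ret = vmw_validation_add_bo(ctx, vbo, false, false);
 *	(Inconsistent buffer usage; ret == -EINVAL.)
 */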

/**
 * vmw_validation_add_resource - Add a resource to the validation context.
 * @ctx: The validation context.
 * @res: The resource.
 * @priv_size: Size of private, additional metadata.
 * @p_node: Output pointer to the address of the additional metadata.
 * @first_usage: Whether this was the first time this resource was seen.
 *
 * Return: Zero on success, negative error code otherwise.
 */
int vmw_validation_add_resource(struct vmw_validation_context *ctx,
				struct vmw_resource *res,
				size_t priv_size,
				void **p_node,
				bool *first_usage)
{
	struct vmw_validation_res_node *node;
	int ret;

	node = vmw_validation_find_res_dup(ctx, res);
	if (node) {
		node->first_usage = 0;
		goto out_fill;
	}

	node = vmw_validation_mem_alloc(ctx, sizeof(*node) + priv_size);
	if (!node) {
		DRM_ERROR("Failed to allocate a resource validation "
			  "entry.\n");
		return -ENOMEM;
	}

	if (ctx->ht) {
		node->hash.key = (unsigned long) res;
		ret = drm_ht_insert_item(ctx->ht, &node->hash);
		if (ret) {
			DRM_ERROR("Failed to initialize a resource validation "
				  "entry.\n");
			return ret;
		}
	}
	node->res = vmw_resource_reference_unless_doomed(res);
	if (!node->res)
		return -ESRCH;

	node->first_usage = 1;
	if (!res->dev_priv->has_mob) {
		list_add_tail(&node->head, &ctx->resource_list);
	} else {
		switch (vmw_res_type(res)) {
		case vmw_res_context:
		case vmw_res_dx_context:
			list_add(&node->head, &ctx->resource_ctx_list);
			break;
		case vmw_res_cotable:
			list_add_tail(&node->head, &ctx->resource_ctx_list);
			break;
		default:
			list_add_tail(&node->head, &ctx->resource_list);
			break;
		}
	}

out_fill:
	if (first_usage)
		*first_usage = node->first_usage;
	if (p_node)
		*p_node = &node->private;

	return 0;
}
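
/*
 * Example (an illustrative sketch only): registering a resource together
 * with caller-private data. "struct hypothetical_res_info" and its
 * "needs_setup" member are made up for this example; the private area is
 * valid for the lifetime of the validation context.
 *
 *	struct hypothetical_res_info *info;
 *	bool first;
 *	int ret;
 *
 *	ret = vmw_validation_add_resource(ctx, res, sizeof(*info),
 *					  (void **) &info, &first);
 *	if (ret)
 *		return ret;
 *	if (first)
 *		info->needs_setup = true;
 */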

/**
 * vmw_validation_res_switch_backup - Register a backup MOB switch during
 * validation.
 * @ctx: The validation context.
 * @val_private: The additional meta-data pointer returned when the
 * resource was registered with the validation context. Used to identify
 * the resource.
 * @vbo: The new backup buffer object MOB. This buffer object needs to have
 * already been registered with the validation context.
 * @backup_offset: Offset into the new backup MOB.
 */
void vmw_validation_res_switch_backup(struct vmw_validation_context *ctx,
				      void *val_private,
				      struct vmw_buffer_object *vbo,
				      unsigned long backup_offset)
{
	struct vmw_validation_res_node *val;

	val = container_of(val_private, typeof(*val), private);
	val->switching_backup = 1;
	if (val->first_usage)
		val->no_buffer_needed = 1;

	val->new_backup = vbo;
	val->new_backup_offset = backup_offset;
}
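
/*
 * Example (an illustrative sketch only): switching a resource's backup MOB.
 * As required by the kerneldoc above, the new buffer must be registered
 * with the context before the switch is recorded:
 *
 *	ret = vmw_validation_add_bo(ctx, new_vbo, true, false);
 *	if (ret)
 *		return ret;
 *	vmw_validation_res_switch_backup(ctx, val_private, new_vbo, 0);
 */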

/**
 * vmw_validation_res_reserve - Reserve all resources registered with this
 * validation context.
 * @ctx: The validation context.
 * @intr: Use interruptible waits when possible.
 *
 * Return: Zero on success, -ERESTARTSYS if interrupted. Negative error
 * code on failure.
 */
int vmw_validation_res_reserve(struct vmw_validation_context *ctx,
			       bool intr)
{
	struct vmw_validation_res_node *val;
	int ret = 0;

	list_splice_init(&ctx->resource_ctx_list, &ctx->resource_list);

	list_for_each_entry(val, &ctx->resource_list, head) {
		struct vmw_resource *res = val->res;

		ret = vmw_resource_reserve(res, intr, val->no_buffer_needed);
		if (ret)
			goto out_unreserve;

		val->reserved = 1;
		if (res->backup) {
			struct vmw_buffer_object *vbo = res->backup;

			ret = vmw_validation_add_bo
				(ctx, vbo, vmw_resource_needs_backup(res),
				 false);
			if (ret)
				goto out_unreserve;
		}
	}

	return 0;

out_unreserve:
	vmw_validation_res_unreserve(ctx, true);
	return ret;
}

/**
 * vmw_validation_res_unreserve - Unreserve all reserved resources
 * registered with this validation context.
 * @ctx: The validation context.
 * @backoff: Whether this is a backoff- or a commit-type operation. This
 * is used to determine whether to switch backup MOBs or not.
 */
void vmw_validation_res_unreserve(struct vmw_validation_context *ctx,
				  bool backoff)
{
	struct vmw_validation_res_node *val;

	list_splice_init(&ctx->resource_ctx_list, &ctx->resource_list);

	list_for_each_entry(val, &ctx->resource_list, head) {
		if (val->reserved)
			vmw_resource_unreserve(val->res,
					       !backoff &&
					       val->switching_backup,
					       val->new_backup,
					       val->new_backup_offset);
	}
}

/**
 * vmw_validation_bo_validate_single - Validate a single buffer object.
 * @bo: The TTM buffer object base.
 * @interruptible: Whether to perform waits interruptible if possible.
 * @validate_as_mob: Whether to validate in MOB memory.
 *
 * Return: Zero on success, -ERESTARTSYS if interrupted. Negative error
 * code on failure.
 */
int vmw_validation_bo_validate_single(struct ttm_buffer_object *bo,
				      bool interruptible,
				      bool validate_as_mob)
{
	struct vmw_buffer_object *vbo =
		container_of(bo, struct vmw_buffer_object, base);
	struct ttm_operation_ctx ctx = {
		.interruptible = interruptible,
		.no_wait_gpu = false
	};
	int ret;

	if (vbo->pin_count > 0)
		return 0;

	if (validate_as_mob)
		return ttm_bo_validate(bo, &vmw_mob_placement, &ctx);

	/*
	 * Put BO in VRAM if there is space, otherwise as a GMR.
	 * If there is no space in VRAM and GMR ids are all used up,
	 * start evicting GMRs to make room. If the DMA buffer can't be
	 * used as a GMR, this will return -ENOMEM.
	 */
	ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, &ctx);
	if (ret == 0 || ret == -ERESTARTSYS)
		return ret;

	/*
	 * If that failed, try VRAM again, this time evicting
	 * previous contents.
	 */
	ret = ttm_bo_validate(bo, &vmw_vram_placement, &ctx);
	return ret;
}

/**
 * vmw_validation_bo_validate - Validate all buffer objects registered with
 * the validation context.
 * @ctx: The validation context.
 * @intr: Whether to perform waits interruptible if possible.
 *
 * Return: Zero on success, -ERESTARTSYS if interrupted,
 * negative error code on failure.
 */
int vmw_validation_bo_validate(struct vmw_validation_context *ctx, bool intr)
{
	struct vmw_validation_bo_node *entry;
	int ret;

	list_for_each_entry(entry, &ctx->bo_list, base.head) {
		if (entry->cpu_blit) {
			struct ttm_operation_ctx ctx = {
				.interruptible = intr,
				.no_wait_gpu = false
			};

			ret = ttm_bo_validate(entry->base.bo,
					      &vmw_nonfixed_placement, &ctx);
		} else {
			ret = vmw_validation_bo_validate_single
				(entry->base.bo, intr, entry->as_mob);
		}
		if (ret)
			return ret;
	}
	return 0;
}

/**
 * vmw_validation_res_validate - Validate all resources registered with the
 * validation context.
 * @ctx: The validation context.
 * @intr: Whether to perform waits interruptible if possible.
 *
 * Before this function is called, all resource backup buffers must have
 * been validated.
 *
 * Return: Zero on success, -ERESTARTSYS if interrupted,
 * negative error code on failure.
 */
int vmw_validation_res_validate(struct vmw_validation_context *ctx, bool intr)
{
	struct vmw_validation_res_node *val;
	int ret;

	list_for_each_entry(val, &ctx->resource_list, head) {
		struct vmw_resource *res = val->res;
		struct vmw_buffer_object *backup = res->backup;

		ret = vmw_resource_validate(res, intr);
		if (ret) {
			if (ret != -ERESTARTSYS)
				DRM_ERROR("Failed to validate resource.\n");
			return ret;
		}

		/* Check if the resource switched backup buffer */
		if (backup && res->backup && (backup != res->backup)) {
			struct vmw_buffer_object *vbo = res->backup;

			ret = vmw_validation_add_bo
				(ctx, vbo, vmw_resource_needs_backup(res),
				 false);
			if (ret)
				return ret;
		}
	}
	return 0;
}

/**
 * vmw_validation_drop_ht - Reset the hash table used for duplicate finding
 * and unregister it from this validation context.
 * @ctx: The validation context.
 *
 * The hash table used for duplicate finding is an expensive resource and
 * may be protected by mutexes that may cause deadlocks during resource
 * unreferencing if held. After resource- and buffer object registering,
 * there is no longer any use for this hash table, so allow freeing it
 * either to shorten any mutex locking time, or before resources- and
 * buffer objects are freed during validation context cleanup.
 */
void vmw_validation_drop_ht(struct vmw_validation_context *ctx)
{
	struct vmw_validation_bo_node *entry;
	struct vmw_validation_res_node *val;

	if (!ctx->ht)
		return;

	list_for_each_entry(entry, &ctx->bo_list, base.head)
		(void) drm_ht_remove_item(ctx->ht, &entry->hash);

	list_for_each_entry(val, &ctx->resource_list, head)
		(void) drm_ht_remove_item(ctx->ht, &val->hash);

	list_for_each_entry(val, &ctx->resource_ctx_list, head)
		(void) drm_ht_remove_item(ctx->ht, &val->hash);

	ctx->ht = NULL;
}
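
/*
 * Example (an illustrative sketch only): the ordering suggested by the
 * kerneldoc above. Once all registrations are done, the hash table can be
 * dropped before the potentially slow cleanup:
 *
 *	(... all vmw_validation_add_bo()/_add_resource() calls done ...)
 *	vmw_validation_drop_ht(ctx);
 *	(Now safe even if destructors take the mutex protecting the table.)
 *	vmw_validation_unref_lists(ctx);
 */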

/**
 * vmw_validation_unref_lists - Unregister previously registered buffer
 * object and resources.
 * @ctx: The validation context.
 *
 * Note that this function may cause buffer object- and resource destructors
 * to be invoked.
 */
void vmw_validation_unref_lists(struct vmw_validation_context *ctx)
{
	struct vmw_validation_bo_node *entry;
	struct vmw_validation_res_node *val;

	list_for_each_entry(entry, &ctx->bo_list, base.head)
		ttm_bo_unref(&entry->base.bo);

	list_splice_init(&ctx->resource_ctx_list, &ctx->resource_list);
	list_for_each_entry(val, &ctx->resource_list, head)
		vmw_resource_unreference(&val->res);

	/*
	 * No need to detach each list entry since they are all freed with
	 * vmw_validation_mem_free(). Just make them inaccessible.
	 */
	INIT_LIST_HEAD(&ctx->bo_list);
	INIT_LIST_HEAD(&ctx->resource_list);

	vmw_validation_mem_free(ctx);
}

/**
 * vmw_validation_prepare - Prepare a validation context for command
 * submission.
 * @ctx: The validation context.
 * @mutex: The mutex used to protect resource reservation.
 * @intr: Whether to perform waits interruptible if possible.
 *
 * Note that the single reservation mutex @mutex is an unfortunate
 * construct. Ideally resource reservation should be moved to per-resource
 * ww_mutexes.
 * If this function doesn't return zero to indicate success, all resources
 * are left unreserved but still referenced.
 * Return: Zero on success, -ERESTARTSYS if interrupted, negative error code
 * on error.
 */
int vmw_validation_prepare(struct vmw_validation_context *ctx,
			   struct mutex *mutex,
			   bool intr)
{
	int ret = 0;

	if (mutex) {
		if (intr)
			ret = mutex_lock_interruptible(mutex);
		else
			mutex_lock(mutex);
		if (ret)
			return -ERESTARTSYS;
	}

	ctx->res_mutex = mutex;
	ret = vmw_validation_res_reserve(ctx, intr);
	if (ret)
		goto out_no_res_reserve;

	ret = vmw_validation_bo_reserve(ctx, intr);
	if (ret)
		goto out_no_bo_reserve;

	ret = vmw_validation_bo_validate(ctx, intr);
	if (ret)
		goto out_no_validate;

	ret = vmw_validation_res_validate(ctx, intr);
	if (ret)
		goto out_no_validate;

	return 0;

out_no_validate:
	vmw_validation_bo_backoff(ctx);
out_no_bo_reserve:
	vmw_validation_res_unreserve(ctx, true);
out_no_res_reserve:
	if (mutex)
		mutex_unlock(mutex);

	return ret;
}
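
/*
 * Example (an illustrative sketch only): a typical caller lifecycle,
 * assuming the DECLARE_VAL_CONTEXT() helper from vmwgfx_validation.h and a
 * made-up submit_commands() step standing in for real command submission:
 *
 *	DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
 *	struct vmw_fence_obj *fence;
 *	int ret;
 *
 *	ret = vmw_validation_add_bo(&val_ctx, vbo, true, false);
 *	if (ret)
 *		goto out_unref;
 *	ret = vmw_validation_prepare(&val_ctx, NULL, true);
 *	if (ret)
 *		goto out_unref;
 *	ret = submit_commands(dev_priv, &fence);
 *	if (ret) {
 *		vmw_validation_revert(&val_ctx);
 *		return ret;
 *	}
 *	vmw_validation_done(&val_ctx, fence);
 *	return 0;
 *
 * out_unref:
 *	vmw_validation_unref_lists(&val_ctx);
 *	return ret;
 */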

/**
 * vmw_validation_revert - Revert validation actions if command submission
 * failed.
 *
 * @ctx: The validation context.
 *
 * The caller still needs to unref resources after a call to this function.
 */
void vmw_validation_revert(struct vmw_validation_context *ctx)
{
	vmw_validation_bo_backoff(ctx);
	vmw_validation_res_unreserve(ctx, true);
	if (ctx->res_mutex)
		mutex_unlock(ctx->res_mutex);
	vmw_validation_unref_lists(ctx);
}

/**
 * vmw_validation_done - Commit validation actions after command submission
 * success.
 * @ctx: The validation context.
 * @fence: Fence with which to fence all buffer objects taking part in the
 * command submission.
 *
 * The caller does NOT need to unref resources after a call to this function.
 */
void vmw_validation_done(struct vmw_validation_context *ctx,
			 struct vmw_fence_obj *fence)
{
	vmw_validation_bo_fence(ctx, fence);
	vmw_validation_res_unreserve(ctx, false);
	if (ctx->res_mutex)
		mutex_unlock(ctx->res_mutex);
	vmw_validation_unref_lists(ctx);
}

/**
 * vmw_validation_preload_bo - Preload the validation memory allocator for a
 * call to vmw_validation_add_bo().
 * @ctx: Pointer to the validation context.
 *
 * Iff this function returns successfully, the next call to
 * vmw_validation_add_bo() is guaranteed not to sleep. An error is not fatal
 * but voids the guarantee.
 *
 * Returns: Zero if successful, %-ENOMEM otherwise.
 */
int vmw_validation_preload_bo(struct vmw_validation_context *ctx)
{
	unsigned int size = sizeof(struct vmw_validation_bo_node);

	if (!vmw_validation_mem_alloc(ctx, size))
		return -ENOMEM;

	ctx->mem_size_left += size;
	return 0;
}

/**
 * vmw_validation_preload_res - Preload the validation memory allocator for a
 * call to vmw_validation_add_resource().
 * @ctx: Pointer to the validation context.
 * @size: Size of the validation node extra data. See below.
 *
 * Iff this function returns successfully, the next call to
 * vmw_validation_add_resource() with the same or smaller @size is guaranteed
 * not to sleep. An error is not fatal but voids the guarantee.
 *
 * Returns: Zero if successful, %-ENOMEM otherwise.
 */
int vmw_validation_preload_res(struct vmw_validation_context *ctx,
			       unsigned int size)
{
	size = vmw_validation_align(sizeof(struct vmw_validation_res_node) +
				    size) +
	       vmw_validation_align(sizeof(struct vmw_validation_bo_node));
	if (!vmw_validation_mem_alloc(ctx, size))
		return -ENOMEM;

	ctx->mem_size_left += size;
	return 0;
}
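
/*
 * Example (an illustrative sketch only): preloading so that a later
 * registration can run in a context that must not sleep, for instance
 * inside an RCU read-side section:
 *
 *	ret = vmw_validation_preload_bo(ctx);
 *	if (ret)
 *		return ret;
 *	rcu_read_lock();
 *	(The next vmw_validation_add_bo() on this context will not sleep.)
 *	ret = vmw_validation_add_bo(ctx, vbo, false, false);
 *	rcu_read_unlock();
 */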