i915_gem_batch_pool.c

/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2014-2018 Intel Corporation
 */

#include "i915_gem_batch_pool.h"
#include "i915_drv.h"

/**
 * DOC: batch pool
 *
 * In order to submit batch buffers as 'secure', the software command parser
 * must ensure that a batch buffer cannot be modified after parsing. It does
 * this by copying the user provided batch buffer contents to a kernel owned
 * buffer from which the hardware will actually execute, and by carefully
 * managing the address space bindings for such buffers.
 *
 * The batch pool framework provides a mechanism for the driver to manage a
 * set of scratch buffers to use for this purpose. The framework can be
 * extended to support other use cases should they arise.
 */

/**
 * i915_gem_batch_pool_init() - initialize a batch buffer pool
 * @pool: the batch buffer pool
 * @engine: the associated request submission engine
 */
void i915_gem_batch_pool_init(struct i915_gem_batch_pool *pool,
                              struct intel_engine_cs *engine)
{
        int n;

        pool->engine = engine;

        for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++)
                INIT_LIST_HEAD(&pool->cache_list[n]);
}
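
/*
 * Example (a minimal sketch, not taken from this file): the pool is
 * intended to be embedded in its engine and initialized once during
 * engine setup. Assuming the engine embeds it as engine->batch_pool:
 *
 *      i915_gem_batch_pool_init(&engine->batch_pool, engine);
 */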

/**
 * i915_gem_batch_pool_fini() - clean up a batch buffer pool
 * @pool: the pool to clean up
 *
 * Note: Callers must hold the struct_mutex.
 */
void i915_gem_batch_pool_fini(struct i915_gem_batch_pool *pool)
{
        int n;

        lockdep_assert_held(&pool->engine->i915->drm.struct_mutex);

        for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++) {
                struct drm_i915_gem_object *obj, *next;

                list_for_each_entry_safe(obj, next,
                                         &pool->cache_list[n],
                                         batch_pool_link)
                        __i915_gem_object_release_unless_active(obj);

                INIT_LIST_HEAD(&pool->cache_list[n]);
        }
}
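
/*
 * Example (a sketch under the same embedding assumption as above):
 * teardown must run with struct_mutex held, e.g. during engine
 * cleanup:
 *
 *      mutex_lock(&i915->drm.struct_mutex);
 *      i915_gem_batch_pool_fini(&engine->batch_pool);
 *      mutex_unlock(&i915->drm.struct_mutex);
 */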

/**
 * i915_gem_batch_pool_get() - allocate a buffer from the pool
 * @pool: the batch buffer pool
 * @size: the minimum desired size of the returned buffer
 *
 * Returns an inactive buffer from @pool with at least @size bytes,
 * with the pages pinned. The caller must i915_gem_object_unpin_pages()
 * on the returned object.
 *
 * Note: Callers must hold the struct_mutex.
 *
 * Return: the buffer object or an error pointer
 */
struct drm_i915_gem_object *
i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
                        size_t size)
{
        struct drm_i915_gem_object *obj;
        struct list_head *list;
        int n, ret;

        lockdep_assert_held(&pool->engine->i915->drm.struct_mutex);

        /* Compute a power-of-two bucket, but throw everything greater than
         * 16KiB into the same bucket: i.e. the buckets hold objects of
         * (1 page, 2 pages, 4 pages, 8+ pages).
         */
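        /* For example, assuming 4KiB pages: a 4KiB request yields
         * fls(1) - 1 = 0 (the 1-page bucket); a 12KiB request yields
         * fls(3) - 1 = 1 (the 2-page bucket, with the size check below
         * filtering out too-small objects); and a 64KiB request yields
         * fls(16) - 1 = 4, which is clamped to the final 8+ pages bucket.
         */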
        n = fls(size >> PAGE_SHIFT) - 1;
        if (n >= ARRAY_SIZE(pool->cache_list))
                n = ARRAY_SIZE(pool->cache_list) - 1;
        list = &pool->cache_list[n];

        list_for_each_entry(obj, list, batch_pool_link) {
                /* The batches are strictly LRU ordered */
                if (i915_gem_object_is_active(obj)) {
                        struct reservation_object *resv = obj->resv;

                        if (!reservation_object_test_signaled_rcu(resv, true))
                                break;

                        i915_retire_requests(pool->engine->i915);
                        GEM_BUG_ON(i915_gem_object_is_active(obj));

                        /*
                         * The object is now idle, clear the array of shared
                         * fences before we add a new request. Although we
                         * remain on the same engine, we may be on a different
                         * timeline and so may continually grow the array,
                         * trapping a reference to all the old fences, rather
                         * than replace the existing fence.
                         */
                        if (rcu_access_pointer(resv->fence)) {
                                reservation_object_lock(resv, NULL);
                                reservation_object_add_excl_fence(resv, NULL);
                                reservation_object_unlock(resv);
                        }
                }

                GEM_BUG_ON(!reservation_object_test_signaled_rcu(obj->resv,
                                                                 true));

                if (obj->base.size >= size)
                        goto found;
        }

        obj = i915_gem_object_create_internal(pool->engine->i915, size);
        if (IS_ERR(obj))
                return obj;

found:
        ret = i915_gem_object_pin_pages(obj);
        if (ret)
                return ERR_PTR(ret);

        list_move_tail(&obj->batch_pool_link, list);
        return obj;
}
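
/*
 * Example (a sketch of a typical caller, with hypothetical names;
 * struct_mutex must already be held): fetch a buffer large enough for
 * a copied batch, use it, then drop the page pin so the object can be
 * recycled once its last request is retired.
 *
 *      struct drm_i915_gem_object *obj;
 *
 *      obj = i915_gem_batch_pool_get(&engine->batch_pool, batch_len);
 *      if (IS_ERR(obj))
 *              return PTR_ERR(obj);
 *
 *      ... copy and parse the user batch into obj, then submit ...
 *
 *      i915_gem_object_unpin_pages(obj);
 */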