i915_gem_timeline.h

/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */
#ifndef I915_GEM_TIMELINE_H
#define I915_GEM_TIMELINE_H

#include <linux/list.h>

#include "i915_utils.h"
#include "i915_gem_request.h"
#include "i915_syncmap.h"

struct i915_gem_timeline;
struct intel_timeline {
        u64 fence_context;
        u32 seqno;

        /**
         * Count of outstanding requests, from the time they are constructed
         * to the moment they are retired. Loosely coupled to hardware.
         */
        u32 inflight_seqnos;

        spinlock_t lock;

        /**
         * List of breadcrumbs associated with GPU requests currently
         * outstanding.
         */
        struct list_head requests;

        /* Contains an RCU guarded pointer to the last request. No reference is
         * held to the request, users must carefully acquire a reference to
         * the request using i915_gem_active_get_request_rcu(), or hold the
         * struct_mutex.
         */
        struct i915_gem_active last_request;

        /**
         * We track the most recent seqno that we wait on in every context so
         * that we only have to emit a new await and dependency on a more
         * recent sync point. As the contexts may be executed out-of-order, we
         * have to track each individually and can not rely on an absolute
         * global_seqno. When we know that all tracked fences are completed
         * (i.e. when the driver is idle), we know that the syncmap is
         * redundant and we can discard it without loss of generality.
         */
        struct i915_syncmap *sync;

        /**
         * Separately to the inter-context seqno map above, we track the last
         * barrier (e.g. semaphore wait) to the global engine timelines. Note
         * that this tracks global_seqno rather than the context.seqno, and
         * so it is subject to the limitations of hw wraparound and that we
         * may need to revoke global_seqno (on pre-emption).
         */
        u32 global_sync[I915_NUM_ENGINES];

        /* Backpointer to the i915_gem_timeline containing this engine slot. */
        struct i915_gem_timeline *common;
};
struct i915_gem_timeline {
        struct list_head link;

        struct drm_i915_private *i915;
        const char *name;

        struct intel_timeline engine[I915_NUM_ENGINES];
};
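
/*
 * Illustrative sketch (not part of the original header): each engine owns one
 * intel_timeline slot within its parent i915_gem_timeline, indexed by the
 * engine id, e.g.
 *
 *	struct intel_timeline *tl = &timeline->engine[engine->id];
 *
 * where "timeline" and "engine" are assumed to be a struct i915_gem_timeline
 * and a struct intel_engine_cs supplied by the caller.
 */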
int i915_gem_timeline_init(struct drm_i915_private *i915,
                           struct i915_gem_timeline *tl,
                           const char *name);
int i915_gem_timeline_init__global(struct drm_i915_private *i915);
void i915_gem_timelines_mark_idle(struct drm_i915_private *i915);
void i915_gem_timeline_fini(struct i915_gem_timeline *tl);
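
/*
 * Hypothetical usage sketch for the init/fini entry points above: a caller
 * owning a struct i915_gem_timeline initialises it against the device, uses
 * it, and later tears it down, e.g.
 *
 *	err = i915_gem_timeline_init(i915, &timeline, "name");
 *	if (err)
 *		return err;
 *	...
 *	i915_gem_timeline_fini(&timeline);
 *
 * Here "i915", "timeline" and "name" are placeholders for the caller's
 * device private, timeline storage and debug name.
 */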
/* Record the most recent sync point of @context that @tl has waited upon. */
static inline int __intel_timeline_sync_set(struct intel_timeline *tl,
                                            u64 context, u32 seqno)
{
        return i915_syncmap_set(&tl->sync, context, seqno);
}

static inline int intel_timeline_sync_set(struct intel_timeline *tl,
                                          const struct dma_fence *fence)
{
        return __intel_timeline_sync_set(tl, fence->context, fence->seqno);
}

/* Check whether @tl has already waited upon @context reaching @seqno or later. */
static inline bool __intel_timeline_sync_is_later(struct intel_timeline *tl,
                                                  u64 context, u32 seqno)
{
        return i915_syncmap_is_later(&tl->sync, context, seqno);
}

static inline bool intel_timeline_sync_is_later(struct intel_timeline *tl,
                                                const struct dma_fence *fence)
{
        return __intel_timeline_sync_is_later(tl, fence->context, fence->seqno);
}
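
/*
 * Illustrative sketch (assumed usage, not part of the original header) of how
 * the sync helpers above combine: before emitting a wait upon a foreign
 * fence, a caller can skip fences this timeline has already waited upon, and
 * record the new sync point once the wait has been emitted:
 *
 *	if (intel_timeline_sync_is_later(tl, fence))
 *		return 0;
 *
 *	err = emit_wait_for_fence(rq, fence);
 *	if (err)
 *		return err;
 *
 *	return intel_timeline_sync_set(tl, fence);
 *
 * Here "tl", "rq", "fence" and emit_wait_for_fence() are placeholders for the
 * caller's timeline, request, dma_fence and wait-emission path.
 */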
#endif