/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */

#include <linux/random.h>

#include "../i915_selftest.h"

#include "mock_context.h"
#include "igt_flush_test.h"
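
/*
 * Submit an empty request to @ctx on each engine. We run this before
 * suspending and again after resuming, to check that the context is
 * still usable for submission across the power transition.
 */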
static int switch_to_context(struct drm_i915_private *i915,
			     struct i915_gem_context *ctx)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err = 0;

	intel_runtime_pm_get(i915);

	for_each_engine(engine, i915, id) {
		struct i915_request *rq;

		rq = i915_request_alloc(engine, ctx);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			break;
		}

		i915_request_add(rq);
	}

	intel_runtime_pm_put(i915);

	return err;
}
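
/*
 * Overwrite every page of stolen memory (i915->dsm) with pseudo-random
 * junk, reaching each page in turn through a single temporary GGTT PTE
 * at the error-capture slot. Anything the driver left in stolen must
 * then be reconstructed on resume.
 */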
static void trash_stolen(struct drm_i915_private *i915)
{
	struct i915_ggtt *ggtt = &i915->ggtt;
	const u64 slot = ggtt->error_capture.start;
	const resource_size_t size = resource_size(&i915->dsm);
	unsigned long page;
	u32 prng = 0x12345678;

	for (page = 0; page < size; page += PAGE_SIZE) {
		const dma_addr_t dma = i915->dsm.start + page;
		u32 __iomem *s;
		int x;

		ggtt->vm.insert_page(&ggtt->vm, dma, slot, I915_CACHE_NONE, 0);

		s = io_mapping_map_atomic_wc(&ggtt->iomap, slot);
		for (x = 0; x < PAGE_SIZE / sizeof(u32); x++) {
			prng = next_pseudo_random32(prng);
			iowrite32(prng, &s[x]);
		}
		io_mapping_unmap_atomic(s);
	}

	ggtt->vm.clear_range(&ggtt->vm, slot, PAGE_SIZE);
}
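
/*
 * Emulate the destructive side effects of hibernation while the device
 * is held awake; see the comment in the body for why we trash stolen
 * ourselves rather than relying on a real S4 cycle.
 */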
static void simulate_hibernate(struct drm_i915_private *i915)
{
	intel_runtime_pm_get(i915);

	/*
	 * As a final sting in the tail, invalidate stolen. Under a real S4,
	 * stolen is lost and needs to be refilled on resume. However, under
	 * CI we merely do S4-device testing (as full S4 is too unreliable
	 * for automated testing across a cluster), so to simulate the effect
	 * of stolen being trashed across S4, we trash it ourselves.
	 */
	trash_stolen(i915);

	intel_runtime_pm_put(i915);
}
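
/* Quiesce the GPU and flush outstanding work before "suspending". */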
static int pm_prepare(struct drm_i915_private *i915)
{
	int err = 0;

	if (i915_gem_suspend(i915)) {
		pr_err("i915_gem_suspend failed\n");
		err = -EINVAL;
	}

	return err;
}
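
/*
 * Follow the S3 suspend path: suspend the GGTT mappings, then run the
 * late suspend phase, all under a runtime-pm wakeref.
 */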
static void pm_suspend(struct drm_i915_private *i915)
{
	intel_runtime_pm_get(i915);

	i915_gem_suspend_gtt_mappings(i915);
	i915_gem_suspend_late(i915);

	intel_runtime_pm_put(i915);
}
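
/*
 * Follow the S4 path instead: suspend the GGTT mappings, then run both
 * freeze phases under a runtime-pm wakeref.
 */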
static void pm_hibernate(struct drm_i915_private *i915)
{
	intel_runtime_pm_get(i915);

	i915_gem_suspend_gtt_mappings(i915);

	i915_gem_freeze(i915);
	i915_gem_freeze_late(i915);

	intel_runtime_pm_put(i915);
}
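
/* Shared wakeup path used by both the suspend and hibernate tests. */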
static void pm_resume(struct drm_i915_private *i915)
{
	/*
	 * Both suspend and hibernate follow the same wakeup path and assume
	 * that runtime-pm just works.
	 */
	intel_runtime_pm_get(i915);

	intel_engines_sanitize(i915);
	i915_gem_sanitize(i915);
	i915_gem_resume(i915);

	intel_runtime_pm_put(i915);
}
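
/*
 * Full S3 round trip: submit work to a live context, suspend, trash
 * stolen memory to mimic what S3RST/S4 would do to it, resume, and
 * check that the original context still executes.
 */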
static int igt_gem_suspend(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_gem_context *ctx;
	struct drm_file *file;
	int err;

	file = mock_file(i915);
	if (IS_ERR(file))
		return PTR_ERR(file);

	err = -ENOMEM;
	mutex_lock(&i915->drm.struct_mutex);
	ctx = live_context(i915, file);
	if (!IS_ERR(ctx))
		err = switch_to_context(i915, ctx);
	mutex_unlock(&i915->drm.struct_mutex);
	if (err)
		goto out;

	err = pm_prepare(i915);
	if (err)
		goto out;

	pm_suspend(i915);

	/* Here be dragons! Note that with S3RST any S3 may become S4! */
	simulate_hibernate(i915);

	pm_resume(i915);

	mutex_lock(&i915->drm.struct_mutex);
	err = switch_to_context(i915, ctx);
	if (igt_flush_test(i915, I915_WAIT_LOCKED))
		err = -EIO;
	mutex_unlock(&i915->drm.struct_mutex);

out:
	mock_file_free(i915, file);
	return err;
}
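
/*
 * As igt_gem_suspend(), but through the S4 freeze path rather than the
 * S3 suspend path.
 */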
static int igt_gem_hibernate(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_gem_context *ctx;
	struct drm_file *file;
	int err;

	file = mock_file(i915);
	if (IS_ERR(file))
		return PTR_ERR(file);

	err = -ENOMEM;
	mutex_lock(&i915->drm.struct_mutex);
	ctx = live_context(i915, file);
	if (!IS_ERR(ctx))
		err = switch_to_context(i915, ctx);
	mutex_unlock(&i915->drm.struct_mutex);
	if (err)
		goto out;

	err = pm_prepare(i915);
	if (err)
		goto out;

	pm_hibernate(i915);

	/* Here be dragons! */
	simulate_hibernate(i915);

	pm_resume(i915);

	mutex_lock(&i915->drm.struct_mutex);
	err = switch_to_context(i915, ctx);
	if (igt_flush_test(i915, I915_WAIT_LOCKED))
		err = -EIO;
	mutex_unlock(&i915->drm.struct_mutex);

out:
	mock_file_free(i915, file);
	return err;
}
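
/* Entry point: run the suspend and hibernate tests as live selftests. */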
int i915_gem_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_gem_suspend),
		SUBTEST(igt_gem_hibernate),
	};

	return i915_subtests(tests, i915);
}