|
@@ -64,6 +64,27 @@ static int check_rbtree(struct intel_engine_cs *engine,
|
|
|
return 0;
|
|
|
}
|
|
|
|
|
|
+/*
+ * check_completion - verify each waiter's completion state against the bitmap
+ *
+ * For waiter n, a set bit in @bitmap means the waiter is expected to still be
+ * active (not yet signalled); a clear bit means it is expected to have
+ * completed.  Returns 0 when every waiter matches expectation, or -EINVAL
+ * (after logging the first mismatch) otherwise.
+ */
+static int check_completion(struct intel_engine_cs *engine,
|
|
|
+			    const unsigned long *bitmap,
|
|
|
+			    const struct intel_wait *waiters,
|
|
|
+			    const int count)
|
|
|
+{
|
|
|
+	int n;
|
|
|
+
|
|
|
+	for (n = 0; n < count; n++) {
|
|
|
+		/* Expected state: completed iff its bitmap bit is clear.
+		 * When the two sides differ the waiter is in the expected
+		 * state, so skip it; equality is the error case.
+		 */
+		if (intel_wait_complete(&waiters[n]) != !!test_bit(n, bitmap))
|
|
|
+			continue;
|
|
|
+
|
|
|
+		pr_err("waiter[%d, seqno=%d] is %s, but expected %s\n",
|
|
|
+		       n, waiters[n].seqno,
|
|
|
+		       intel_wait_complete(&waiters[n]) ? "complete" : "active",
|
|
|
+		       test_bit(n, bitmap) ? "active" : "complete");
|
|
|
+		return -EINVAL;
|
|
|
+	}
|
|
|
+
|
|
|
+	return 0;
|
|
|
+}
|
|
|
+
|
|
|
static int check_rbtree_empty(struct intel_engine_cs *engine)
|
|
|
{
|
|
|
struct intel_breadcrumbs *b = &engine->breadcrumbs;
|
|
@@ -153,10 +174,96 @@ out_engines:
|
|
|
return err;
|
|
|
}
|
|
|
|
|
|
+/*
+ * igt_insert_complete - queue a batch of waiters on the mock engine, then
+ * retire them in progressively larger seqno steps, checking after every
+ * advance that the rbtree of waiters and each waiter's completion state
+ * match the tracking bitmap.  Returns 0 on success or a negative errno.
+ */
+static int igt_insert_complete(void *arg)
|
|
|
+{
|
|
|
+	const u32 seqno_bias = 0x1000;
|
|
|
+	struct intel_engine_cs *engine = arg;
|
|
|
+	struct intel_wait *waiters;
|
|
|
+	const int count = 4096;
|
|
|
+	unsigned long *bitmap;
|
|
|
+	int err = -ENOMEM;
|
|
|
+	int n, m;
|
|
|
+
|
|
|
+	mock_engine_reset(engine);
|
|
|
+
|
|
|
+	waiters = drm_malloc_gfp(count, sizeof(*waiters), GFP_TEMPORARY);
|
|
|
+	if (!waiters)
|
|
|
+		goto out_engines;
|
|
|
+
|
|
|
+	/* One bit per waiter: set while the waiter is expected to be active. */
+	bitmap = kcalloc(DIV_ROUND_UP(count, BITS_PER_LONG), sizeof(*bitmap),
|
|
|
+			 GFP_TEMPORARY);
|
|
|
+	if (!bitmap)
|
|
|
+		goto out_waiters;
|
|
|
+
|
|
|
+	/* Queue every waiter (seqno offset by seqno_bias) and mark it active. */
+	for (n = 0; n < count; n++) {
|
|
|
+		intel_wait_init(&waiters[n], n + seqno_bias);
|
|
|
+		intel_engine_add_wait(engine, &waiters[n]);
|
|
|
+		__set_bit(n, bitmap);
|
|
|
+	}
|
|
|
+	err = check_rbtree(engine, bitmap, waiters, count);
|
|
|
+	if (err)
|
|
|
+		goto out_bitmap;
|
|
|
+
|
|
|
+	/* On each step, we advance the seqno so that several waiters are then
|
|
|
+	 * complete (we increase the seqno by increasingly larger values to
|
|
|
+	 * retire more and more waiters at once). All retired waiters should
|
|
|
+	 * be woken and removed from the rbtree, and so that we check.
|
|
|
+	 */
|
|
|
+	for (n = 0; n < count; n = m) {
|
|
|
+		/* seqno = 2*n retires waiters n..2n this step (doubling). */
+		int seqno = 2 * n;
|
|
|
+
|
|
|
+		GEM_BUG_ON(find_first_bit(bitmap, count) != n);
|
|
|
+
|
|
|
+		/* waiters[n] must still be pending before this advance. */
+		if (intel_wait_complete(&waiters[n])) {
|
|
|
+			pr_err("waiter[%d, seqno=%d] completed too early\n",
|
|
|
+			       n, waiters[n].seqno);
|
|
|
+			err = -EINVAL;
|
|
|
+			goto out_bitmap;
|
|
|
+		}
|
|
|
+
|
|
|
+		/* complete the following waiters */
|
|
|
+		mock_seqno_advance(engine, seqno + seqno_bias);
|
|
|
+		for (m = n; m <= seqno; m++) {
|
|
|
+			if (m == count)
|
|
|
+				break;
|
|
|
+
|
|
|
+			GEM_BUG_ON(!test_bit(m, bitmap));
|
|
|
+			__clear_bit(m, bitmap);
|
|
|
+		}
|
|
|
+
|
|
|
+		/* NOTE(review): only waiters[n] is removed explicitly here;
+		 * the rest are presumably dequeued by the wakeup path on
+		 * seqno advance — confirm against intel_engine_remove_wait.
+		 */
+		intel_engine_remove_wait(engine, &waiters[n]);
|
|
|
+		RB_CLEAR_NODE(&waiters[n].node);
|
|
|
+
|
|
|
+		err = check_rbtree(engine, bitmap, waiters, count);
|
|
|
+		if (err) {
|
|
|
+			pr_err("rbtree corrupt after seqno advance to %d\n",
|
|
|
+			       seqno + seqno_bias);
|
|
|
+			goto out_bitmap;
|
|
|
+		}
|
|
|
+
|
|
|
+		err = check_completion(engine, bitmap, waiters, count);
|
|
|
+		if (err) {
|
|
|
+			pr_err("completions after seqno advance to %d failed\n",
|
|
|
+			       seqno + seqno_bias);
|
|
|
+			goto out_bitmap;
|
|
|
+		}
|
|
|
+	}
|
|
|
+
|
|
|
+	/* Everything retired: the wait tree must now be empty. */
+	err = check_rbtree_empty(engine);
|
|
|
+out_bitmap:
|
|
|
+	kfree(bitmap);
|
|
|
+out_waiters:
|
|
|
+	drm_free_large(waiters);
|
|
|
+out_engines:
|
|
|
+	mock_engine_flush(engine);
|
|
|
+	return err;
|
|
|
+}
|
|
|
+
|
|
|
int intel_breadcrumbs_mock_selftests(void)
|
|
|
{
|
|
|
static const struct i915_subtest tests[] = {
|
|
|
SUBTEST(igt_random_insert_remove),
|
|
|
+ SUBTEST(igt_insert_complete),
|
|
|
};
|
|
|
struct intel_engine_cs *engine;
|
|
|
int err;
|