
drm/i915/selftests: Replace wmb() with i915_gem_chipset_flush()

Currently, we are being fairly lazy and only issue a wmb() following an
update to an active batch. Previously, we have found that to be
insufficient to ensure that a write from the CPU reaches memory in a
timely fashion, and in some cases we may also need to flush a chipset
cache. To that end, we have i915_gem_chipset_flush(), so use it.

Suggested-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20170926153409.7928-1-chris@chris-wilson.co.uk
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Chris Wilson
commit 60456d5c2d
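
For context, i915_gem_chipset_flush() is a small helper in the i915 driver.
Around this kernel it amounted to roughly the following; this is a sketch of
the contemporaneous helper, not a verbatim copy, and the exact file, inline
form, and INTEL_GEN cutoff are assumptions:

	/* Sketch of the helper the selftests switch to (assumed form). */
	static inline void i915_gem_chipset_flush(struct drm_i915_private *dev_priv)
	{
		/* Order the CPU's stores so the batch update leaves the
		 * write-combining/store buffers before anything that follows.
		 */
		wmb();

		/* Older parts route GPU access through the GMCH, which has its
		 * own global write buffer that a plain wmb() does not reach;
		 * flush it explicitly. (Gen cutoff assumed here.)
		 */
		if (INTEL_GEN(dev_priv) < 6)
			intel_gtt_chipset_flush();
	}

That second step is the point of the patch: on old chipsets a bare wmb() can
leave the CPU's write stranded in the chipset write buffer, so the GPU may
still read a stale batch.
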

drivers/gpu/drm/i915/selftests/i915_gem_request.c | 10 +++++++---

@@ -418,7 +418,10 @@ static struct i915_vma *empty_batch(struct drm_i915_private *i915)
 		err = PTR_ERR(cmd);
 		goto err;
 	}
+
 	*cmd = MI_BATCH_BUFFER_END;
+	i915_gem_chipset_flush(i915);
+
 	i915_gem_object_unpin_map(obj);
 
 	err = i915_gem_object_set_to_gtt_domain(obj, false);
@@ -605,8 +608,8 @@ static struct i915_vma *recursive_batch(struct drm_i915_private *i915)
 		*cmd++ = lower_32_bits(vma->node.start);
 	}
 	*cmd++ = MI_BATCH_BUFFER_END; /* terminate early in case of error */
+	i915_gem_chipset_flush(i915);
 
-	wmb();
 	i915_gem_object_unpin_map(obj);
 
 	return vma;
@@ -625,7 +628,7 @@ static int recursive_batch_resolve(struct i915_vma *batch)
 		return PTR_ERR(cmd);
 
 	*cmd = MI_BATCH_BUFFER_END;
-	wmb();
+	i915_gem_chipset_flush(batch->vm->i915);
 
 	i915_gem_object_unpin_map(batch->obj);
 
@@ -858,7 +861,8 @@ out_request:
 					      I915_MAP_WC);
 		if (!IS_ERR(cmd)) {
 			*cmd = MI_BATCH_BUFFER_END;
-			wmb();
+			i915_gem_chipset_flush(i915);
+
 			i915_gem_object_unpin_map(request[id]->batch->obj);
 		}
 

drivers/gpu/drm/i915/selftests/intel_hangcheck.c | 7 +++++--

@@ -165,6 +165,7 @@ static int emit_recurse_batch(struct hang *h,
 		*batch++ = lower_32_bits(vma->node.start);
 	}
 	*batch++ = MI_BATCH_BUFFER_END; /* not reached */
+	i915_gem_chipset_flush(h->i915);
 
 	flags = 0;
 	if (INTEL_GEN(vm->i915) <= 5)
@@ -231,7 +232,7 @@ static u32 hws_seqno(const struct hang *h,
 static void hang_fini(struct hang *h)
 {
 	*h->batch = MI_BATCH_BUFFER_END;
-	wmb();
+	i915_gem_chipset_flush(h->i915);
 
 	i915_gem_object_unpin_map(h->obj);
 	i915_gem_object_put(h->obj);
@@ -275,6 +276,8 @@ static int igt_hang_sanitycheck(void *arg)
 		i915_gem_request_get(rq);
 
 		*h.batch = MI_BATCH_BUFFER_END;
+		i915_gem_chipset_flush(i915);
+
 		__i915_add_request(rq, true);
 
 		timeout = i915_wait_request(rq,
@@ -765,7 +768,7 @@ static int igt_reset_queue(void *arg)
 		pr_info("%s: Completed %d resets\n", engine->name, count);
 
 		*h.batch = MI_BATCH_BUFFER_END;
-		wmb();
+		i915_gem_chipset_flush(i915);
 
 		i915_gem_request_put(prev);
 	}