@@ -1936,11 +1936,27 @@ out:
 void
 i915_gem_release_mmap(struct drm_i915_gem_object *obj)
 {
+	/* Serialisation between user GTT access and our code depends upon
+	 * revoking the CPU's PTE whilst the mutex is held. The next user
+	 * pagefault then has to wait until we release the mutex.
+	 */
+	lockdep_assert_held(&obj->base.dev->struct_mutex);
+
 	if (!obj->fault_mappable)
 		return;
 
 	drm_vma_node_unmap(&obj->base.vma_node,
			   obj->base.dev->anon_inode->i_mapping);
+
+	/* Ensure that the CPU's PTE are revoked and there are no outstanding
+	 * memory transactions from userspace before we return. The TLB
+	 * flushing implied above by changing the PTE *should* be
+	 * sufficient; an extra barrier here just provides us with a bit
+	 * of paranoid documentation about our requirement to serialise
+	 * memory writes before touching registers / GSM.
+	 */
+	wmb();
+
 	obj->fault_mappable = false;
 }
 
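The assertion above documents a real contract: revoking the PTEs and the next
userspace pagefault are serialised only because both run under struct_mutex,
so every caller must already hold it. A minimal sketch of a conforming call
site follows; the function name is hypothetical and only the locking pattern
is the point.

/* Hypothetical call site; illustrates the locking contract only. */
static void example_revoke_user_mappings(struct drm_i915_gem_object *obj)
{
	struct drm_device *dev = obj->base.dev;

	mutex_lock(&dev->struct_mutex);
	/* Any concurrent pagefault now blocks until we drop the mutex. */
	i915_gem_release_mmap(obj);
	mutex_unlock(&dev->struct_mutex);
}
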
@@ -3324,9 +3340,6 @@ static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
 	if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
 		return;
 
-	/* Wait for any direct GTT access to complete */
-	mb();
-
 	old_read_domains = obj->base.read_domains;
 	old_write_domain = obj->base.write_domain;
 
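Reading the two hunks together, the mb() removed here is presumably covered
by the wmb() now issued inside i915_gem_release_mmap() above: by the time the
PTEs have been revoked, userspace writes through the GTT mapping are already
ordered against whatever the caller does next. A hedged sketch of that
reading, with a caller invented purely for illustration:

/* Illustrative only: shows where the barrier now lives in the sequence. */
static void example_finish_gtt_path(struct drm_i915_gem_object *obj)
{
	lockdep_assert_held(&obj->base.dev->struct_mutex);

	/* Revokes the user PTEs and then issues wmb() internally. */
	i915_gem_release_mmap(obj);

	/* No standalone mb() is needed before flipping the domains. */
	i915_gem_object_finish_gtt(obj);
}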