@@ -322,6 +322,41 @@ struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync, bool *explicit
 	return NULL;
 }
 
+/**
+ * amdgpu_sync_clone - clone a sync object
+ *
+ * @source: sync object to clone
+ * @clone: pointer to destination sync object
+ *
+ * Adds references to all unsignaled fences in @source to @clone. Also
+ * removes signaled fences from @source while at it.
+ */
+int amdgpu_sync_clone(struct amdgpu_sync *source, struct amdgpu_sync *clone)
+{
+	struct amdgpu_sync_entry *e;
+	struct hlist_node *tmp;
+	struct dma_fence *f;
+	int i, r;
+
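+	/* The _safe variant is required here because signaled entries
+	 * are removed from the hash table while we walk it.
+	 */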
+	hash_for_each_safe(source->fences, i, tmp, e, node) {
+		f = e->fence;
+		if (!dma_fence_is_signaled(f)) {
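+			/* Still pending, add a reference for @clone */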
+			r = amdgpu_sync_fence(NULL, clone, f, e->explicit);
+			if (r)
+				return r;
+		} else {
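+			/* Already signaled, so drop it from @source:
+			 * unhash the entry, release the fence reference
+			 * and free the entry.
+			 */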
+			hash_del(&e->node);
+			dma_fence_put(f);
+			kmem_cache_free(amdgpu_sync_slab, e);
+		}
+	}
+
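+	/* Carry the last VM update fence over as well, dropping any
+	 * fence the clone already held so its reference isn't leaked.
+	 */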
+	dma_fence_put(clone->last_vm_update);
+	clone->last_vm_update = dma_fence_get(source->last_vm_update);
+
+	return 0;
+}
+
 int amdgpu_sync_wait(struct amdgpu_sync *sync, bool intr)
 {
 	struct amdgpu_sync_entry *e;