@@ -121,7 +121,7 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct fence **f)
 {
 	struct amdgpu_device *adev = ring->adev;
 	struct amdgpu_fence *fence;
-	struct fence **ptr;
+	struct fence *old, **ptr;
 	uint32_t seq;
 
 	fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_KERNEL);
@@ -141,7 +141,11 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct fence **f)
 	/* This function can't be called concurrently anyway, otherwise
 	 * emitting the fence would mess up the hardware ring buffer.
 	 */
-	BUG_ON(rcu_dereference_protected(*ptr, 1));
+	old = rcu_dereference_protected(*ptr, 1);
+	if (old && !fence_is_signaled(old)) {
+		DRM_INFO("rcu slot is busy\n");
+		fence_wait(old, false);
+	}
 
 	rcu_assign_pointer(*ptr, fence_get(&fence->base));
 
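
Not part of the patch: a minimal user-space sketch of the slot-reuse pattern the second hunk implements. Before a new fence is stored into the ring slot picked by seq & mask, any previous occupant that has not signaled yet is waited on, instead of being treated as a fatal condition with BUG_ON(). All names below (model_fence, model_emit, NUM_SLOTS) are illustrative assumptions, not amdgpu APIs; pthreads stand in for the kernel's fence and RCU machinery.

/*
 * Illustrative model only -- not kernel code.  A "fence" is a flag plus a
 * condition variable; a small power-of-two ring of slots stands in for
 * ring->fence_drv.fences[], indexed by seq & mask.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct model_fence {
	pthread_mutex_t lock;
	pthread_cond_t  done;
	bool            signaled;
};

static struct model_fence *model_fence_create(void)
{
	struct model_fence *f = calloc(1, sizeof(*f));

	if (!f)
		abort();
	pthread_mutex_init(&f->lock, NULL);
	pthread_cond_init(&f->done, NULL);
	return f;
}

static bool model_fence_is_signaled(struct model_fence *f)
{
	pthread_mutex_lock(&f->lock);
	bool ret = f->signaled;
	pthread_mutex_unlock(&f->lock);
	return ret;
}

static void model_fence_signal(struct model_fence *f)
{
	pthread_mutex_lock(&f->lock);
	f->signaled = true;
	pthread_cond_broadcast(&f->done);
	pthread_mutex_unlock(&f->lock);
}

static void model_fence_wait(struct model_fence *f)
{
	pthread_mutex_lock(&f->lock);
	while (!f->signaled)
		pthread_cond_wait(&f->done, &f->lock);
	pthread_mutex_unlock(&f->lock);
}

#define NUM_SLOTS 4	/* power of two, so the mask works like num_fences_mask */

static struct model_fence *slots[NUM_SLOTS];

/* Emit a fence for @seq: wait out an unsignaled previous occupant, then reuse the slot. */
static struct model_fence *model_emit(unsigned int seq)
{
	struct model_fence **ptr = &slots[seq & (NUM_SLOTS - 1)];
	struct model_fence *old = *ptr;

	if (old && !model_fence_is_signaled(old)) {
		printf("slot %u is busy, waiting\n", seq & (NUM_SLOTS - 1));
		model_fence_wait(old);
	}
	free(old);			/* the real code drops a fence reference instead */
	*ptr = model_fence_create();
	return *ptr;
}

int main(void)
{
	/* Emit twice as many fences as there are slots, signaling each one,
	 * so every slot is reused once without ever hitting the wait path. */
	for (unsigned int seq = 0; seq < 2 * NUM_SLOTS; seq++)
		model_fence_signal(model_emit(seq));
	return 0;
}

The point of the change, as modeled here: a still-pending fence in a reused slot becomes a stall plus a log line (DRM_INFO() and fence_wait() in the hunk above) rather than a kernel crash from BUG_ON().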