@@ -75,11 +75,6 @@ int dma_fence_signal_locked(struct dma_fence *fence)
 	if (WARN_ON(!fence))
 		return -EINVAL;
 
-	if (!ktime_to_ns(fence->timestamp)) {
-		fence->timestamp = ktime_get();
-		smp_mb__before_atomic();
-	}
-
 	if (test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
 		ret = -EINVAL;
 
@@ -87,8 +82,11 @@ int dma_fence_signal_locked(struct dma_fence *fence)
 		 * we might have raced with the unlocked dma_fence_signal,
 		 * still run through all callbacks
 		 */
-	} else
+	} else {
+		fence->timestamp = ktime_get();
+		set_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags);
 		trace_dma_fence_signaled(fence);
+	}
 
 	list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) {
 		list_del_init(&cur->node);
@@ -115,14 +113,11 @@ int dma_fence_signal(struct dma_fence *fence)
 	if (!fence)
 		return -EINVAL;
 
-	if (!ktime_to_ns(fence->timestamp)) {
-		fence->timestamp = ktime_get();
-		smp_mb__before_atomic();
-	}
-
 	if (test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
 		return -EINVAL;
 
+	fence->timestamp = ktime_get();
+	set_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags);
 	trace_dma_fence_signaled(fence);
 
 	if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags)) {