@@ -1223,8 +1223,7 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
 
 	WARN(!intel_irqs_enabled(dev_priv), "IRQs disabled");
 
-	if (i915_seqno_passed(ring->get_seqno(ring, true),
-			      i915_gem_request_get_seqno(req)))
+	if (i915_gem_request_completed(req, true))
 		return 0;
 
 	timeout_expire = timeout ? jiffies + nsecs_to_jiffies((u64)*timeout) : 0;
@@ -1260,8 +1259,7 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
 			break;
 		}
 
-		if (i915_seqno_passed(ring->get_seqno(ring, false),
-				      i915_gem_request_get_seqno(req))) {
+		if (i915_gem_request_completed(req, false)) {
 			ret = 0;
 			break;
 		}
@@ -2333,8 +2331,7 @@ i915_gem_object_retire(struct drm_i915_gem_object *obj)
 	if (ring == NULL)
 		return;
 
-	if (i915_seqno_passed(ring->get_seqno(ring, true),
-			      i915_gem_request_get_seqno(obj->last_read_req)))
+	if (i915_gem_request_completed(obj->last_read_req, true))
 		i915_gem_object_move_to_inactive(obj);
 }
 
@@ -2601,12 +2598,9 @@ struct drm_i915_gem_request *
 i915_gem_find_active_request(struct intel_engine_cs *ring)
 {
 	struct drm_i915_gem_request *request;
-	u32 completed_seqno;
-
-	completed_seqno = ring->get_seqno(ring, false);
 
 	list_for_each_entry(request, &ring->request_list, list) {
-		if (i915_seqno_passed(completed_seqno, request->seqno))
+		if (i915_gem_request_completed(request, false))
 			continue;
 
 		return request;
@@ -2734,15 +2728,11 @@ void i915_gem_reset(struct drm_device *dev)
 void
 i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
 {
-	uint32_t seqno;
-
 	if (list_empty(&ring->request_list))
 		return;
 
 	WARN_ON(i915_verify_lists(ring->dev));
 
-	seqno = ring->get_seqno(ring, true);
-
 	/* Move any buffers on the active list that are no longer referenced
 	 * by the ringbuffer to the flushing/inactive lists as appropriate,
 	 * before we free the context associated with the requests.
@@ -2754,8 +2744,7 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
 				       struct drm_i915_gem_object,
 				       ring_list);
 
-		if (!i915_seqno_passed(seqno,
-				       i915_gem_request_get_seqno(obj->last_read_req)))
+		if (!i915_gem_request_completed(obj->last_read_req, true))
 			break;
 
 		i915_gem_object_move_to_inactive(obj);
@@ -2770,7 +2759,7 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
 					   struct drm_i915_gem_request,
 					   list);
 
-		if (!i915_seqno_passed(seqno, request->seqno))
+		if (!i915_gem_request_completed(request, true))
 			break;
 
 		trace_i915_gem_request_retire(request);
@@ -2797,7 +2786,8 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
 	}
 
 	if (unlikely(ring->trace_irq_seqno &&
-		     i915_seqno_passed(seqno, ring->trace_irq_seqno))) {
+		     i915_seqno_passed(ring->get_seqno(ring, true),
+				       ring->trace_irq_seqno))) {
 		ring->irq_put(ring);
 		ring->trace_irq_seqno = 0;
 	}
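
For reference, a sketch of the two helpers involved. i915_seqno_passed() is the
driver's existing wrap-safe seqno comparison; the body of
i915_gem_request_completed() shown here is inferred from the call sites this
patch converts (a request plus a coherency flag forwarded to
ring->get_seqno()), not copied from i915_drv.h, so the real definition may
differ in detail:

	/*
	 * Wrap-safe seqno comparison: seqnos live in a modular 32-bit
	 * space, so a seqno that has wrapped past zero still compares
	 * as "later" via signed subtraction.
	 */
	static inline bool
	i915_seqno_passed(uint32_t seq1, uint32_t seq2)
	{
		return (int32_t)(seq1 - seq2) >= 0;
	}

	/*
	 * Sketch of the replacement helper (assumed shape): it bundles
	 * the ring lookup, the get_seqno() read and the wrap-safe
	 * comparison that each call site previously open-coded. The
	 * bool at every converted call site matches the flag that was
	 * passed to ring->get_seqno() before the conversion.
	 */
	static inline bool
	i915_gem_request_completed(struct drm_i915_gem_request *req,
				   bool lazy_coherency)
	{
		u32 seqno = req->ring->get_seqno(req->ring, lazy_coherency);

		return i915_seqno_passed(seqno, req->seqno);
	}

Note that the final hunk keeps an open-coded i915_seqno_passed(): there
ring->trace_irq_seqno is a bare seqno with no request object attached, so it
cannot go through the new request-based helper and instead gains an explicit
ring->get_seqno() read to replace the deleted local.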