@@ -468,10 +468,11 @@ static void execlists_submit_ports(struct intel_engine_cs *engine)
 			desc = execlists_update_context(rq);
 			GEM_DEBUG_EXEC(port[n].context_id = upper_32_bits(desc));
 
-			GEM_TRACE("%s in[%d]: ctx=%d.%d, seqno=%d (current %d), prio=%d\n",
+			GEM_TRACE("%s in[%d]: ctx=%d.%d, global=%d (fence %llx:%d) (current %d), prio=%d\n",
 				  engine->name, n,
 				  port[n].context_id, count,
 				  rq->global_seqno,
+				  rq->fence.context, rq->fence.seqno,
 				  intel_engine_get_seqno(engine),
 				  rq_prio(rq));
 		} else {
@@ -742,6 +743,13 @@ execlists_cancel_port_requests(struct intel_engine_execlists * const execlists)
 	while (num_ports-- && port_isset(port)) {
 		struct i915_request *rq = port_request(port);
 
+		GEM_TRACE("%s:port%u global=%d (fence %llx:%d), (current %d)\n",
+			  rq->engine->name,
+			  (unsigned int)(port - execlists->port),
+			  rq->global_seqno,
+			  rq->fence.context, rq->fence.seqno,
+			  intel_engine_get_seqno(rq->engine));
+
 		GEM_BUG_ON(!execlists->active);
 		intel_engine_context_out(rq->engine);
 
@@ -817,7 +825,8 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
 	struct rb_node *rb;
 	unsigned long flags;
 
-	GEM_TRACE("%s\n", engine->name);
+	GEM_TRACE("%s current %d\n",
+		  engine->name, intel_engine_get_seqno(engine));
 
 	/*
 	 * Before we call engine->cancel_requests(), we should have exclusive
@@ -1014,10 +1023,12 @@ static void execlists_submission_tasklet(unsigned long data)
 							EXECLISTS_ACTIVE_USER));
 
 			rq = port_unpack(port, &count);
-			GEM_TRACE("%s out[0]: ctx=%d.%d, seqno=%d (current %d), prio=%d\n",
+			GEM_TRACE("%s out[0]: ctx=%d.%d, global=%d (fence %llx:%d) (current %d), prio=%d\n",
 				  engine->name,
 				  port->context_id, count,
 				  rq ? rq->global_seqno : 0,
+				  rq ? rq->fence.context : 0,
+				  rq ? rq->fence.seqno : 0,
 				  intel_engine_get_seqno(engine),
 				  rq ? rq_prio(rq) : 0);
 
@@ -1744,8 +1755,9 @@ static void reset_common_ring(struct intel_engine_cs *engine,
 	struct intel_context *ce;
 	unsigned long flags;
 
-	GEM_TRACE("%s seqno=%x\n",
-		  engine->name, request ? request->global_seqno : 0);
+	GEM_TRACE("%s request global=%x, current=%d\n",
+		  engine->name, request ? request->global_seqno : 0,
+		  intel_engine_get_seqno(engine));
 
 	/* See execlists_cancel_requests() for the irq/spinlock split. */
 	local_irq_save(flags);