@@ -479,10 +479,11 @@ static void __fence_set_priority(struct dma_fence *fence, int prio)
 
 	rq = to_request(fence);
 	engine = rq->engine;
-	if (!engine->schedule)
-		return;
 
-	engine->schedule(rq, prio);
+	rcu_read_lock();
+	if (engine->schedule)
+		engine->schedule(rq, prio);
+	rcu_read_unlock();
 }
 
 static void fence_set_priority(struct dma_fence *fence, int prio)
@@ -3222,8 +3223,11 @@ void i915_gem_set_wedged(struct drm_i915_private *i915)
 	 */
 	for_each_engine(engine, i915, id) {
 		i915_gem_reset_prepare_engine(engine);
+
 		engine->submit_request = nop_submit_request;
+		engine->schedule = NULL;
 	}
+	i915->caps.scheduler = 0;
 
 	/*
 	 * Make sure no one is running the old callback before we proceed with
@@ -3241,11 +3245,8 @@ void i915_gem_set_wedged(struct drm_i915_private *i915)
 		 * start to complete all requests.
 		 */
 		engine->submit_request = nop_complete_submit_request;
-		engine->schedule = NULL;
 	}
 
-	i915->caps.scheduler = 0;
-
 	/*
 	 * Make sure no request can slip through without getting completed by
 	 * either this call here to intel_engine_init_global_seqno, or the one