@@ -1213,7 +1213,6 @@ static int __i915_spin_request(struct drm_i915_gem_request *req, int state)
 /**
  * __i915_wait_request - wait until execution of request has finished
  * @req: duh!
- * @reset_counter: reset sequence associated with the given request
  * @interruptible: do an interruptible wait (normally yes)
  * @timeout: in - how long to wait (NULL forever); out - how much time remaining
  *
@@ -1228,7 +1227,6 @@ static int __i915_spin_request(struct drm_i915_gem_request *req, int state)
  * errno with remaining time filled in timeout argument.
  */
 int __i915_wait_request(struct drm_i915_gem_request *req,
-			unsigned reset_counter,
 			bool interruptible,
 			s64 *timeout,
 			struct intel_rps_client *rps)
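
The reset_counter parameter can go away because the request now records the counter itself. The header change is not part of this excerpt, but the hunks below (req->reset_counter here, and the assignment in __i915_gem_request_alloc()) imply that struct drm_i915_gem_request gains a field roughly as sketched; the surrounding fields are taken from the kref_init()/req->i915/req->engine assignments in the hunk at -2754, and the exact placement is an assumption:

    /* Sketch only -- not the full struct; implied header change */
    struct drm_i915_gem_request {
            struct kref ref;
            struct drm_i915_private *i915;
            struct intel_engine_cs *engine;
            /* reset counter sampled when the request was constructed;
             * compared against the live counter at wait time */
            unsigned reset_counter;
            /* ... remaining fields unchanged ... */
    };
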
@@ -1290,7 +1288,7 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
 
 	/* We need to check whether any gpu reset happened in between
 	 * the caller grabbing the seqno and now ... */
-	if (reset_counter != i915_reset_counter(&dev_priv->gpu_error)) {
+	if (req->reset_counter != i915_reset_counter(&dev_priv->gpu_error)) {
 		/* ... but upgrade the -EAGAIN to an -EIO if the gpu
 		 * is truely gone. */
 		ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
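
For reference, the counter on the other side of the comparison is just an atomic read of the per-device error state. A minimal sketch of the helper as it lived in i915_drv.h of this era (treat the exact definition as an assumption, not part of this patch):

    static inline u32 i915_reset_counter(struct i915_gpu_error *error)
    {
            /* updated around each GPU reset; a mismatch with the value
             * stored in the request means a reset has intervened */
            return atomic_read(&error->reset_counter);
    }

A mismatch means at least one reset began or completed after the request was constructed, so the waiter backs out with -EAGAIN, upgraded to -EIO by i915_gem_check_wedge() when the GPU is terminally wedged.
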
@@ -1460,13 +1458,7 @@ i915_wait_request(struct drm_i915_gem_request *req)
 
 	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
 
-	ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
-	if (ret)
-		return ret;
-
-	ret = __i915_wait_request(req,
-				  i915_reset_counter(&dev_priv->gpu_error),
-				  interruptible, NULL, NULL);
+	ret = __i915_wait_request(req, interruptible, NULL, NULL);
 	if (ret)
 		return ret;
 
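
Dropping the up-front i915_gem_check_wedge() call here is deliberate, not an oversight: the wedge check now runs once at request construction (see the __i915_gem_request_alloc() hunks below), and any reset that lands after construction is still caught inside __i915_wait_request() by the req->reset_counter comparison above.
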
@@ -1541,7 +1533,6 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
 	struct drm_device *dev = obj->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_request *requests[I915_NUM_ENGINES];
-	unsigned reset_counter;
 	int ret, i, n = 0;
 
 	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
@@ -1550,12 +1541,6 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
 	if (!obj->active)
 		return 0;
 
-	ret = i915_gem_check_wedge(&dev_priv->gpu_error, true);
-	if (ret)
-		return ret;
-
-	reset_counter = i915_reset_counter(&dev_priv->gpu_error);
-
 	if (readonly) {
 		struct drm_i915_gem_request *req;
 
@@ -1577,9 +1562,9 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
 	}
 
 	mutex_unlock(&dev->struct_mutex);
+	ret = 0;
 	for (i = 0; ret == 0 && i < n; i++)
-		ret = __i915_wait_request(requests[i], reset_counter, true,
-					  NULL, rps);
+		ret = __i915_wait_request(requests[i], true, NULL, rps);
 	mutex_lock(&dev->struct_mutex);
 
 	for (i = 0; i < n; i++) {
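
The new ret = 0 is required, not cosmetic: ret was previously assigned by the i915_gem_check_wedge() call removed in the preceding hunk, and without an explicit initialisation the loop condition ret == 0 would read an uninitialised variable.
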
@@ -2735,6 +2720,7 @@ __i915_gem_request_alloc(struct intel_engine_cs *engine,
 				    struct drm_i915_gem_request **req_out)
 {
 	struct drm_i915_private *dev_priv = to_i915(engine->dev);
+	unsigned reset_counter = i915_reset_counter(&dev_priv->gpu_error);
 	struct drm_i915_gem_request *req;
 	int ret;
 
@@ -2743,6 +2729,11 @@ __i915_gem_request_alloc(struct intel_engine_cs *engine,
 
 	*req_out = NULL;
 
+	ret = i915_gem_check_wedge(&dev_priv->gpu_error,
+				   dev_priv->mm.interruptible);
+	if (ret)
+		return ret;
+
 	req = kmem_cache_zalloc(dev_priv->requests, GFP_KERNEL);
 	if (req == NULL)
 		return -ENOMEM;
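
With the wedge check moved into request construction, callers learn at allocation time that the GPU is gone, rather than at wait time. A usage sketch; i915_gem_request_alloc() is assumed here to be the ERR_PTR-returning wrapper around __i915_gem_request_alloc() of this era:

    /* Hypothetical caller shape -- wrapper name and error convention
     * assumed, not shown in this excerpt: */
    req = i915_gem_request_alloc(engine, ctx);
    if (IS_ERR(req))
            return PTR_ERR(req);    /* propagates the wedge error above */
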
@@ -2754,6 +2745,7 @@ __i915_gem_request_alloc(struct intel_engine_cs *engine,
 	kref_init(&req->ref);
 	req->i915 = dev_priv;
 	req->engine = engine;
+	req->reset_counter = reset_counter;
 	req->ctx = ctx;
 	i915_gem_context_reference(req->ctx);
 
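
Ordering note: the counter is sampled at the top of __i915_gem_request_alloc() (the hunk at -2735), validated by i915_gem_check_wedge(), and only then stored into the request. If a reset completes in the window between the sample and this store, the stored value is merely stale: the first wait on the request sees the mismatch and returns -EAGAIN, which errs on the safe side.
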
@@ -3132,11 +3124,9 @@ retire:
 int
 i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_wait *args = data;
 	struct drm_i915_gem_object *obj;
 	struct drm_i915_gem_request *req[I915_NUM_ENGINES];
-	unsigned reset_counter;
 	int i, n = 0;
 	int ret;
 
@@ -3170,7 +3160,6 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 	}
 
 	drm_gem_object_unreference(&obj->base);
-	reset_counter = i915_reset_counter(&dev_priv->gpu_error);
 
 	for (i = 0; i < I915_NUM_ENGINES; i++) {
 		if (obj->last_read_req[i] == NULL)
@@ -3183,7 +3172,7 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 
 	for (i = 0; i < n; i++) {
 		if (ret == 0)
-			ret = __i915_wait_request(req[i], reset_counter, true,
+			ret = __i915_wait_request(req[i], true,
 						  args->timeout_ns > 0 ? &args->timeout_ns : NULL,
 						  to_rps_client(file));
 		i915_gem_request_unreference__unlocked(req[i]);
@@ -3215,7 +3204,6 @@ __i915_gem_object_sync(struct drm_i915_gem_object *obj,
 	if (!i915_semaphore_is_enabled(obj->base.dev)) {
 		struct drm_i915_private *i915 = to_i915(obj->base.dev);
 		ret = __i915_wait_request(from_req,
-					  i915_reset_counter(&i915->gpu_error),
 					  i915->mm.interruptible,
 					  NULL,
 					  &i915->rps.semaphores);
@@ -4171,7 +4159,6 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
 	struct drm_i915_file_private *file_priv = file->driver_priv;
 	unsigned long recent_enough = jiffies - DRM_I915_THROTTLE_JIFFIES;
 	struct drm_i915_gem_request *request, *target = NULL;
-	unsigned reset_counter;
 	int ret;
 
 	ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
@@ -4196,7 +4183,6 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
 
 		target = request;
 	}
-	reset_counter = i915_reset_counter(&dev_priv->gpu_error);
 	if (target)
 		i915_gem_request_reference(target);
 	spin_unlock(&file_priv->mm.lock);
@@ -4204,7 +4190,7 @@
 	if (target == NULL)
 		return 0;
 
-	ret = __i915_wait_request(target, reset_counter, true, NULL, NULL);
+	ret = __i915_wait_request(target, true, NULL, NULL);
 	if (ret == 0)
 		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
 
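
Net effect across all call sites: __i915_wait_request() loses the reset_counter plumbing entirely, since every request carries its own snapshot. The resulting declaration, derived directly from the hunks above:

    int __i915_wait_request(struct drm_i915_gem_request *req,
                            bool interruptible,
                            s64 *timeout,
                            struct intel_rps_client *rps);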