
drm/i915: Update [vma|object]_move_to_active() to take request structures

Now that everything above has been converted to use request structures, it is
possible to update the lower-level move_to_active() functions to be
request-based as well.
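
For illustration, the calling convention before and after the change looks
like this (a minimal sketch using the names from the hunks below, not
additional code from the patch itself):

	/* Before: the caller had to pass the engine explicitly. */
	i915_vma_move_to_active(vma, ring);

	/* After: the caller passes the request it is building; the engine is
	 * recovered inside the helper via i915_gem_request_get_ring(req).
	 */
	i915_vma_move_to_active(vma, req);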

For: VIZ-5115
Signed-off-by: John Harrison <John.C.Harrison@Intel.com>
Reviewed-by: Tomas Elf <tomas.elf@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
commit b2af037693
Author: John Harrison

+ 1 - 1
drivers/gpu/drm/i915/i915_drv.h

@@ -2808,7 +2808,7 @@ int i915_gem_object_sync(struct drm_i915_gem_object *obj,
 			 struct intel_engine_cs *to,
 			 struct drm_i915_gem_request **to_req);
 void i915_vma_move_to_active(struct i915_vma *vma,
-			     struct intel_engine_cs *ring);
+			     struct drm_i915_gem_request *req);
 int i915_gem_dumb_create(struct drm_file *file_priv,
 			 struct drm_device *dev,
 			 struct drm_mode_create_dumb *args);

+ 5 - 3
drivers/gpu/drm/i915/i915_gem.c

@@ -2340,9 +2340,12 @@ i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
 }
 
 void i915_vma_move_to_active(struct i915_vma *vma,
-			     struct intel_engine_cs *ring)
+			     struct drm_i915_gem_request *req)
 {
 	struct drm_i915_gem_object *obj = vma->obj;
+	struct intel_engine_cs *ring;
+
+	ring = i915_gem_request_get_ring(req);
 
 	/* Add a reference if we're newly entering the active list. */
 	if (obj->active == 0)
@@ -2350,8 +2353,7 @@ void i915_vma_move_to_active(struct i915_vma *vma,
 	obj->active |= intel_ring_flag(ring);
 
 	list_move_tail(&obj->ring_list[ring->id], &ring->active_list);
-	i915_gem_request_assign(&obj->last_read_req[ring->id],
-				intel_ring_get_request(ring));
+	i915_gem_request_assign(&obj->last_read_req[ring->id], req);
 
 	list_move_tail(&vma->mm_list, &vma->vm->active_list);
 }

+ 1 - 1
drivers/gpu/drm/i915/i915_gem_context.c

@@ -736,7 +736,7 @@ static int do_switch(struct drm_i915_gem_request *req)
 	 */
 	if (from != NULL) {
 		from->legacy_hw_ctx.rcs_state->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
-		i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->legacy_hw_ctx.rcs_state), ring);
+		i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->legacy_hw_ctx.rcs_state), req);
 		/* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
 		 * whole damn pipeline, we don't need to explicitly mark the
 		 * object dirty. The only exception is that the context must be

+ 1 - 1
drivers/gpu/drm/i915/i915_gem_execbuffer.c

@@ -1036,7 +1036,7 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
 			obj->base.pending_read_domains |= obj->base.read_domains;
 		obj->base.read_domains = obj->base.pending_read_domains;
 
-		i915_vma_move_to_active(vma, ring);
+		i915_vma_move_to_active(vma, req);
 		if (obj->base.write_domain) {
 			obj->dirty = 1;
 			i915_gem_request_assign(&obj->last_write_req, req);

+ 1 - 1
drivers/gpu/drm/i915/i915_gem_render_state.c

@@ -171,7 +171,7 @@ int i915_gem_render_state_init(struct drm_i915_gem_request *req)
 	if (ret)
 		goto out;
 
-	i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), req->ring);
+	i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), req);
 
 out:
 	i915_gem_render_state_fini(&so);

+ 1 - 1
drivers/gpu/drm/i915/intel_lrc.c

@@ -1593,7 +1593,7 @@ static int intel_lr_context_render_state_init(struct drm_i915_gem_request *req)
 	if (ret)
 		goto out;
 
-	i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), req->ring);
+	i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), req);
 
 out:
 	i915_gem_render_state_fini(&so);