drm/i915: Remove superfluous i915_add_request_no_flush() helper

The only time we need to emit a flush inside request emission is after
an execbuffer, for which we can use the full __i915_add_request(). All
other instances want the simpler i915_add_request() without flushing, so
remove the useless helper.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20170317114709.8388-1-chris@chris-wilson.co.uk
Chris Wilson, 8 years ago
Commit: e642c85b03
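
For context, a minimal sketch of how the add-request interface reads once this patch is applied, reconstructed from the i915_gem_request.h hunk below; the surrounding declarations are assumed unchanged and this is not a verbatim copy of the header.

/* drivers/gpu/drm/i915/i915_gem_request.h, after this patch (sketch) */
void __i915_add_request(struct drm_i915_gem_request *req, bool flush_caches);

/*
 * The single remaining macro: every ordinary request-emission path
 * touched below (GVT scheduler, context switch, page flip, overlay,
 * powersave) now emits the request without flushing caches.
 */
#define i915_add_request(req) \
	__i915_add_request(req, false)

/*
 * Per the commit message, the execbuffer path is the only place that
 * needs the flush and can call __i915_add_request(req, true) directly;
 * that call site is not part of this diff.
 */
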

+ 1 - 1
drivers/gpu/drm/i915/gvt/scheduler.c

@@ -212,7 +212,7 @@ out:
 		workload->status = ret;
 
 	if (!IS_ERR_OR_NULL(rq))
-		i915_add_request_no_flush(rq);
+		i915_add_request(rq);
 	mutex_unlock(&dev_priv->drm.struct_mutex);
 	return ret;
 }

+ 1 - 1
drivers/gpu/drm/i915/i915_gem_context.c

@@ -933,7 +933,7 @@ int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv)
 		}
 
 		ret = i915_switch_context(req);
-		i915_add_request_no_flush(req);
+		i915_add_request(req);
 		if (ret)
 			return ret;
 	}

+ 0 - 2
drivers/gpu/drm/i915/i915_gem_request.h

@@ -267,8 +267,6 @@ int i915_gem_request_await_dma_fence(struct drm_i915_gem_request *req,
 
 void __i915_add_request(struct drm_i915_gem_request *req, bool flush_caches);
 #define i915_add_request(req) \
-	__i915_add_request(req, true)
-#define i915_add_request_no_flush(req) \
 	__i915_add_request(req, false)
 
 void __i915_gem_request_submit(struct drm_i915_gem_request *request);

+ 2 - 2
drivers/gpu/drm/i915/intel_display.c

@@ -10668,7 +10668,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 		intel_mark_page_flip_active(intel_crtc, work);
 
 		work->flip_queued_req = i915_gem_request_get(request);
-		i915_add_request_no_flush(request);
+		i915_add_request(request);
 	}
 
 	i915_gem_object_wait_priority(obj, 0, I915_PRIORITY_DISPLAY);
@@ -10684,7 +10684,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 	return 0;
 
 cleanup_request:
-	i915_add_request_no_flush(request);
+	i915_add_request(request);
 cleanup_unpin:
 	to_intel_plane_state(primary->state)->vma = work->old_vma;
 	intel_unpin_fb_vma(vma);

+ 4 - 4
drivers/gpu/drm/i915/intel_overlay.c

@@ -278,7 +278,7 @@ static int intel_overlay_on(struct intel_overlay *overlay)
 
 	cs = intel_ring_begin(req, 4);
 	if (IS_ERR(cs)) {
-		i915_add_request_no_flush(req);
+		i915_add_request(req);
 		return PTR_ERR(cs);
 	}
 
@@ -343,7 +343,7 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
 
 	cs = intel_ring_begin(req, 2);
 	if (IS_ERR(cs)) {
-		i915_add_request_no_flush(req);
+		i915_add_request(req);
 		return PTR_ERR(cs);
 	}
 
@@ -419,7 +419,7 @@ static int intel_overlay_off(struct intel_overlay *overlay)
 
 	cs = intel_ring_begin(req, 6);
 	if (IS_ERR(cs)) {
-		i915_add_request_no_flush(req);
+		i915_add_request(req);
 		return PTR_ERR(cs);
 	}
 
@@ -477,7 +477,7 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
 
 		cs = intel_ring_begin(req, 2);
 		if (IS_ERR(cs)) {
-			i915_add_request_no_flush(req);
+			i915_add_request(req);
 			return PTR_ERR(cs);
 		}
 

+ 1 - 1
drivers/gpu/drm/i915/intel_pm.c

@@ -7086,7 +7086,7 @@ static void __intel_autoenable_gt_powersave(struct work_struct *work)
 		rcs->init_context(req);
 
 	/* Mark the device busy, calling intel_enable_gt_powersave() */
-	i915_add_request_no_flush(req);
+	i915_add_request(req);
 
 unlock:
 	mutex_unlock(&dev_priv->drm.struct_mutex);