@@ -397,23 +397,10 @@ void *msm_gem_vaddr(struct drm_gem_object *obj)
 int msm_gem_queue_inactive_cb(struct drm_gem_object *obj,
 		struct msm_fence_cb *cb)
 {
-	struct drm_device *dev = obj->dev;
-	struct msm_drm_private *priv = dev->dev_private;
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
-	int ret = 0;
-
-	mutex_lock(&dev->struct_mutex);
-	if (!list_empty(&cb->work.entry)) {
-		ret = -EINVAL;
-	} else if (is_active(msm_obj)) {
-		cb->fence = max(msm_obj->read_fence, msm_obj->write_fence);
-		list_add_tail(&cb->work.entry, &priv->fence_cbs);
-	} else {
-		queue_work(priv->wq, &cb->work);
-	}
-	mutex_unlock(&dev->struct_mutex);
-
-	return ret;
+	uint32_t fence = msm_gem_fence(msm_obj,
+			MSM_PREP_READ | MSM_PREP_WRITE);
+	return msm_queue_fence_cb(obj->dev, cb, fence);
 }
 
 void msm_gem_move_to_active(struct drm_gem_object *obj,
@@ -452,12 +439,8 @@ int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op,
 	int ret = 0;
 
 	if (is_active(msm_obj)) {
-		uint32_t fence = 0;
+		uint32_t fence = msm_gem_fence(msm_obj, op);
 
-		if (op & MSM_PREP_READ)
-			fence = msm_obj->write_fence;
-		if (op & MSM_PREP_WRITE)
-			fence = max(fence, msm_obj->read_fence);
 		if (op & MSM_PREP_NOSYNC)
 			timeout = NULL;
 
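
For reference, the open-coded fence selection deleted in the second hunk is presumably what the new msm_gem_fence() helper factors out. The sketch below is an assumption reconstructed from the removed lines, not necessarily the exact helper introduced elsewhere in this series:

static inline uint32_t msm_gem_fence(struct msm_gem_object *msm_obj,
		uint32_t op)
{
	uint32_t fence = 0;

	/* A read must wait for the last writer to finish: */
	if (op & MSM_PREP_READ)
		fence = msm_obj->write_fence;
	/* A write must additionally wait for the last reader: */
	if (op & MSM_PREP_WRITE)
		fence = max(fence, msm_obj->read_fence);

	return fence;
}

msm_queue_fence_cb() is likewise assumed to take over the struct_mutex locking and fence_cbs list/workqueue handling that msm_gem_queue_inactive_cb() used to open-code, which is why the callback path collapses to the fence lookup plus a single call.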