@@ -91,7 +91,7 @@ void amdgpu_gem_force_release(struct amdgpu_device *adev)
 		spin_lock(&file->table_lock);
 		idr_for_each_entry(&file->object_idr, gobj, handle) {
 			WARN_ONCE(1, "And also active allocations!\n");
-			drm_gem_object_unreference_unlocked(gobj);
+			drm_gem_object_put_unlocked(gobj);
 		}
 		idr_destroy(&file->object_idr);
 		spin_unlock(&file->table_lock);
@@ -263,7 +263,7 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
 
 	r = drm_gem_handle_create(filp, gobj, &handle);
 	/* drop reference from allocate - handle holds it now */
-	drm_gem_object_unreference_unlocked(gobj);
+	drm_gem_object_put_unlocked(gobj);
 	if (r)
 		return r;
 
@@ -341,7 +341,7 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
 
 	r = drm_gem_handle_create(filp, gobj, &handle);
 	/* drop reference from allocate - handle holds it now */
-	drm_gem_object_unreference_unlocked(gobj);
+	drm_gem_object_put_unlocked(gobj);
 	if (r)
 		return r;
 
@@ -355,7 +355,7 @@ unlock_mmap_sem:
 	up_read(&current->mm->mmap_sem);
 
 release_object:
-	drm_gem_object_unreference_unlocked(gobj);
+	drm_gem_object_put_unlocked(gobj);
 
 	return r;
 }
@@ -374,11 +374,11 @@ int amdgpu_mode_dumb_mmap(struct drm_file *filp,
 	robj = gem_to_amdgpu_bo(gobj);
 	if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm) ||
 	    (robj->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) {
-		drm_gem_object_unreference_unlocked(gobj);
+		drm_gem_object_put_unlocked(gobj);
 		return -EPERM;
 	}
 	*offset_p = amdgpu_bo_mmap_offset(robj);
-	drm_gem_object_unreference_unlocked(gobj);
+	drm_gem_object_put_unlocked(gobj);
 	return 0;
 }
 
@@ -448,7 +448,7 @@ int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
 	} else
 		r = ret;
 
-	drm_gem_object_unreference_unlocked(gobj);
+	drm_gem_object_put_unlocked(gobj);
 	return r;
 }
 
@@ -491,7 +491,7 @@ int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
 unreserve:
 	amdgpu_bo_unreserve(robj);
 out:
-	drm_gem_object_unreference_unlocked(gobj);
+	drm_gem_object_put_unlocked(gobj);
 	return r;
 }
 
@@ -664,7 +664,7 @@ error_backoff:
 	ttm_eu_backoff_reservation(&ticket, &list);
 
 error_unref:
-	drm_gem_object_unreference_unlocked(gobj);
+	drm_gem_object_put_unlocked(gobj);
 	return r;
 }
 
@@ -726,7 +726,7 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
 	}
 
 out:
-	drm_gem_object_unreference_unlocked(gobj);
+	drm_gem_object_put_unlocked(gobj);
 	return r;
 }
 
@@ -754,7 +754,7 @@ int amdgpu_mode_dumb_create(struct drm_file *file_priv,
 
 	r = drm_gem_handle_create(file_priv, gobj, &handle);
 	/* drop reference from allocate - handle holds it now */
-	drm_gem_object_unreference_unlocked(gobj);
+	drm_gem_object_put_unlocked(gobj);
 	if (r) {
 		return r;
 	}