@@ -95,7 +95,7 @@ int adreno_hw_init(struct msm_gpu *gpu)
 
 	DBG("%s", gpu->name);
 
-	ret = msm_gem_get_iova_locked(gpu->rb->bo, gpu->id, &gpu->rb_iova);
+	ret = msm_gem_get_iova(gpu->rb->bo, gpu->id, &gpu->rb_iova);
 	if (ret) {
 		gpu->rb_iova = 0;
 		dev_err(gpu->dev->dev, "could not map ringbuffer: %d\n", ret);
@@ -370,8 +370,10 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
 		return ret;
 	}
 
+	mutex_lock(&drm->struct_mutex);
 	gpu->memptrs_bo = msm_gem_new(drm, sizeof(*gpu->memptrs),
 			MSM_BO_UNCACHED);
+	mutex_unlock(&drm->struct_mutex);
 	if (IS_ERR(gpu->memptrs_bo)) {
 		ret = PTR_ERR(gpu->memptrs_bo);
 		gpu->memptrs_bo = NULL;
@@ -379,13 +381,13 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
 		return ret;
 	}
 
-	gpu->memptrs = msm_gem_vaddr_locked(gpu->memptrs_bo);
+	gpu->memptrs = msm_gem_vaddr(gpu->memptrs_bo);
 	if (!gpu->memptrs) {
 		dev_err(drm->dev, "could not vmap memptrs\n");
 		return -ENOMEM;
 	}
 
-	ret = msm_gem_get_iova_locked(gpu->memptrs_bo, gpu->base.id,
+	ret = msm_gem_get_iova(gpu->memptrs_bo, gpu->base.id,
 			&gpu->memptrs_iova);
 	if (ret) {
 		dev_err(drm->dev, "could not map memptrs: %d\n", ret);
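
Note on the conversions above: in the msm driver of this era, the non-_locked GEM helpers are thin wrappers that take dev->struct_mutex themselves before deferring to the _locked implementations, so callers such as adreno_hw_init() and adreno_gpu_init() must not already hold the lock when calling them. A rough sketch of that wrapper pattern, assuming the contemporaneous msm_gem.c (the exact signature, e.g. the uint32_t iova type, is an assumption taken from that era and not part of this patch):

	/* Sketch of the unlocked-wrapper pattern assumed in msm_gem.c:
	 * acquire struct_mutex, call the _locked variant, drop the lock. */
	int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova)
	{
		int ret;

		/* Take the lock that the _locked variant expects to be held. */
		ret = mutex_lock_interruptible(&obj->dev->struct_mutex);
		if (!ret) {
			ret = msm_gem_get_iova_locked(obj, id, iova);
			mutex_unlock(&obj->dev->struct_mutex);
		}

		return ret;
	}

msm_gem_new(), by contrast, has no unlocked variant to switch to here and presumably still expects struct_mutex to be held by the caller, which is why the patch brackets the allocation with an explicit mutex_lock()/mutex_unlock() pair instead.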