@@ -127,6 +127,37 @@ int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
 	return 0;
 }
 
+static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
+				      struct drm_amdgpu_cs_chunk_fence *fence_data)
+{
+	struct drm_gem_object *gobj;
+	uint32_t handle;
+
+	handle = fence_data->handle;
+	gobj = drm_gem_object_lookup(p->adev->ddev, p->filp,
+				     handle);
+	if (gobj == NULL)
+		return -EINVAL;
+
+	p->uf.bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
+	p->uf.offset = fence_data->offset;
+
+	if (amdgpu_ttm_tt_has_userptr(p->uf.bo->tbo.ttm)) {
+		drm_gem_object_unreference_unlocked(gobj);
+		return -EINVAL;
+	}
+
+	p->uf_entry.robj = amdgpu_bo_ref(p->uf.bo);
+	p->uf_entry.prefered_domains = AMDGPU_GEM_DOMAIN_GTT;
+	p->uf_entry.allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
+	p->uf_entry.priority = 0;
+	p->uf_entry.tv.bo = &p->uf_entry.robj->tbo;
+	p->uf_entry.tv.shared = true;
+
+	drm_gem_object_unreference_unlocked(gobj);
+	return 0;
+}
+
 int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
 {
 	union drm_amdgpu_cs *cs = data;
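
The helper added above centralizes the user fence chunk handling: it looks up the GEM object named by the chunk, takes its own reference on the BO, rejects userptr BOs, and pre-fills the p->uf_entry validation entry restricted to the GTT domain. As a reference point, here is a minimal userspace-side sketch of how such a chunk might be described with the uapi structs from include/uapi/drm/amdgpu_drm.h; fill_fence_chunk() is an illustrative helper, not a libdrm API:

#include <stdint.h>
#include <drm/amdgpu_drm.h>

/* Illustrative helper (not a libdrm API): point an AMDGPU_CHUNK_ID_FENCE
 * chunk at a fence descriptor naming the BO handle and byte offset that
 * the kernel side above parses into p->uf. */
static void fill_fence_chunk(struct drm_amdgpu_cs_chunk *chunk,
			     struct drm_amdgpu_cs_chunk_fence *fence,
			     uint32_t bo_handle, uint32_t offset)
{
	fence->handle = bo_handle;	/* GEM handle of the fence BO */
	fence->offset = offset;		/* byte offset of the fence word */

	chunk->chunk_id = AMDGPU_CHUNK_ID_FENCE;
	chunk->length_dw = sizeof(*fence) / sizeof(uint32_t);
	chunk->chunk_data = (uintptr_t)fence;
}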
@@ -207,28 +238,15 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
 
 		case AMDGPU_CHUNK_ID_FENCE:
 			size = sizeof(struct drm_amdgpu_cs_chunk_fence);
-			if (p->chunks[i].length_dw * sizeof(uint32_t) >= size) {
-				uint32_t handle;
-				struct drm_gem_object *gobj;
-				struct drm_amdgpu_cs_chunk_fence *fence_data;
-
-				fence_data = (void *)p->chunks[i].kdata;
-				handle = fence_data->handle;
-				gobj = drm_gem_object_lookup(p->adev->ddev,
-							     p->filp, handle);
-				if (gobj == NULL) {
-					ret = -EINVAL;
-					goto free_partial_kdata;
-				}
-
-				p->uf.bo = gem_to_amdgpu_bo(gobj);
-				amdgpu_bo_ref(p->uf.bo);
-				drm_gem_object_unreference_unlocked(gobj);
-				p->uf.offset = fence_data->offset;
-			} else {
+			if (p->chunks[i].length_dw * sizeof(uint32_t) < size) {
 				ret = -EINVAL;
 				goto free_partial_kdata;
 			}
+
+			ret = amdgpu_cs_user_fence_chunk(p, (void *)p->chunks[i].kdata);
+			if (ret)
+				goto free_partial_kdata;
+
 			break;
 
 		case AMDGPU_CHUNK_ID_DEPENDENCIES:
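
Note the inverted bounds check: the malformed-chunk case now exits early, and the happy path is a single call into amdgpu_cs_user_fence_chunk(). The invariant the check establishes before kdata is cast is that the chunk payload, counted in dwords, covers the fence struct. A standalone sketch of that invariant, where chunk_payload_ok() is an illustrative name only, not a kernel helper:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* The chunk payload is counted in 32-bit dwords; casting kdata to a
 * struct is only safe once the payload covers that struct's size. */
static bool chunk_payload_ok(uint32_t length_dw, size_t need)
{
	return (size_t)length_dw * sizeof(uint32_t) >= need;
}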
@@ -391,6 +409,9 @@ static int amdgpu_cs_parser_relocs(struct amdgpu_cs_parser *p)
 	p->vm_bos = amdgpu_vm_get_bos(p->adev, &fpriv->vm,
 				      &p->validated);
 
+	if (p->uf.bo)
+		list_add(&p->uf_entry.tv.head, &p->validated);
+
 	if (need_mmap_lock)
 		down_read(&current->mm->mmap_sem);
 
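
Because amdgpu_cs_user_fence_chunk() already filled in p->uf_entry, the relocation pass only has to splice that entry onto p->validated when a user fence is present, so the fence BO is reserved and validated together with every other BO of the submission. A generic sketch of the pattern, with struct demo_entry standing in for the real validation entry type:

#include <linux/list.h>

/* Sketch only: entries embed a struct list_head, and the CS path
 * gathers them onto one "validated" list that the later reservation
 * and validation step walks. demo_* names are not amdgpu types. */
struct demo_entry {
	struct list_head head;
	/* ... per-buffer validation state ... */
};

static void demo_collect(struct demo_entry *e, struct list_head *validated)
{
	list_add(&e->head, validated);
}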
@@ -488,8 +509,8 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bo
 	for (i = 0; i < parser->num_ibs; i++)
 		amdgpu_ib_free(parser->adev, &parser->ibs[i]);
 	kfree(parser->ibs);
-	if (parser->uf.bo)
-		amdgpu_bo_unref(&parser->uf.bo);
+	amdgpu_bo_unref(&parser->uf.bo);
+	amdgpu_bo_unref(&parser->uf_entry.robj);
 }
 
 static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p,
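
The teardown drops the NULL check before amdgpu_bo_unref(): like most kernel *_unref()/*_put() helpers, it is expected to tolerate a NULL buffer pointer, so both references can be released unconditionally, with the second unref balancing the extra reference taken for uf_entry.robj. A generic sketch of that NULL-tolerant contract; the demo_buf names are purely illustrative:

#include <stddef.h>

struct demo_buf { int refcount; };

/* NULL-tolerant release: callers never need their own "if (ptr)" guard. */
static void demo_buf_unref(struct demo_buf **buf)
{
	if (buf == NULL || *buf == NULL)
		return;
	if (--(*buf)->refcount == 0) {
		/* ... free the buffer ... */
	}
	*buf = NULL;
}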