|
@@ -111,6 +111,7 @@ static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
|
|
|
p->uf_entry.priority = 0;
|
|
|
p->uf_entry.tv.bo = &p->uf_entry.robj->tbo;
|
|
|
p->uf_entry.tv.shared = true;
|
|
|
+ p->uf_entry.user_pages = NULL;
|
|
|
|
|
|
drm_gem_object_unreference_unlocked(gobj);
|
|
|
return 0;
|
|
@@ -297,6 +298,7 @@ int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
|
|
|
|
|
|
list_for_each_entry(lobj, validated, tv.head) {
|
|
|
struct amdgpu_bo *bo = lobj->robj;
|
|
|
+ bool binding_userptr = false;
|
|
|
struct mm_struct *usermm;
|
|
|
uint32_t domain;
|
|
|
|
|
@@ -304,6 +306,15 @@ int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
|
|
|
if (usermm && usermm != current->mm)
|
|
|
return -EPERM;
|
|
|
|
|
|
+ /* Check if we have user pages and nobody bound the BO already */
|
|
|
+ if (lobj->user_pages && bo->tbo.ttm->state != tt_bound) {
|
|
|
+ size_t size = sizeof(struct page *);
|
|
|
+
|
|
|
+ size *= bo->tbo.ttm->num_pages;
|
|
|
+ memcpy(bo->tbo.ttm->pages, lobj->user_pages, size);
|
|
|
+ binding_userptr = true;
|
|
|
+ }
|
|
|
+
|
|
|
if (bo->pin_count)
|
|
|
continue;
|
|
|
|
|
@@ -334,6 +345,11 @@ int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
|
|
|
}
|
|
|
return r;
|
|
|
}
|
|
|
+
|
|
|
+ if (binding_userptr) {
|
|
|
+ drm_free_large(lobj->user_pages);
|
|
|
+ lobj->user_pages = NULL;
|
|
|
+ }
|
|
|
}
|
|
|
return 0;
|
|
|
}
|
|
@@ -342,8 +358,10 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
|
|
|
union drm_amdgpu_cs *cs)
|
|
|
{
|
|
|
struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
|
|
|
+ struct amdgpu_bo_list_entry *e;
|
|
|
struct list_head duplicates;
|
|
|
bool need_mmap_lock = false;
|
|
|
+ unsigned i, tries = 10;
|
|
|
int r;
|
|
|
|
|
|
INIT_LIST_HEAD(&p->validated);
|
|
@@ -364,9 +382,81 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
|
|
|
if (need_mmap_lock)
|
|
|
down_read(¤t->mm->mmap_sem);
|
|
|
|
|
|
- r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true, &duplicates);
|
|
|
- if (unlikely(r != 0))
|
|
|
- goto error_reserve;
|
|
|
+ while (1) {
|
|
|
+ struct list_head need_pages;
|
|
|
+ unsigned i;
|
|
|
+
|
|
|
+ r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true,
|
|
|
+ &duplicates);
|
|
|
+ if (unlikely(r != 0))
|
|
|
+ goto error_free_pages;
|
|
|
+
|
|
|
+ /* Without a BO list we don't have userptr BOs */
|
|
|
+ if (!p->bo_list)
|
|
|
+ break;
|
|
|
+
|
|
|
+ INIT_LIST_HEAD(&need_pages);
|
|
|
+ for (i = p->bo_list->first_userptr;
|
|
|
+ i < p->bo_list->num_entries; ++i) {
|
|
|
+
|
|
|
+ e = &p->bo_list->array[i];
|
|
|
+
|
|
|
+ if (amdgpu_ttm_tt_userptr_invalidated(e->robj->tbo.ttm,
|
|
|
+ &e->user_invalidated) && e->user_pages) {
|
|
|
+
|
|
|
+ /* We acquired a page array, but somebody
|
|
|
+ * invalidated it. Free it and try again
|
|
|
+ */
|
|
|
+ release_pages(e->user_pages,
|
|
|
+ e->robj->tbo.ttm->num_pages,
|
|
|
+ false);
|
|
|
+ drm_free_large(e->user_pages);
|
|
|
+ e->user_pages = NULL;
|
|
|
+ }
|
|
|
+
|
|
|
+ if (e->robj->tbo.ttm->state != tt_bound &&
|
|
|
+ !e->user_pages) {
|
|
|
+ list_del(&e->tv.head);
|
|
|
+ list_add(&e->tv.head, &need_pages);
|
|
|
+
|
|
|
+ amdgpu_bo_unreserve(e->robj);
|
|
|
+ }
|
|
|
+ }
|
|
|
+
|
|
|
+ if (list_empty(&need_pages))
|
|
|
+ break;
|
|
|
+
|
|
|
+ /* Unreserve everything again. */
|
|
|
+ ttm_eu_backoff_reservation(&p->ticket, &p->validated);
|
|
|
+
|
|
|
+ /* We tried too often, just abort */
|
|
|
+ if (!--tries) {
|
|
|
+ r = -EDEADLK;
|
|
|
+ goto error_free_pages;
|
|
|
+ }
|
|
|
+
|
|
|
+ /* Fill the page arrays for all userptrs. */
|
|
|
+ list_for_each_entry(e, &need_pages, tv.head) {
|
|
|
+ struct ttm_tt *ttm = e->robj->tbo.ttm;
|
|
|
+
|
|
|
+ e->user_pages = drm_calloc_large(ttm->num_pages,
|
|
|
+ sizeof(struct page*));
|
|
|
+ if (!e->user_pages) {
|
|
|
+ r = -ENOMEM;
|
|
|
+ goto error_free_pages;
|
|
|
+ }
|
|
|
+
|
|
|
+ r = amdgpu_ttm_tt_get_user_pages(ttm, e->user_pages);
|
|
|
+ if (r) {
|
|
|
+ drm_free_large(e->user_pages);
|
|
|
+ e->user_pages = NULL;
|
|
|
+ goto error_free_pages;
|
|
|
+ }
|
|
|
+ }
|
|
|
+
|
|
|
+ /* And try again. */
|
|
|
+ list_splice(&need_pages, &p->validated);
|
|
|
+ }
|
|
|
|
|
|
amdgpu_vm_get_pt_bos(&fpriv->vm, &duplicates);
|
|
|
|
|
@@ -398,10 +488,26 @@ error_validate:
|
|
|
ttm_eu_backoff_reservation(&p->ticket, &p->validated);
|
|
|
}
|
|
|
|
|
|
-error_reserve:
|
|
|
+error_free_pages:
|
|
|
+
|
|
|
if (need_mmap_lock)
|
|
|
up_read(¤t->mm->mmap_sem);
|
|
|
|
|
|
+ if (p->bo_list) {
|
|
|
+ for (i = p->bo_list->first_userptr;
|
|
|
+ i < p->bo_list->num_entries; ++i) {
|
|
|
+ e = &p->bo_list->array[i];
|
|
|
+
|
|
|
+ if (!e->user_pages)
|
|
|
+ continue;
|
|
|
+
|
|
|
+ release_pages(e->user_pages,
|
|
|
+ e->robj->tbo.ttm->num_pages,
|
|
|
+ false);
|
|
|
+ drm_free_large(e->user_pages);
|
|
|
+ }
|
|
|
+ }
|
|
|
+
|
|
|
return r;
|
|
|
}
|
|
|
|