--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -563,10 +563,10 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
 	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
 	struct amdgpu_bo_list_entry *e;
 	struct list_head duplicates;
-	unsigned i, tries = 10;
 	struct amdgpu_bo *gds;
 	struct amdgpu_bo *gws;
 	struct amdgpu_bo *oa;
+	unsigned tries = 10;
 	int r;
 
 	INIT_LIST_HEAD(&p->validated);
@@ -596,7 +596,6 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
 
 	while (1) {
 		struct list_head need_pages;
-		unsigned i;
 
 		r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true,
 					   &duplicates);
@@ -611,12 +610,8 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
 			break;
 
 		INIT_LIST_HEAD(&need_pages);
-		for (i = p->bo_list->first_userptr;
-		     i < p->bo_list->num_entries; ++i) {
-			struct amdgpu_bo *bo;
-
-			e = &p->bo_list->array[i];
-			bo = e->robj;
+		amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
+			struct amdgpu_bo *bo = e->robj;
 
 			if (amdgpu_ttm_tt_userptr_invalidated(bo->tbo.ttm,
 				 &e->user_invalidated) && e->user_pages) {
@@ -710,16 +705,14 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
 
 	if (p->bo_list) {
 		struct amdgpu_vm *vm = &fpriv->vm;
-		unsigned i;
+		struct amdgpu_bo_list_entry *e;
 
 		gds = p->bo_list->gds_obj;
 		gws = p->bo_list->gws_obj;
 		oa = p->bo_list->oa_obj;
-		for (i = 0; i < p->bo_list->num_entries; i++) {
-			struct amdgpu_bo *bo = p->bo_list->array[i].robj;
-
-			p->bo_list->array[i].bo_va = amdgpu_vm_bo_find(vm, bo);
-		}
+		amdgpu_bo_list_for_each_entry(e, p->bo_list)
+			e->bo_va = amdgpu_vm_bo_find(vm, e->robj);
 	} else {
 		gds = p->adev->gds.gds_gfx_bo;
 		gws = p->adev->gds.gws_gfx_bo;
@@ -753,10 +746,7 @@ error_validate:
 error_free_pages:
 
 	if (p->bo_list) {
-		for (i = p->bo_list->first_userptr;
-		     i < p->bo_list->num_entries; ++i) {
-			e = &p->bo_list->array[i];
-
+		amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
 			if (!e->user_pages)
 				continue;
 
@@ -830,7 +820,7 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p)
 	struct amdgpu_vm *vm = &fpriv->vm;
 	struct amdgpu_bo_va *bo_va;
 	struct amdgpu_bo *bo;
-	int i, r;
+	int r;
 
 	r = amdgpu_vm_clear_freed(adev, vm, NULL);
 	if (r)
@@ -861,15 +851,17 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p)
 	}
 
 	if (p->bo_list) {
-		for (i = 0; i < p->bo_list->num_entries; i++) {
+		struct amdgpu_bo_list_entry *e;
+
+		amdgpu_bo_list_for_each_entry(e, p->bo_list) {
 			struct dma_fence *f;
 
 			/* ignore duplicates */
-			bo = p->bo_list->array[i].robj;
+			bo = e->robj;
 			if (!bo)
 				continue;
 
-			bo_va = p->bo_list->array[i].bo_va;
+			bo_va = e->bo_va;
 			if (bo_va == NULL)
 				continue;
 
@@ -898,14 +890,15 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p)
 			return r;
 
 	if (amdgpu_vm_debug && p->bo_list) {
+		struct amdgpu_bo_list_entry *e;
+
 		/* Invalidate all BOs to test for userspace bugs */
-		for (i = 0; i < p->bo_list->num_entries; i++) {
+		amdgpu_bo_list_for_each_entry(e, p->bo_list) {
 			/* ignore duplicates */
-			bo = p->bo_list->array[i].robj;
-			if (!bo)
+			if (!e->robj)
 				continue;
 
-			amdgpu_vm_bo_invalidate(adev, bo, false);
+			amdgpu_vm_bo_invalidate(adev, e->robj, false);
 		}
 	}
 
@@ -1225,16 +1218,16 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
 	struct drm_sched_entity *entity = &p->ctx->rings[ring->idx].entity;
 	enum drm_sched_priority priority;
 	struct amdgpu_job *job;
-	unsigned i;
 	uint64_t seq;
 	int r;
 
 	amdgpu_mn_lock(p->mn);
 	if (p->bo_list) {
-		for (i = p->bo_list->first_userptr;
-		     i < p->bo_list->num_entries; ++i) {
-			struct amdgpu_bo *bo = p->bo_list->array[i].robj;
+		struct amdgpu_bo_list_entry *e;
+
+		amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
+			struct amdgpu_bo *bo = e->robj;
 
 			if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm)) {
 				amdgpu_mn_unlock(p->mn);
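
For reference, the two amdgpu_bo_list_for_each_*() iterators this patch switches to are declared in amdgpu_bo_list.h. A minimal sketch of what they presumably expand to, assuming the bo_list still keeps its entries in the `array` member that the removed lines index (the exact upstream definitions may differ):

	/* Sketch only: walk every entry of a bo_list. */
	#define amdgpu_bo_list_for_each_entry(e, list)			\
		for (e = &(list)->array[0];				\
		     e != &(list)->array[(list)->num_entries];		\
		     ++e)

	/* Sketch only: walk just the userptr entries, which are grouped
	 * at the tail of the array starting at first_userptr. */
	#define amdgpu_bo_list_for_each_userptr_entry(e, list)		\
		for (e = &(list)->array[(list)->first_userptr];		\
		     e != &(list)->array[(list)->num_entries];		\
		     ++e)

Either way, the callers no longer index p->bo_list->array[i] directly, so the bo_list storage layout can change later without touching amdgpu_cs.c.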