@@ -139,6 +139,20 @@ int amdgpu_sa_bo_manager_suspend(struct amdgpu_device *adev,
 	return r;
 }
 
+static uint32_t amdgpu_sa_get_ring_from_fence(struct fence *f)
+{
+	struct amdgpu_fence *a_fence;
+	struct amd_sched_fence *s_fence;
+
+	s_fence = to_amd_sched_fence(f);
+	if (s_fence)
+		return s_fence->entity->scheduler->ring_id;
+	a_fence = to_amdgpu_fence(f);
+	if (a_fence)
+		return a_fence->ring->idx;
+	return 0;
+}
+
 static void amdgpu_sa_bo_remove_locked(struct amdgpu_sa_bo *sa_bo)
 {
 	struct amdgpu_sa_manager *sa_manager = sa_bo->manager;
@@ -147,7 +161,7 @@ static void amdgpu_sa_bo_remove_locked(struct amdgpu_sa_bo *sa_bo)
 	}
 	list_del_init(&sa_bo->olist);
 	list_del_init(&sa_bo->flist);
-	amdgpu_fence_unref(&sa_bo->fence);
+	fence_put(sa_bo->fence);
 	kfree(sa_bo);
 }
 
@@ -161,7 +175,7 @@ static void amdgpu_sa_bo_try_free(struct amdgpu_sa_manager *sa_manager)
 	sa_bo = list_entry(sa_manager->hole->next, struct amdgpu_sa_bo, olist);
 	list_for_each_entry_safe_from(sa_bo, tmp, &sa_manager->olist, olist) {
 		if (sa_bo->fence == NULL ||
-		    !fence_is_signaled(&sa_bo->fence->base)) {
+		    !fence_is_signaled(sa_bo->fence)) {
 			return;
 		}
 		amdgpu_sa_bo_remove_locked(sa_bo);
@@ -246,7 +260,7 @@ static bool amdgpu_sa_event(struct amdgpu_sa_manager *sa_manager,
 }
 
 static bool amdgpu_sa_bo_next_hole(struct amdgpu_sa_manager *sa_manager,
-				   struct amdgpu_fence **fences,
+				   struct fence **fences,
 				   unsigned *tries)
 {
 	struct amdgpu_sa_bo *best_bo = NULL;
@@ -275,7 +289,7 @@ static bool amdgpu_sa_bo_next_hole(struct amdgpu_sa_manager *sa_manager,
 		sa_bo = list_first_entry(&sa_manager->flist[i],
 					 struct amdgpu_sa_bo, flist);
 
-		if (!fence_is_signaled(&sa_bo->fence->base)) {
+		if (!fence_is_signaled(sa_bo->fence)) {
 			fences[i] = sa_bo->fence;
 			continue;
 		}
@@ -299,7 +313,8 @@ static bool amdgpu_sa_bo_next_hole(struct amdgpu_sa_manager *sa_manager,
 	}
 
 	if (best_bo) {
-		++tries[best_bo->fence->ring->idx];
+		uint32_t idx = amdgpu_sa_get_ring_from_fence(best_bo->fence);
+		++tries[idx];
 		sa_manager->hole = best_bo->olist.prev;
 
 		/* we knew that this one is signaled,
@@ -315,7 +330,7 @@ int amdgpu_sa_bo_new(struct amdgpu_device *adev,
 		     struct amdgpu_sa_bo **sa_bo,
 		     unsigned size, unsigned align)
 {
-	struct amdgpu_fence *fences[AMDGPU_MAX_RINGS];
+	struct fence *fences[AMDGPU_MAX_RINGS];
 	unsigned tries[AMDGPU_MAX_RINGS];
 	int i, r;
 	signed long t;
@@ -373,7 +388,7 @@ int amdgpu_sa_bo_new(struct amdgpu_device *adev,
 }
 
 void amdgpu_sa_bo_free(struct amdgpu_device *adev, struct amdgpu_sa_bo **sa_bo,
-		       struct amdgpu_fence *fence)
+		       struct fence *fence)
 {
 	struct amdgpu_sa_manager *sa_manager;
 
@@ -383,10 +398,11 @@ void amdgpu_sa_bo_free(struct amdgpu_device *adev, struct amdgpu_sa_bo **sa_bo,
 
 	sa_manager = (*sa_bo)->manager;
 	spin_lock(&sa_manager->wq.lock);
-	if (fence && !fence_is_signaled(&fence->base)) {
-		(*sa_bo)->fence = amdgpu_fence_ref(fence);
-		list_add_tail(&(*sa_bo)->flist,
-			      &sa_manager->flist[fence->ring->idx]);
+	if (fence && !fence_is_signaled(fence)) {
+		uint32_t idx;
+		(*sa_bo)->fence = fence_get(fence);
+		idx = amdgpu_sa_get_ring_from_fence(fence);
+		list_add_tail(&(*sa_bo)->flist, &sa_manager->flist[idx]);
 	} else {
 		amdgpu_sa_bo_remove_locked(*sa_bo);
 	}
@@ -413,8 +429,16 @@ void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
 		seq_printf(m, "[0x%010llx 0x%010llx] size %8lld",
 			   soffset, eoffset, eoffset - soffset);
 		if (i->fence) {
-			seq_printf(m, " protected by 0x%016llx on ring %d",
-				   i->fence->seq, i->fence->ring->idx);
+			struct amdgpu_fence *a_fence = to_amdgpu_fence(i->fence);
+			struct amd_sched_fence *s_fence = to_amd_sched_fence(i->fence);
+			if (a_fence)
+				seq_printf(m, " protected by 0x%016llx on ring %d",
+					   a_fence->seq, a_fence->ring->idx);
+			if (s_fence)
+				seq_printf(m, " protected by 0x%016llx on ring %d",
+					   s_fence->v_seq,
+					   s_fence->entity->scheduler->ring_id);
+
 		}
 		seq_printf(m, "\n");
 	}
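
A note on the dispatch pattern for readers less familiar with it: amdgpu_sa_get_ring_from_fence() works because both fence flavours embed a common struct fence, and the checked-downcast helpers to_amdgpu_fence()/to_amd_sched_fence() return NULL when handed a fence of the other type, so the function can probe each type in turn. The standalone sketch below illustrates that pattern; the type and field names (hw_fence, sched_fence, ring_idx, ring_id, the simplified fence_ops) are stand-ins for illustration, not the kernel definitions.

/*
 * Illustrative sketch only: simplified stand-in types, not the kernel
 * definitions.  Every wrapper embeds a struct fence, and the ops
 * pointer identifies which wrapper a given fence really is.
 */
#include <stddef.h>
#include <stdio.h>

struct fence_ops { int unused; };
struct fence { const struct fence_ops *ops; };

static const struct fence_ops hw_fence_ops = { 0 };
static const struct fence_ops sched_fence_ops = { 0 };

struct hw_fence    { struct fence base; unsigned int ring_idx; };
struct sched_fence { struct fence base; unsigned int ring_id; };

/* recover the wrapper from a pointer to its embedded base member */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static struct hw_fence *to_hw_fence(struct fence *f)
{
	/* NULL means "not a HW fence", mirroring to_amdgpu_fence() */
	return f->ops == &hw_fence_ops ?
		container_of(f, struct hw_fence, base) : NULL;
}

static struct sched_fence *to_sched_fence(struct fence *f)
{
	return f->ops == &sched_fence_ops ?
		container_of(f, struct sched_fence, base) : NULL;
}

static unsigned int ring_from_fence(struct fence *f)
{
	struct sched_fence *s_fence = to_sched_fence(f);
	struct hw_fence *a_fence;

	if (s_fence)
		return s_fence->ring_id;
	a_fence = to_hw_fence(f);
	if (a_fence)
		return a_fence->ring_idx;
	return 0;	/* unknown fence type: fall back to ring 0 */
}

int main(void)
{
	struct hw_fence    a = { { &hw_fence_ops }, 3 };
	struct sched_fence s = { { &sched_fence_ops }, 5 };

	/* each fence reports the ring of its real type: prints "3 5" */
	printf("%u %u\n", ring_from_fence(&a.base), ring_from_fence(&s.base));
	return 0;
}

Because the identification goes through the embedded base object, the generic fence_get()/fence_put() refcounting also works on either flavour, which is what lets the patch above replace amdgpu_fence_ref()/amdgpu_fence_unref() with the common calls.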