@@ -30,6 +30,19 @@
 struct amd_gpu_scheduler;
 struct amd_sched_rq;
 
+enum amd_sched_priority {
+	AMD_SCHED_PRIORITY_MIN,
+	AMD_SCHED_PRIORITY_LOW = AMD_SCHED_PRIORITY_MIN,
+	AMD_SCHED_PRIORITY_NORMAL,
+	AMD_SCHED_PRIORITY_HIGH_SW,
+	AMD_SCHED_PRIORITY_HIGH_HW,
+	AMD_SCHED_PRIORITY_KERNEL,
+	AMD_SCHED_PRIORITY_MAX,
+	AMD_SCHED_PRIORITY_INVALID = -1,
+	AMD_SCHED_PRIORITY_UNSET = -2
+};
+
+
 /**
  * A scheduler entity is a wrapper around a job queue or a group
  * of other entities. Entities take turns emitting jobs from their
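Note on the move: the enum is only relocated above struct amd_sched_job so that the new s_priority member below can refer to it; its values are unchanged. The ordering matters because the scheduler keeps one run queue per priority level and uses these values as array indices (the pointer-arithmetic helper removed at the bottom of this patch depends on exactly that layout): the values from AMD_SCHED_PRIORITY_MIN up to AMD_SCHED_PRIORITY_MAX must stay dense, AMD_SCHED_PRIORITY_MAX bounds the array, and the negative sentinels must never reach an indexing site. A minimal range-check sketch under those assumptions (amd_sched_priority_valid is a hypothetical helper, not part of this patch):

static inline bool amd_sched_priority_valid(enum amd_sched_priority prio)
{
	/* INVALID (-1) and UNSET (-2) are sentinels, never array indices */
	return prio >= AMD_SCHED_PRIORITY_MIN && prio < AMD_SCHED_PRIORITY_MAX;
}
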
@@ -83,6 +96,7 @@ struct amd_sched_job {
 	struct delayed_work		work_tdr;
 	uint64_t			id;
 	atomic_t			karma;
+	enum amd_sched_priority		s_priority;
 };
 
 extern const struct dma_fence_ops amd_sched_fence_ops_scheduled;
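With the priority cached on the job itself, it no longer has to be recomputed from run-queue pointer arithmetic on every lookup. The companion change on the C side presumably fills the field in once at job-init time; a sketch of that assignment, with amd_sched_job_set_priority as a hypothetical stand-in for wherever the driver actually performs it:

static void amd_sched_job_set_priority(struct amd_sched_job *job,
				       struct amd_gpu_scheduler *sched,
				       struct amd_sched_entity *entity)
{
	/* sched->sched_rq[] holds one run queue per priority level, so the
	 * element offset of the entity's run queue is its priority value */
	job->s_priority = entity->rq - sched->sched_rq;
}
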
@@ -114,18 +128,6 @@ struct amd_sched_backend_ops {
 	void (*free_job)(struct amd_sched_job *sched_job);
 };
 
-enum amd_sched_priority {
-	AMD_SCHED_PRIORITY_MIN,
-	AMD_SCHED_PRIORITY_LOW = AMD_SCHED_PRIORITY_MIN,
-	AMD_SCHED_PRIORITY_NORMAL,
-	AMD_SCHED_PRIORITY_HIGH_SW,
-	AMD_SCHED_PRIORITY_HIGH_HW,
-	AMD_SCHED_PRIORITY_KERNEL,
-	AMD_SCHED_PRIORITY_MAX,
-	AMD_SCHED_PRIORITY_INVALID = -1,
-	AMD_SCHED_PRIORITY_UNSET = -2
-};
-
 /**
  * One scheduler is implemented for each hardware ring
  */
@@ -176,10 +178,4 @@ bool amd_sched_dependency_optimized(struct dma_fence* fence,
 				    struct amd_sched_entity *entity);
 void amd_sched_job_kickout(struct amd_sched_job *s_job);
 
-static inline enum amd_sched_priority
-amd_sched_get_job_priority(struct amd_sched_job *job)
-{
-	return (job->s_entity->rq - job->sched->sched_rq);
-}
-
 #endif
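
For callers, the removed helper's result is now available as a plain field read; a hypothetical call site would change roughly like this:

	/* before */
	enum amd_sched_priority prio = amd_sched_get_job_priority(s_job);

	/* after */
	enum amd_sched_priority prio = s_job->s_priority;

Besides saving the subtraction, caching the value at init time presumably also keeps a job's reported priority stable even if its entity is later moved to a different run queue.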