gpu_scheduler.c

/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <uapi/linux/sched/types.h>
#include <drm/drmP.h>
#include "gpu_scheduler.h"

#define CREATE_TRACE_POINTS
#include "gpu_sched_trace.h"

static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity);
static void amd_sched_wakeup(struct amd_gpu_scheduler *sched);
static void amd_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb);

/* Initialize a given run queue struct */
static void amd_sched_rq_init(struct amd_sched_rq *rq)
{
        spin_lock_init(&rq->lock);
        INIT_LIST_HEAD(&rq->entities);
        rq->current_entity = NULL;
}

static void amd_sched_rq_add_entity(struct amd_sched_rq *rq,
                                    struct amd_sched_entity *entity)
{
        if (!list_empty(&entity->list))
                return;
        spin_lock(&rq->lock);
        list_add_tail(&entity->list, &rq->entities);
        spin_unlock(&rq->lock);
}

static void amd_sched_rq_remove_entity(struct amd_sched_rq *rq,
                                       struct amd_sched_entity *entity)
{
        if (list_empty(&entity->list))
                return;
        spin_lock(&rq->lock);
        list_del_init(&entity->list);
        if (rq->current_entity == entity)
                rq->current_entity = NULL;
        spin_unlock(&rq->lock);
}

/**
 * Select an entity which could provide a job to run
 *
 * @rq The run queue to check.
 *
 * Try to find a ready entity, returns NULL if none found.
 */
static struct amd_sched_entity *
amd_sched_rq_select_entity(struct amd_sched_rq *rq)
{
        struct amd_sched_entity *entity;

        spin_lock(&rq->lock);

        entity = rq->current_entity;
        if (entity) {
                list_for_each_entry_continue(entity, &rq->entities, list) {
                        if (amd_sched_entity_is_ready(entity)) {
                                rq->current_entity = entity;
                                spin_unlock(&rq->lock);
                                return entity;
                        }
                }
        }

        list_for_each_entry(entity, &rq->entities, list) {
                if (amd_sched_entity_is_ready(entity)) {
                        rq->current_entity = entity;
                        spin_unlock(&rq->lock);
                        return entity;
                }

                if (entity == rq->current_entity)
                        break;
        }

        spin_unlock(&rq->lock);

        return NULL;
}

/**
 * Init a context entity used by the scheduler when submitting to a HW ring.
 *
 * @sched The pointer to the scheduler
 * @entity The pointer to a valid amd_sched_entity
 * @rq The run queue this entity belongs to
 * @jobs The max number of jobs in the job queue
 *
 * Returns 0 on success, negative error code on failure.
 */
int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
                          struct amd_sched_entity *entity,
                          struct amd_sched_rq *rq,
                          uint32_t jobs)
{
        int r;

        if (!(sched && entity && rq))
                return -EINVAL;

        memset(entity, 0, sizeof(struct amd_sched_entity));
        INIT_LIST_HEAD(&entity->list);
        entity->rq = rq;
        entity->sched = sched;

        spin_lock_init(&entity->queue_lock);
        r = kfifo_alloc(&entity->job_queue, jobs * sizeof(void *), GFP_KERNEL);
        if (r)
                return r;

        atomic_set(&entity->fence_seq, 0);
        entity->fence_context = dma_fence_context_alloc(2);

        return 0;
}
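
/*
 * A minimal usage sketch (illustrative only, not part of the original file):
 * a driver ring that owns an amd_gpu_scheduler would typically create one
 * entity per context. The names "ring" and "ctx" and the queue depth of 32
 * below are hypothetical.
 *
 *      struct amd_sched_entity *entity = &ctx->entity;
 *      int r;
 *
 *      r = amd_sched_entity_init(&ring->sched, entity,
 *                                &ring->sched.sched_rq[AMD_SCHED_PRIORITY_MIN],
 *                                32);
 *      if (r)
 *              return r;
 */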

/**
 * Query if entity is initialized
 *
 * @sched Pointer to scheduler instance
 * @entity The pointer to a valid scheduler entity
 *
 * return true if entity is initialized, false otherwise
 */
static bool amd_sched_entity_is_initialized(struct amd_gpu_scheduler *sched,
                                            struct amd_sched_entity *entity)
{
        return entity->sched == sched &&
                entity->rq != NULL;
}

/**
 * Check if entity is idle
 *
 * @entity The pointer to a valid scheduler entity
 *
 * Return true if the entity doesn't have any unscheduled jobs.
 */
static bool amd_sched_entity_is_idle(struct amd_sched_entity *entity)
{
        rmb();
        if (kfifo_is_empty(&entity->job_queue))
                return true;

        return false;
}

/**
 * Check if entity is ready
 *
 * @entity The pointer to a valid scheduler entity
 *
 * Return true if entity could provide a job.
 */
static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity)
{
        if (kfifo_is_empty(&entity->job_queue))
                return false;

        if (ACCESS_ONCE(entity->dependency))
                return false;

        return true;
}

/**
 * Destroy a context entity
 *
 * @sched Pointer to scheduler instance
 * @entity The pointer to a valid scheduler entity
 *
 * Cleanup and free the allocated resources.
 */
void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
                           struct amd_sched_entity *entity)
{
        struct amd_sched_rq *rq = entity->rq;
        int r;

        if (!amd_sched_entity_is_initialized(sched, entity))
                return;
        /**
         * The client will not queue more IBs during this fini, consume existing
         * queued IBs or discard them on SIGKILL
         */
        if ((current->flags & PF_SIGNALED) && current->exit_code == SIGKILL)
                r = -ERESTARTSYS;
        else
                r = wait_event_killable(sched->job_scheduled,
                                        amd_sched_entity_is_idle(entity));
        amd_sched_rq_remove_entity(rq, entity);
        if (r) {
                struct amd_sched_job *job;

                /* Park the scheduler thread for a moment to make sure it
                 * isn't processing our entity.
                 */
                kthread_park(sched->thread);
                kthread_unpark(sched->thread);
                while (kfifo_out(&entity->job_queue, &job, sizeof(job))) {
                        struct amd_sched_fence *s_fence = job->s_fence;

                        amd_sched_fence_scheduled(s_fence);
                        dma_fence_set_error(&s_fence->finished, -ESRCH);
                        amd_sched_fence_finished(s_fence);
                        dma_fence_put(&s_fence->finished);
                        sched->ops->free_job(job);
                }
        }
        kfifo_free(&entity->job_queue);
}

static void amd_sched_entity_wakeup(struct dma_fence *f, struct dma_fence_cb *cb)
{
        struct amd_sched_entity *entity =
                container_of(cb, struct amd_sched_entity, cb);
        entity->dependency = NULL;
        dma_fence_put(f);
        amd_sched_wakeup(entity->sched);
}

static void amd_sched_entity_clear_dep(struct dma_fence *f, struct dma_fence_cb *cb)
{
        struct amd_sched_entity *entity =
                container_of(cb, struct amd_sched_entity, cb);
        entity->dependency = NULL;
        dma_fence_put(f);
}

bool amd_sched_dependency_optimized(struct dma_fence *fence,
                                    struct amd_sched_entity *entity)
{
        struct amd_gpu_scheduler *sched = entity->sched;
        struct amd_sched_fence *s_fence;

        if (!fence || dma_fence_is_signaled(fence))
                return false;
        if (fence->context == entity->fence_context)
                return true;
        s_fence = to_amd_sched_fence(fence);
        if (s_fence && s_fence->sched == sched)
                return true;

        return false;
}

static bool amd_sched_entity_add_dependency_cb(struct amd_sched_entity *entity)
{
        struct amd_gpu_scheduler *sched = entity->sched;
        struct dma_fence *fence = entity->dependency;
        struct amd_sched_fence *s_fence;

        if (fence->context == entity->fence_context) {
                /* We can ignore fences from ourself */
                dma_fence_put(entity->dependency);
                return false;
        }

        s_fence = to_amd_sched_fence(fence);
        if (s_fence && s_fence->sched == sched) {
                /*
                 * Fence is from the same scheduler, only need to wait for
                 * it to be scheduled
                 */
                fence = dma_fence_get(&s_fence->scheduled);
                dma_fence_put(entity->dependency);
                entity->dependency = fence;
                if (!dma_fence_add_callback(fence, &entity->cb,
                                            amd_sched_entity_clear_dep))
                        return true;

                /* Ignore it when it is already scheduled */
                dma_fence_put(fence);
                return false;
        }

        if (!dma_fence_add_callback(entity->dependency, &entity->cb,
                                    amd_sched_entity_wakeup))
                return true;

        dma_fence_put(entity->dependency);
        return false;
}

static struct amd_sched_job *
amd_sched_entity_peek_job(struct amd_sched_entity *entity)
{
        struct amd_gpu_scheduler *sched = entity->sched;
        struct amd_sched_job *sched_job;

        if (!kfifo_out_peek(&entity->job_queue, &sched_job, sizeof(sched_job)))
                return NULL;

        while ((entity->dependency = sched->ops->dependency(sched_job)))
                if (amd_sched_entity_add_dependency_cb(entity))
                        return NULL;

        return sched_job;
}

/**
 * Helper to submit a job to the job queue
 *
 * @sched_job The pointer to job required to submit
 *
 * Returns true if we could submit the job.
 */
static bool amd_sched_entity_in(struct amd_sched_job *sched_job)
{
        struct amd_gpu_scheduler *sched = sched_job->sched;
        struct amd_sched_entity *entity = sched_job->s_entity;
        bool added, first = false;

        spin_lock(&entity->queue_lock);
        added = kfifo_in(&entity->job_queue, &sched_job,
                         sizeof(sched_job)) == sizeof(sched_job);

        if (added && kfifo_len(&entity->job_queue) == sizeof(sched_job))
                first = true;

        spin_unlock(&entity->queue_lock);

        /* first job wakes up scheduler */
        if (first) {
                /* Add the entity to the run queue */
                amd_sched_rq_add_entity(entity->rq, entity);
                amd_sched_wakeup(sched);
        }
        return added;
}

/* amd_sched_job_finish is called once the hw fence has signaled */
static void amd_sched_job_finish(struct work_struct *work)
{
        struct amd_sched_job *s_job = container_of(work, struct amd_sched_job,
                                                   finish_work);
        struct amd_gpu_scheduler *sched = s_job->sched;

        /* remove job from ring_mirror_list */
        spin_lock(&sched->job_list_lock);
        list_del_init(&s_job->node);
        if (sched->timeout != MAX_SCHEDULE_TIMEOUT) {
                struct amd_sched_job *next;

                spin_unlock(&sched->job_list_lock);
                cancel_delayed_work_sync(&s_job->work_tdr);
                spin_lock(&sched->job_list_lock);

                /* queue TDR for next job */
                next = list_first_entry_or_null(&sched->ring_mirror_list,
                                                struct amd_sched_job, node);

                if (next)
                        schedule_delayed_work(&next->work_tdr, sched->timeout);
        }
        spin_unlock(&sched->job_list_lock);
        sched->ops->free_job(s_job);
}

static void amd_sched_job_finish_cb(struct dma_fence *f,
                                    struct dma_fence_cb *cb)
{
        struct amd_sched_job *job = container_of(cb, struct amd_sched_job,
                                                 finish_cb);
        schedule_work(&job->finish_work);
}

static void amd_sched_job_begin(struct amd_sched_job *s_job)
{
        struct amd_gpu_scheduler *sched = s_job->sched;

        dma_fence_add_callback(&s_job->s_fence->finished, &s_job->finish_cb,
                               amd_sched_job_finish_cb);

        spin_lock(&sched->job_list_lock);
        list_add_tail(&s_job->node, &sched->ring_mirror_list);
        if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
            list_first_entry_or_null(&sched->ring_mirror_list,
                                     struct amd_sched_job, node) == s_job)
                schedule_delayed_work(&s_job->work_tdr, sched->timeout);
        spin_unlock(&sched->job_list_lock);
}

static void amd_sched_job_timedout(struct work_struct *work)
{
        struct amd_sched_job *job = container_of(work, struct amd_sched_job,
                                                 work_tdr.work);

        job->sched->ops->timedout_job(job);
}

void amd_sched_hw_job_reset(struct amd_gpu_scheduler *sched)
{
        struct amd_sched_job *s_job;

        spin_lock(&sched->job_list_lock);
        list_for_each_entry_reverse(s_job, &sched->ring_mirror_list, node) {
                if (s_job->s_fence->parent &&
                    dma_fence_remove_callback(s_job->s_fence->parent,
                                              &s_job->s_fence->cb)) {
                        dma_fence_put(s_job->s_fence->parent);
                        s_job->s_fence->parent = NULL;
                        atomic_dec(&sched->hw_rq_count);
                }
        }
        spin_unlock(&sched->job_list_lock);
}

void amd_sched_job_kickout(struct amd_sched_job *s_job)
{
        struct amd_gpu_scheduler *sched = s_job->sched;

        spin_lock(&sched->job_list_lock);
        list_del_init(&s_job->node);
        spin_unlock(&sched->job_list_lock);
}

void amd_sched_job_recovery(struct amd_gpu_scheduler *sched)
{
        struct amd_sched_job *s_job, *tmp;
        int r;

        spin_lock(&sched->job_list_lock);
        s_job = list_first_entry_or_null(&sched->ring_mirror_list,
                                         struct amd_sched_job, node);
        if (s_job && sched->timeout != MAX_SCHEDULE_TIMEOUT)
                schedule_delayed_work(&s_job->work_tdr, sched->timeout);

        list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
                struct amd_sched_fence *s_fence = s_job->s_fence;
                struct dma_fence *fence;

                spin_unlock(&sched->job_list_lock);
                fence = sched->ops->run_job(s_job);
                atomic_inc(&sched->hw_rq_count);
                if (fence) {
                        s_fence->parent = dma_fence_get(fence);
                        r = dma_fence_add_callback(fence, &s_fence->cb,
                                                   amd_sched_process_job);
                        if (r == -ENOENT)
                                amd_sched_process_job(fence, &s_fence->cb);
                        else if (r)
                                DRM_ERROR("fence add callback failed (%d)\n",
                                          r);
                        dma_fence_put(fence);
                } else {
                        DRM_ERROR("Failed to run job!\n");
                        amd_sched_process_job(NULL, &s_fence->cb);
                }
                spin_lock(&sched->job_list_lock);
        }
        spin_unlock(&sched->job_list_lock);
}

/**
 * Submit a job to the job queue
 *
 * @sched_job The pointer to job required to submit
 *
 * Blocks until the job can be pushed into the entity's job queue; the first
 * job queued on an idle entity also adds the entity to its run queue and
 * wakes up the scheduler.
 */
void amd_sched_entity_push_job(struct amd_sched_job *sched_job)
{
        struct amd_sched_entity *entity = sched_job->s_entity;

        trace_amd_sched_job(sched_job);
        wait_event(entity->sched->job_scheduled,
                   amd_sched_entity_in(sched_job));
}

/* init a sched_job with the basic fields */
int amd_sched_job_init(struct amd_sched_job *job,
                       struct amd_gpu_scheduler *sched,
                       struct amd_sched_entity *entity,
                       void *owner)
{
        job->sched = sched;
        job->s_entity = entity;
        job->s_fence = amd_sched_fence_create(entity, owner);
        if (!job->s_fence)
                return -ENOMEM;
        job->id = atomic64_inc_return(&sched->job_id_count);

        INIT_WORK(&job->finish_work, amd_sched_job_finish);
        INIT_LIST_HEAD(&job->node);
        INIT_DELAYED_WORK(&job->work_tdr, amd_sched_job_timedout);

        return 0;
}
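
/*
 * A submission sketch (illustrative only, not from the original file): a
 * driver would usually embed struct amd_sched_job in its own job structure,
 * initialize it against an entity and then push it. The "my_job" structure,
 * its "base" member and "ctx" are hypothetical names.
 *
 *      r = amd_sched_job_init(&my_job->base, &ring->sched, &ctx->entity,
 *                             my_job);
 *      if (r)
 *              return r;
 *      amd_sched_entity_push_job(&my_job->base);
 *
 * Once the entity is selected by the scheduler thread, ops->dependency() and
 * ops->run_job() are called for the job.
 */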

/**
 * Return true if we can push more jobs to the hw.
 */
static bool amd_sched_ready(struct amd_gpu_scheduler *sched)
{
        return atomic_read(&sched->hw_rq_count) <
                sched->hw_submission_limit;
}

/**
 * Wake up the scheduler when it is ready
 */
static void amd_sched_wakeup(struct amd_gpu_scheduler *sched)
{
        if (amd_sched_ready(sched))
                wake_up_interruptible(&sched->wake_up_worker);
}

/**
 * Select next entity to process
 */
static struct amd_sched_entity *
amd_sched_select_entity(struct amd_gpu_scheduler *sched)
{
        struct amd_sched_entity *entity;
        int i;

        if (!amd_sched_ready(sched))
                return NULL;

        /* Kernel run queue has higher priority than normal run queue */
        for (i = AMD_SCHED_PRIORITY_MAX - 1; i >= AMD_SCHED_PRIORITY_MIN; i--) {
                entity = amd_sched_rq_select_entity(&sched->sched_rq[i]);
                if (entity)
                        break;
        }

        return entity;
}

static void amd_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb)
{
        struct amd_sched_fence *s_fence =
                container_of(cb, struct amd_sched_fence, cb);
        struct amd_gpu_scheduler *sched = s_fence->sched;

        atomic_dec(&sched->hw_rq_count);
        amd_sched_fence_finished(s_fence);

        trace_amd_sched_process_job(s_fence);
        dma_fence_put(&s_fence->finished);
        wake_up_interruptible(&sched->wake_up_worker);
}

static bool amd_sched_blocked(struct amd_gpu_scheduler *sched)
{
        if (kthread_should_park()) {
                kthread_parkme();
                return true;
        }

        return false;
}

static int amd_sched_main(void *param)
{
        struct sched_param sparam = {.sched_priority = 1};
        struct amd_gpu_scheduler *sched = (struct amd_gpu_scheduler *)param;
        int r, count;

        sched_setscheduler(current, SCHED_FIFO, &sparam);

        while (!kthread_should_stop()) {
                struct amd_sched_entity *entity = NULL;
                struct amd_sched_fence *s_fence;
                struct amd_sched_job *sched_job;
                struct dma_fence *fence;

                wait_event_interruptible(sched->wake_up_worker,
                                         (!amd_sched_blocked(sched) &&
                                          (entity = amd_sched_select_entity(sched))) ||
                                         kthread_should_stop());

                if (!entity)
                        continue;

                sched_job = amd_sched_entity_peek_job(entity);
                if (!sched_job)
                        continue;

                s_fence = sched_job->s_fence;

                atomic_inc(&sched->hw_rq_count);
                amd_sched_job_begin(sched_job);

                fence = sched->ops->run_job(sched_job);
                amd_sched_fence_scheduled(s_fence);

                /* amd_sched_process_job drops the job's reference of the fence. */
                sched_job->s_fence = NULL;

                if (fence) {
                        s_fence->parent = dma_fence_get(fence);
                        r = dma_fence_add_callback(fence, &s_fence->cb,
                                                   amd_sched_process_job);
                        if (r == -ENOENT)
                                amd_sched_process_job(fence, &s_fence->cb);
                        else if (r)
                                DRM_ERROR("fence add callback failed (%d)\n",
                                          r);
                        dma_fence_put(fence);
                } else {
                        DRM_ERROR("Failed to run job!\n");
                        amd_sched_process_job(NULL, &s_fence->cb);
                }

                count = kfifo_out(&entity->job_queue, &sched_job,
                                  sizeof(sched_job));
                WARN_ON(count != sizeof(sched_job));
                wake_up(&sched->job_scheduled);
        }
        return 0;
}

/**
 * Init a gpu scheduler instance
 *
 * @sched The pointer to the scheduler
 * @ops The backend operations for this scheduler.
 * @hw_submission Number of hw submissions that can be in flight.
 * @timeout Job timeout in jiffies (MAX_SCHEDULE_TIMEOUT disables the TDR).
 * @name Name used for debugging
 *
 * Return 0 on success, otherwise error code.
 */
int amd_sched_init(struct amd_gpu_scheduler *sched,
                   const struct amd_sched_backend_ops *ops,
                   unsigned hw_submission, long timeout, const char *name)
{
        int i;

        sched->ops = ops;
        sched->hw_submission_limit = hw_submission;
        sched->name = name;
        sched->timeout = timeout;
        for (i = AMD_SCHED_PRIORITY_MIN; i < AMD_SCHED_PRIORITY_MAX; i++)
                amd_sched_rq_init(&sched->sched_rq[i]);

        init_waitqueue_head(&sched->wake_up_worker);
        init_waitqueue_head(&sched->job_scheduled);
        INIT_LIST_HEAD(&sched->ring_mirror_list);
        spin_lock_init(&sched->job_list_lock);
        atomic_set(&sched->hw_rq_count, 0);
        atomic64_set(&sched->job_id_count, 0);

        /* Each scheduler will run on a separate kernel thread */
        sched->thread = kthread_run(amd_sched_main, sched, sched->name);
        if (IS_ERR(sched->thread)) {
                DRM_ERROR("Failed to create scheduler for %s.\n", name);
                return PTR_ERR(sched->thread);
        }

        return 0;
}
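
/*
 * A minimal setup sketch (illustrative, not part of the original file). The
 * ops table fields are the callbacks this file invokes (dependency, run_job,
 * timedout_job, free_job); the my_* names, the submission limit of 16 and the
 * 10 second timeout are hypothetical.
 *
 *      static const struct amd_sched_backend_ops my_sched_ops = {
 *              .dependency = my_job_dependency,
 *              .run_job = my_run_job,
 *              .timedout_job = my_job_timedout,
 *              .free_job = my_job_free,
 *      };
 *
 *      r = amd_sched_init(&ring->sched, &my_sched_ops, 16,
 *                         msecs_to_jiffies(10000), ring->name);
 */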

/**
 * Destroy a gpu scheduler
 *
 * @sched The pointer to the scheduler
 */
void amd_sched_fini(struct amd_gpu_scheduler *sched)
{
        if (sched->thread)
                kthread_stop(sched->thread);
}
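
/*
 * Teardown sketch (illustrative, not part of the original file): entities are
 * torn down before their scheduler, so pending jobs are consumed or discarded
 * while the scheduler thread still exists, and only then is the thread
 * stopped. "ring" and "ctx" are hypothetical names.
 *
 *      amd_sched_entity_fini(&ring->sched, &ctx->entity);
 *      amd_sched_fini(&ring->sched);
 */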