sched_entity.c

/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/kthread.h>
#include <drm/gpu_scheduler.h>

#include "gpu_scheduler_trace.h"

#define to_drm_sched_job(sched_job) \
		container_of((sched_job), struct drm_sched_job, queue_node)

/**
 * drm_sched_entity_init - Init a context entity used by the scheduler when
 * submitting jobs to the HW ring.
 *
 * @entity: scheduler entity to init
 * @rq_list: the list of run queues on which jobs from this
 *           entity can be submitted
 * @num_rq_list: number of run queues in rq_list
 * @guilty: atomic_t set to 1 when a job on this queue
 *          is found to be guilty causing a timeout
 *
 * Note: the rq_list should have at least one element to schedule
 *       the entity
 *
 * Returns 0 on success or a negative error code on failure.
 */
int drm_sched_entity_init(struct drm_sched_entity *entity,
			  struct drm_sched_rq **rq_list,
			  unsigned int num_rq_list,
			  atomic_t *guilty)
{
	int i;

	if (!(entity && rq_list && num_rq_list > 0 && rq_list[0]))
		return -EINVAL;

	memset(entity, 0, sizeof(struct drm_sched_entity));
	INIT_LIST_HEAD(&entity->list);
	entity->rq = rq_list[0];
	entity->guilty = guilty;
	entity->num_rq_list = num_rq_list;
	entity->rq_list = kcalloc(num_rq_list, sizeof(struct drm_sched_rq *),
				  GFP_KERNEL);
	if (!entity->rq_list)
		return -ENOMEM;

	for (i = 0; i < num_rq_list; ++i)
		entity->rq_list[i] = rq_list[i];

	entity->last_scheduled = NULL;

	spin_lock_init(&entity->rq_lock);
	spsc_queue_init(&entity->job_queue);

	atomic_set(&entity->fence_seq, 0);
	entity->fence_context = dma_fence_context_alloc(2);

	return 0;
}
EXPORT_SYMBOL(drm_sched_entity_init);

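/*
 * Illustrative usage sketch (not part of the original file): a driver would
 * typically pick one run queue per desired priority level from its scheduler
 * and initialize the entity once per context. The gpu/ctx structures and the
 * chosen priority value are assumptions for the example only.
 *
 *	struct drm_sched_rq *rq =
 *		&gpu->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
 *	int ret;
 *
 *	ret = drm_sched_entity_init(&ctx->entity, &rq, 1, NULL);
 *	if (ret)
 *		return ret;
 */
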
/**
 * drm_sched_entity_is_idle - Check if entity is idle
 *
 * @entity: scheduler entity
 *
 * Returns true if the entity does not have any unscheduled jobs.
 */
static bool drm_sched_entity_is_idle(struct drm_sched_entity *entity)
{
	rmb(); /* for list_empty to work without lock */

	if (list_empty(&entity->list) ||
	    spsc_queue_peek(&entity->job_queue) == NULL)
		return true;

	return false;
}

/**
 * drm_sched_entity_is_ready - Check if entity is ready
 *
 * @entity: scheduler entity
 *
 * Returns true if the entity can provide a job for scheduling.
 */
bool drm_sched_entity_is_ready(struct drm_sched_entity *entity)
{
	if (spsc_queue_peek(&entity->job_queue) == NULL)
		return false;

	if (READ_ONCE(entity->dependency))
		return false;

	return true;
}

/**
 * drm_sched_entity_get_free_sched - Get the rq from rq_list with the least load
 *
 * @entity: scheduler entity
 *
 * Returns a pointer to the rq with the least load.
 */
static struct drm_sched_rq *
drm_sched_entity_get_free_sched(struct drm_sched_entity *entity)
{
	struct drm_sched_rq *rq = NULL;
	unsigned int min_jobs = UINT_MAX, num_jobs;
	int i;

	for (i = 0; i < entity->num_rq_list; ++i) {
		num_jobs = atomic_read(&entity->rq_list[i]->sched->num_jobs);
		if (num_jobs < min_jobs) {
			min_jobs = num_jobs;
			rq = entity->rq_list[i];
		}
	}

	return rq;
}

/**
 * drm_sched_entity_flush - Flush a context entity
 *
 * @entity: scheduler entity
 * @timeout: time to wait for the entity's job queue to become empty, in jiffies
 *
 * drm_sched_entity_fini() is split into two functions. This first one does the
 * waiting, removes the entity from the runqueue and returns an error when the
 * process was killed.
 *
 * Returns the remaining time in jiffies left from the input timeout.
 */
long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout)
{
	struct drm_gpu_scheduler *sched;
	struct task_struct *last_user;
	long ret = timeout;

	sched = entity->rq->sched;
	/*
	 * The client will not queue more IBs during this fini; consume the
	 * existing queued IBs or discard them on SIGKILL.
	 */
	if (current->flags & PF_EXITING) {
		if (timeout)
			ret = wait_event_timeout(
					sched->job_scheduled,
					drm_sched_entity_is_idle(entity),
					timeout);
	} else {
		wait_event_killable(sched->job_scheduled,
				    drm_sched_entity_is_idle(entity));
	}

	/* For a killed process, disable any further IB enqueue right now */
	last_user = cmpxchg(&entity->last_user, current->group_leader, NULL);
	if ((!last_user || last_user == current->group_leader) &&
	    (current->flags & PF_EXITING) && (current->exit_code == SIGKILL)) {
		spin_lock(&entity->rq_lock);
		entity->stopped = true;
		drm_sched_rq_remove_entity(entity->rq, entity);
		spin_unlock(&entity->rq_lock);
	}

	return ret;
}
EXPORT_SYMBOL(drm_sched_entity_flush);

/**
 * drm_sched_entity_kill_jobs_cb - helper for drm_sched_entity_kill_jobs
 *
 * @f: signaled fence
 * @cb: our callback structure
 *
 * Signal the scheduler finished fence when the entity in question is killed.
 */
static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
					  struct dma_fence_cb *cb)
{
	struct drm_sched_job *job = container_of(cb, struct drm_sched_job,
						 finish_cb);

	drm_sched_fence_finished(job->s_fence);
	WARN_ON(job->s_fence->parent);
	dma_fence_put(&job->s_fence->finished);
	job->sched->ops->free_job(job);
}

/**
 * drm_sched_entity_kill_jobs - Make sure all remaining jobs are killed
 *
 * @entity: entity which is cleaned up
 *
 * Makes sure that all remaining jobs in an entity are killed before it is
 * destroyed.
 */
static void drm_sched_entity_kill_jobs(struct drm_sched_entity *entity)
{
	struct drm_sched_job *job;
	int r;

	while ((job = to_drm_sched_job(spsc_queue_pop(&entity->job_queue)))) {
		struct drm_sched_fence *s_fence = job->s_fence;

		drm_sched_fence_scheduled(s_fence);
		dma_fence_set_error(&s_fence->finished, -ESRCH);

		/*
		 * When the pipe is hung by an older entity, the new entity
		 * might not even have had a chance to submit its first job
		 * to HW, so entity->last_scheduled will remain NULL.
		 */
		if (!entity->last_scheduled) {
			drm_sched_entity_kill_jobs_cb(NULL, &job->finish_cb);
			continue;
		}

		r = dma_fence_add_callback(entity->last_scheduled,
					   &job->finish_cb,
					   drm_sched_entity_kill_jobs_cb);
		if (r == -ENOENT)
			drm_sched_entity_kill_jobs_cb(NULL, &job->finish_cb);
		else if (r)
			DRM_ERROR("fence add callback failed (%d)\n", r);
	}
}

/**
 * drm_sched_entity_fini - Destroy a context entity
 *
 * @entity: scheduler entity
 *
 * This should be called after drm_sched_entity_flush(). It goes over the
 * entity and signals all jobs with an error code if the process was killed.
 */
void drm_sched_entity_fini(struct drm_sched_entity *entity)
{
	struct drm_gpu_scheduler *sched;

	sched = entity->rq->sched;
	drm_sched_rq_remove_entity(entity->rq, entity);

	/*
	 * Consumption of existing IBs wasn't completed. Forcefully remove
	 * them here.
	 */
	if (spsc_queue_peek(&entity->job_queue)) {
		/*
		 * Park the kernel thread for a moment to make sure it isn't
		 * processing our entity.
		 */
		kthread_park(sched->thread);
		kthread_unpark(sched->thread);
		if (entity->dependency) {
			dma_fence_remove_callback(entity->dependency,
						  &entity->cb);
			dma_fence_put(entity->dependency);
			entity->dependency = NULL;
		}

		drm_sched_entity_kill_jobs(entity);
	}

	dma_fence_put(entity->last_scheduled);
	entity->last_scheduled = NULL;
	kfree(entity->rq_list);
}
EXPORT_SYMBOL(drm_sched_entity_fini);

/**
 * drm_sched_entity_destroy - Destroy a context entity
 *
 * @entity: scheduler entity
 *
 * Calls drm_sched_entity_flush() and drm_sched_entity_fini().
 */
void drm_sched_entity_destroy(struct drm_sched_entity *entity)
{
	drm_sched_entity_flush(entity, MAX_WAIT_SCHED_ENTITY_Q_EMPTY);
	drm_sched_entity_fini(entity);
}
EXPORT_SYMBOL(drm_sched_entity_destroy);

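/*
 * Illustrative teardown sketch (not part of the original file): a driver that
 * wants to bound the wait itself can call drm_sched_entity_flush() and
 * drm_sched_entity_fini() separately instead of drm_sched_entity_destroy().
 * The ctx structure and the chosen timeout are assumptions for the example
 * only.
 *
 *	long remaining;
 *
 *	remaining = drm_sched_entity_flush(&ctx->entity,
 *					   msecs_to_jiffies(500));
 *	if (!remaining)
 *		DRM_WARN("entity did not go idle before timeout\n");
 *	drm_sched_entity_fini(&ctx->entity);
 */
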
/**
 * drm_sched_entity_clear_dep - callback to clear the entity's dependency
 *
 * @f: signaled fence
 * @cb: our callback structure
 */
static void drm_sched_entity_clear_dep(struct dma_fence *f,
				       struct dma_fence_cb *cb)
{
	struct drm_sched_entity *entity =
		container_of(cb, struct drm_sched_entity, cb);

	entity->dependency = NULL;
	dma_fence_put(f);
}

/**
 * drm_sched_entity_wakeup - callback to clear the entity's dependency and
 * wake up the scheduler
 *
 * @f: signaled fence
 * @cb: our callback structure
 */
static void drm_sched_entity_wakeup(struct dma_fence *f,
				    struct dma_fence_cb *cb)
{
	struct drm_sched_entity *entity =
		container_of(cb, struct drm_sched_entity, cb);

	drm_sched_entity_clear_dep(f, cb);
	drm_sched_wakeup(entity->rq->sched);
}

/**
 * drm_sched_entity_set_rq_priority - helper for drm_sched_entity_set_priority
 *
 * @rq: run queue pointer to update
 * @priority: new scheduler priority
 */
static void drm_sched_entity_set_rq_priority(struct drm_sched_rq **rq,
					     enum drm_sched_priority priority)
{
	*rq = &(*rq)->sched->sched_rq[priority];
}

/**
 * drm_sched_entity_set_priority - Sets priority of the entity
 *
 * @entity: scheduler entity
 * @priority: scheduler priority
 *
 * Update the priority of the run queues used for the entity.
 */
void drm_sched_entity_set_priority(struct drm_sched_entity *entity,
				   enum drm_sched_priority priority)
{
	unsigned int i;

	spin_lock(&entity->rq_lock);

	for (i = 0; i < entity->num_rq_list; ++i)
		drm_sched_entity_set_rq_priority(&entity->rq_list[i], priority);

	drm_sched_rq_remove_entity(entity->rq, entity);
	drm_sched_entity_set_rq_priority(&entity->rq, priority);
	drm_sched_rq_add_entity(entity->rq, entity);

	spin_unlock(&entity->rq_lock);
}
EXPORT_SYMBOL(drm_sched_entity_set_priority);

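/*
 * Illustrative usage sketch (not part of the original file): a driver could
 * remap an existing entity to another priority level at runtime, for example
 * in response to a userspace request. The ctx structure and the chosen
 * priority value are assumptions for the example only.
 *
 *	drm_sched_entity_set_priority(&ctx->entity, DRM_SCHED_PRIORITY_NORMAL);
 */
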
/**
 * drm_sched_entity_add_dependency_cb - add callback for the entity's dependency
 *
 * @entity: entity with dependency
 *
 * Add a callback to the current dependency of the entity to wake up the
 * scheduler when the entity becomes available.
 */
static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity)
{
	struct drm_gpu_scheduler *sched = entity->rq->sched;
	struct dma_fence *fence = entity->dependency;
	struct drm_sched_fence *s_fence;

	if (fence->context == entity->fence_context ||
	    fence->context == entity->fence_context + 1) {
		/*
		 * Fence is a scheduled/finished fence from a job
		 * which belongs to the same entity; we can ignore
		 * fences from ourselves.
		 */
		dma_fence_put(entity->dependency);
		return false;
	}

	s_fence = to_drm_sched_fence(fence);
	if (s_fence && s_fence->sched == sched) {
		/*
		 * Fence is from the same scheduler, only need to wait for
		 * it to be scheduled.
		 */
		fence = dma_fence_get(&s_fence->scheduled);
		dma_fence_put(entity->dependency);
		entity->dependency = fence;
		if (!dma_fence_add_callback(fence, &entity->cb,
					    drm_sched_entity_clear_dep))
			return true;

		/* Ignore it when it is already scheduled */
		dma_fence_put(fence);
		return false;
	}

	if (!dma_fence_add_callback(entity->dependency, &entity->cb,
				    drm_sched_entity_wakeup))
		return true;

	dma_fence_put(entity->dependency);
	return false;
}

/**
 * drm_sched_entity_pop_job - get a ready to be scheduled job from the entity
 *
 * @entity: entity to get the job from
 *
 * Process all dependencies and try to get one job from the entity's queue.
 */
struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity)
{
	struct drm_gpu_scheduler *sched = entity->rq->sched;
	struct drm_sched_job *sched_job;

	sched_job = to_drm_sched_job(spsc_queue_peek(&entity->job_queue));
	if (!sched_job)
		return NULL;

	while ((entity->dependency =
			sched->ops->dependency(sched_job, entity))) {
		if (drm_sched_entity_add_dependency_cb(entity)) {
			trace_drm_sched_job_wait_dep(sched_job,
						     entity->dependency);
			return NULL;
		}
	}

	/* skip jobs from an entity that has been marked guilty */
	if (entity->guilty && atomic_read(entity->guilty))
		dma_fence_set_error(&sched_job->s_fence->finished, -ECANCELED);

	dma_fence_put(entity->last_scheduled);
	entity->last_scheduled = dma_fence_get(&sched_job->s_fence->finished);

	spsc_queue_pop(&entity->job_queue);
	return sched_job;
}

/**
 * drm_sched_entity_select_rq - select a new rq for the entity
 *
 * @entity: scheduler entity
 *
 * Check all prerequisites and select a new rq for the entity for load
 * balancing.
 */
void drm_sched_entity_select_rq(struct drm_sched_entity *entity)
{
	struct dma_fence *fence;
	struct drm_sched_rq *rq;

	if (spsc_queue_count(&entity->job_queue) || entity->num_rq_list <= 1)
		return;

	fence = READ_ONCE(entity->last_scheduled);
	if (fence && !dma_fence_is_signaled(fence))
		return;

	rq = drm_sched_entity_get_free_sched(entity);
	if (rq == entity->rq)
		return;

	spin_lock(&entity->rq_lock);
	drm_sched_rq_remove_entity(entity->rq, entity);
	entity->rq = rq;
	spin_unlock(&entity->rq_lock);
}

/**
 * drm_sched_entity_push_job - Submit a job to the entity's job queue
 *
 * @sched_job: job to submit
 * @entity: scheduler entity
 *
 * Note: To guarantee that the order of insertion to the queue matches the
 * job's fence sequence number, this function should be called together with
 * drm_sched_job_init() under a common lock.
 */
void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
			       struct drm_sched_entity *entity)
{
	bool first;

	trace_drm_sched_job(sched_job, entity);
	atomic_inc(&entity->rq->sched->num_jobs);
	WRITE_ONCE(entity->last_user, current->group_leader);
	first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node);

	/* first job wakes up scheduler */
	if (first) {
		/* Add the entity to the run queue */
		spin_lock(&entity->rq_lock);
		if (entity->stopped) {
			spin_unlock(&entity->rq_lock);

			DRM_ERROR("Trying to push to a killed entity\n");
			return;
		}
		drm_sched_rq_add_entity(entity->rq, entity);
		spin_unlock(&entity->rq_lock);
		drm_sched_wakeup(entity->rq->sched);
	}
}
EXPORT_SYMBOL(drm_sched_entity_push_job);
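
/*
 * Illustrative submission sketch (not part of the original file): as the note
 * above says, drm_sched_job_init() and drm_sched_entity_push_job() should be
 * serialized by one lock so that fence sequence numbers match queue order.
 * The job/ctx structures, the submit_lock, and the drm_sched_job_init()
 * signature shown here are assumptions for the example only.
 *
 *	mutex_lock(&ctx->submit_lock);
 *	ret = drm_sched_job_init(&job->base, &ctx->entity, ctx);
 *	if (ret) {
 *		mutex_unlock(&ctx->submit_lock);
 *		return ret;
 *	}
 *	drm_sched_entity_push_job(&job->base, &ctx->entity);
 *	mutex_unlock(&ctx->submit_lock);
 */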