amdgpu_sa.c

/*
 * Copyright 2011 Red Hat Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 */
/* Algorithm:
 *
 * We store the last allocated bo in "hole", and we always try to allocate
 * after the last allocated bo. The principle is that in a linear GPU ring
 * progression, what comes after the last bo is the oldest bo we allocated
 * and thus the first one that should no longer be in use by the GPU.
 *
 * If that's not the case we skip over the bo after last to the closest
 * done bo, if one exists. If none exists and we are not asked to
 * block, we report failure to allocate.
 *
 * If we are asked to block, we wait on the oldest fence of all
 * rings. We just wait for any of those fences to complete.
 */
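/*
 * Usage sketch (illustrative only; the surrounding driver code and the
 * chosen sizes below are assumptions, not taken from this file): create a
 * manager once, then suballocate and free, passing the fence that protects
 * the GPU's last use of the memory.
 *
 *      struct amdgpu_sa_bo *sa_bo;
 *      int r;
 *
 *      r = amdgpu_sa_bo_new(adev, &sa_manager, &sa_bo, 4096, 256);
 *      if (!r) {
 *              ... submit GPU work using sa_manager.gpu_addr + sa_bo->soffset ...
 *              amdgpu_sa_bo_free(adev, &sa_bo, fence);
 *      }
 */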
#include <drm/drmP.h>
#include "amdgpu.h"

static void amdgpu_sa_bo_remove_locked(struct amdgpu_sa_bo *sa_bo);
static void amdgpu_sa_bo_try_free(struct amdgpu_sa_manager *sa_manager);
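
/**
 * amdgpu_sa_bo_manager_init - allocate and initialize a suballocator manager
 *
 * @adev: amdgpu device pointer
 * @sa_manager: manager to initialize
 * @size: size of the backing buffer object in bytes
 * @align: alignment required for suballocations
 * @domain: memory domain to place the backing bo in
 *
 * Creates the backing bo and initializes the hole pointer and the per-ring
 * fence lists. Returns 0 on success or a negative error code.
 */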
int amdgpu_sa_bo_manager_init(struct amdgpu_device *adev,
                              struct amdgpu_sa_manager *sa_manager,
                              unsigned size, u32 align, u32 domain)
{
        int i, r;

        init_waitqueue_head(&sa_manager->wq);
        sa_manager->bo = NULL;
        sa_manager->size = size;
        sa_manager->domain = domain;
        sa_manager->align = align;
        sa_manager->hole = &sa_manager->olist;
        INIT_LIST_HEAD(&sa_manager->olist);
        for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
                INIT_LIST_HEAD(&sa_manager->flist[i]);
        }

        r = amdgpu_bo_create(adev, size, align, true, domain,
                             0, NULL, NULL, &sa_manager->bo);
        if (r) {
                dev_err(adev->dev, "(%d) failed to allocate bo for manager\n", r);
                return r;
        }

        return r;
}
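
/**
 * amdgpu_sa_bo_manager_fini - tear down a suballocator manager
 *
 * @adev: amdgpu device pointer
 * @sa_manager: manager to tear down
 *
 * Reclaims all signaled suballocations, warns if any remain, frees what
 * is left anyway, and drops the reference to the backing bo.
 */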
void amdgpu_sa_bo_manager_fini(struct amdgpu_device *adev,
                               struct amdgpu_sa_manager *sa_manager)
{
        struct amdgpu_sa_bo *sa_bo, *tmp;

        if (!list_empty(&sa_manager->olist)) {
                sa_manager->hole = &sa_manager->olist;
                amdgpu_sa_bo_try_free(sa_manager);
                if (!list_empty(&sa_manager->olist)) {
                        dev_err(adev->dev, "sa_manager is not empty, clearing anyway\n");
                }
        }
        list_for_each_entry_safe(sa_bo, tmp, &sa_manager->olist, olist) {
                amdgpu_sa_bo_remove_locked(sa_bo);
        }
        amdgpu_bo_unref(&sa_manager->bo);
        sa_manager->size = 0;
}
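
/**
 * amdgpu_sa_bo_manager_start - pin and map the backing bo
 *
 * @adev: amdgpu device pointer
 * @sa_manager: manager whose bo should be made accessible
 *
 * Pins the backing bo into its domain and maps it into the CPU address
 * space. Returns 0 on success or a negative error code.
 */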
int amdgpu_sa_bo_manager_start(struct amdgpu_device *adev,
                               struct amdgpu_sa_manager *sa_manager)
{
        int r;

        if (sa_manager->bo == NULL) {
                dev_err(adev->dev, "no bo for sa manager\n");
                return -EINVAL;
        }

        /* map the buffer */
        r = amdgpu_bo_reserve(sa_manager->bo, false);
        if (r) {
                dev_err(adev->dev, "(%d) failed to reserve manager bo\n", r);
                return r;
        }
        r = amdgpu_bo_pin(sa_manager->bo, sa_manager->domain, &sa_manager->gpu_addr);
        if (r) {
                amdgpu_bo_unreserve(sa_manager->bo);
                dev_err(adev->dev, "(%d) failed to pin manager bo\n", r);
                return r;
        }
        r = amdgpu_bo_kmap(sa_manager->bo, &sa_manager->cpu_ptr);
        amdgpu_bo_unreserve(sa_manager->bo);
        return r;
}
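
/**
 * amdgpu_sa_bo_manager_suspend - unmap and unpin the backing bo
 *
 * @adev: amdgpu device pointer
 * @sa_manager: manager whose bo should be released
 *
 * Counterpart to amdgpu_sa_bo_manager_start(). Returns 0 on success or a
 * negative error code.
 */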
int amdgpu_sa_bo_manager_suspend(struct amdgpu_device *adev,
                                 struct amdgpu_sa_manager *sa_manager)
{
        int r;

        if (sa_manager->bo == NULL) {
                dev_err(adev->dev, "no bo for sa manager\n");
                return -EINVAL;
        }

        r = amdgpu_bo_reserve(sa_manager->bo, false);
        if (!r) {
                amdgpu_bo_kunmap(sa_manager->bo);
                amdgpu_bo_unpin(sa_manager->bo);
                amdgpu_bo_unreserve(sa_manager->bo);
        }
        return r;
}
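
/**
 * amdgpu_sa_get_ring_from_fence - look up the ring index behind a fence
 *
 * @f: fence to inspect
 *
 * Handles both scheduler fences and raw amdgpu fences; falls back to
 * ring 0 if the fence is of neither type.
 */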
static uint32_t amdgpu_sa_get_ring_from_fence(struct fence *f)
{
        struct amdgpu_fence *a_fence;
        struct amd_sched_fence *s_fence;

        s_fence = to_amd_sched_fence(f);
        if (s_fence) {
                struct amdgpu_ring *ring;

                ring = container_of(s_fence->sched, struct amdgpu_ring, sched);
                return ring->idx;
        }

        a_fence = to_amdgpu_fence(f);
        if (a_fence)
                return a_fence->ring->idx;

        return 0;
}
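
/**
 * amdgpu_sa_bo_remove_locked - free a suballocation
 *
 * @sa_bo: suballocation to remove
 *
 * Unlinks the suballocation from the offset and fence lists, moving the
 * hole pointer back if it pointed here. Caller must hold the wait queue
 * lock.
 */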
static void amdgpu_sa_bo_remove_locked(struct amdgpu_sa_bo *sa_bo)
{
        struct amdgpu_sa_manager *sa_manager = sa_bo->manager;

        if (sa_manager->hole == &sa_bo->olist) {
                sa_manager->hole = sa_bo->olist.prev;
        }
        list_del_init(&sa_bo->olist);
        list_del_init(&sa_bo->flist);
        fence_put(sa_bo->fence);
        kfree(sa_bo);
}
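
/**
 * amdgpu_sa_bo_try_free - reclaim signaled suballocations after the hole
 *
 * @sa_manager: manager to reclaim from
 *
 * Walks forward from the hole and frees every suballocation whose fence
 * has already signaled, stopping at the first one still in use.
 */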
static void amdgpu_sa_bo_try_free(struct amdgpu_sa_manager *sa_manager)
{
        struct amdgpu_sa_bo *sa_bo, *tmp;

        if (sa_manager->hole->next == &sa_manager->olist)
                return;

        sa_bo = list_entry(sa_manager->hole->next, struct amdgpu_sa_bo, olist);
        list_for_each_entry_safe_from(sa_bo, tmp, &sa_manager->olist, olist) {
                if (sa_bo->fence == NULL ||
                    !fence_is_signaled(sa_bo->fence)) {
                        return;
                }
                amdgpu_sa_bo_remove_locked(sa_bo);
        }
}
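
/* The "hole" is the gap between the last allocated sa_bo and the next one
 * in the offset list (or the end of the buffer). These helpers return the
 * start and end offset of that gap.
 */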
static inline unsigned amdgpu_sa_bo_hole_soffset(struct amdgpu_sa_manager *sa_manager)
{
        struct list_head *hole = sa_manager->hole;

        if (hole != &sa_manager->olist) {
                return list_entry(hole, struct amdgpu_sa_bo, olist)->eoffset;
        }
        return 0;
}

static inline unsigned amdgpu_sa_bo_hole_eoffset(struct amdgpu_sa_manager *sa_manager)
{
        struct list_head *hole = sa_manager->hole;

        if (hole->next != &sa_manager->olist) {
                return list_entry(hole->next, struct amdgpu_sa_bo, olist)->soffset;
        }
        return sa_manager->size;
}
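
/**
 * amdgpu_sa_bo_try_alloc - try to suballocate from the current hole
 *
 * @sa_manager: manager to allocate from
 * @sa_bo: suballocation to fill in
 * @size: number of bytes to allocate
 * @align: required alignment
 *
 * Returns true and advances the hole if the gap between the hole and the
 * next allocation is large enough after alignment, false otherwise.
 */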
static bool amdgpu_sa_bo_try_alloc(struct amdgpu_sa_manager *sa_manager,
                                   struct amdgpu_sa_bo *sa_bo,
                                   unsigned size, unsigned align)
{
        unsigned soffset, eoffset, wasted;

        soffset = amdgpu_sa_bo_hole_soffset(sa_manager);
        eoffset = amdgpu_sa_bo_hole_eoffset(sa_manager);
        wasted = (align - (soffset % align)) % align;

        if ((eoffset - soffset) >= (size + wasted)) {
                soffset += wasted;

                sa_bo->manager = sa_manager;
                sa_bo->soffset = soffset;
                sa_bo->eoffset = soffset + size;
                list_add(&sa_bo->olist, sa_manager->hole);
                INIT_LIST_HEAD(&sa_bo->flist);
                sa_manager->hole = &sa_bo->olist;
                return true;
        }
        return false;
}
/**
 * amdgpu_sa_event - Check if we can stop waiting
 *
 * @sa_manager: pointer to the sa_manager
 * @size: number of bytes we want to allocate
 * @align: alignment we need to match
 *
 * Check if either there is a fence we can wait for or
 * enough free memory to satisfy the allocation directly.
 */
static bool amdgpu_sa_event(struct amdgpu_sa_manager *sa_manager,
                            unsigned size, unsigned align)
{
        unsigned soffset, eoffset, wasted;
        int i;

        for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
                if (!list_empty(&sa_manager->flist[i])) {
                        return true;
                }
        }

        soffset = amdgpu_sa_bo_hole_soffset(sa_manager);
        eoffset = amdgpu_sa_bo_hole_eoffset(sa_manager);
        wasted = (align - (soffset % align)) % align;

        if ((eoffset - soffset) >= (size + wasted)) {
                return true;
        }

        return false;
}
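
/**
 * amdgpu_sa_bo_next_hole - move the hole forward past finished allocations
 *
 * @sa_manager: manager to scan
 * @fences: per-ring array, filled with fences we may have to wait for
 * @tries: per-ring count of how often a hole from that ring was already used
 *
 * Wraps around to the start of the buffer if the hole is at the end;
 * otherwise frees the closest already-signaled suballocation and moves the
 * hole there. Returns true if the caller should retry the allocation.
 */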
static bool amdgpu_sa_bo_next_hole(struct amdgpu_sa_manager *sa_manager,
                                   struct fence **fences,
                                   unsigned *tries)
{
        struct amdgpu_sa_bo *best_bo = NULL;
        unsigned i, soffset, best, tmp;

        /* if hole points to the end of the buffer */
        if (sa_manager->hole->next == &sa_manager->olist) {
                /* try again with its beginning */
                sa_manager->hole = &sa_manager->olist;
                return true;
        }

        soffset = amdgpu_sa_bo_hole_soffset(sa_manager);
        /* to handle wrap around we add sa_manager->size */
        best = sa_manager->size * 2;
        /* go over all fence lists and try to find the sa_bo
         * closest to the current last
         */
        for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
                struct amdgpu_sa_bo *sa_bo;

                if (list_empty(&sa_manager->flist[i])) {
                        continue;
                }

                sa_bo = list_first_entry(&sa_manager->flist[i],
                                         struct amdgpu_sa_bo, flist);

                if (!fence_is_signaled(sa_bo->fence)) {
                        fences[i] = sa_bo->fence;
                        continue;
                }

                /* limit the number of tries each ring gets */
                if (tries[i] > 2) {
                        continue;
                }

                tmp = sa_bo->soffset;
                if (tmp < soffset) {
                        /* wrap around, pretend it's after */
                        tmp += sa_manager->size;
                }
                tmp -= soffset;
                if (tmp < best) {
                        /* this sa bo is the closest one */
                        best = tmp;
                        best_bo = sa_bo;
                }
        }

        if (best_bo) {
                uint32_t idx = amdgpu_sa_get_ring_from_fence(best_bo->fence);
                ++tries[idx];
                sa_manager->hole = best_bo->olist.prev;

                /* we know that this one is signaled,
                 * so it's safe to remove it
                 */
                amdgpu_sa_bo_remove_locked(best_bo);
                return true;
        }

        return false;
}
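
/**
 * amdgpu_sa_bo_new - allocate a new suballocation
 *
 * @adev: amdgpu device pointer
 * @sa_manager: manager to allocate from
 * @sa_bo: pointer used to return the new suballocation
 * @size: number of bytes to allocate
 * @align: required alignment
 *
 * Tries to allocate directly, then skips over finished allocations, and
 * finally blocks on the collected fences if nothing can be reclaimed.
 * Returns 0 on success or a negative error code.
 */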
int amdgpu_sa_bo_new(struct amdgpu_device *adev,
                     struct amdgpu_sa_manager *sa_manager,
                     struct amdgpu_sa_bo **sa_bo,
                     unsigned size, unsigned align)
{
        struct fence *fences[AMDGPU_MAX_RINGS];
        unsigned tries[AMDGPU_MAX_RINGS];
        int i, r;
        signed long t;

        BUG_ON(align > sa_manager->align);
        BUG_ON(size > sa_manager->size);

        *sa_bo = kmalloc(sizeof(struct amdgpu_sa_bo), GFP_KERNEL);
        if ((*sa_bo) == NULL) {
                return -ENOMEM;
        }
        (*sa_bo)->manager = sa_manager;
        (*sa_bo)->fence = NULL;
        INIT_LIST_HEAD(&(*sa_bo)->olist);
        INIT_LIST_HEAD(&(*sa_bo)->flist);

        spin_lock(&sa_manager->wq.lock);
        do {
                for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
                        fences[i] = NULL;
                        tries[i] = 0;
                }

                do {
                        amdgpu_sa_bo_try_free(sa_manager);

                        if (amdgpu_sa_bo_try_alloc(sa_manager, *sa_bo,
                                                   size, align)) {
                                spin_unlock(&sa_manager->wq.lock);
                                return 0;
                        }

                        /* see if we can skip over some allocations */
                } while (amdgpu_sa_bo_next_hole(sa_manager, fences, tries));

                spin_unlock(&sa_manager->wq.lock);
                t = amdgpu_fence_wait_any(adev, fences, AMDGPU_MAX_RINGS,
                                          false, MAX_SCHEDULE_TIMEOUT);
                r = (t > 0) ? 0 : t;
                spin_lock(&sa_manager->wq.lock);
                /* if we have nothing to wait for, block */
                if (r == -ENOENT) {
                        r = wait_event_interruptible_locked(
                                sa_manager->wq,
                                amdgpu_sa_event(sa_manager, size, align)
                        );
                }
        } while (!r);

        spin_unlock(&sa_manager->wq.lock);
        kfree(*sa_bo);
        *sa_bo = NULL;
        return r;
}
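
/**
 * amdgpu_sa_bo_free - free a suballocation
 *
 * @adev: amdgpu device pointer
 * @sa_bo: suballocation to free, set to NULL on return
 * @fence: optional fence protecting the last use of the memory
 *
 * If the fence has not signaled yet, the suballocation is queued on the
 * fence list of its ring; otherwise it is freed immediately.
 */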
void amdgpu_sa_bo_free(struct amdgpu_device *adev, struct amdgpu_sa_bo **sa_bo,
                       struct fence *fence)
{
        struct amdgpu_sa_manager *sa_manager;

        if (sa_bo == NULL || *sa_bo == NULL) {
                return;
        }

        sa_manager = (*sa_bo)->manager;
        spin_lock(&sa_manager->wq.lock);
        if (fence && !fence_is_signaled(fence)) {
                uint32_t idx;

                (*sa_bo)->fence = fence_get(fence);
                idx = amdgpu_sa_get_ring_from_fence(fence);
                list_add_tail(&(*sa_bo)->flist, &sa_manager->flist[idx]);
        } else {
                amdgpu_sa_bo_remove_locked(*sa_bo);
        }
        wake_up_all_locked(&sa_manager->wq);
        spin_unlock(&sa_manager->wq.lock);
        *sa_bo = NULL;
}
#if defined(CONFIG_DEBUG_FS)

static void amdgpu_sa_bo_dump_fence(struct fence *fence, struct seq_file *m)
{
        struct amdgpu_fence *a_fence = to_amdgpu_fence(fence);
        struct amd_sched_fence *s_fence = to_amd_sched_fence(fence);

        if (a_fence)
                seq_printf(m, " protected by 0x%016llx on ring %d",
                           a_fence->seq, a_fence->ring->idx);

        if (s_fence) {
                struct amdgpu_ring *ring;

                ring = container_of(s_fence->sched, struct amdgpu_ring, sched);
                seq_printf(m, " protected by 0x%016x on ring %d",
                           s_fence->base.seqno, ring->idx);
        }
}

void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
                                  struct seq_file *m)
{
        struct amdgpu_sa_bo *i;

        spin_lock(&sa_manager->wq.lock);
        list_for_each_entry(i, &sa_manager->olist, olist) {
                uint64_t soffset = i->soffset + sa_manager->gpu_addr;
                uint64_t eoffset = i->eoffset + sa_manager->gpu_addr;
                if (&i->olist == sa_manager->hole) {
                        seq_printf(m, ">");
                } else {
                        seq_printf(m, " ");
                }
                seq_printf(m, "[0x%010llx 0x%010llx] size %8lld",
                           soffset, eoffset, eoffset - soffset);
                if (i->fence)
                        amdgpu_sa_bo_dump_fence(i->fence, m);
                seq_printf(m, "\n");
        }
        spin_unlock(&sa_manager->wq.lock);
}

#endif