amdgpu_sa.c

/*
 * Copyright 2011 Red Hat Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 */
/* Algorithm:
 *
 * We store the last allocated bo in "hole", and we always try to allocate
 * after the last allocated bo.  The principle is that in a linear GPU
 * ring progression, what is after last is the oldest bo we allocated and
 * thus the first one that should no longer be in use by the GPU.
 *
 * If that's not the case, we skip over the bo after last to the closest
 * done bo, if one exists.  If none exists and we are not asked to block,
 * we report failure to allocate.
 *
 * If we are asked to block, we wait on the oldest fence of each ring.
 * We just wait for any of those fences to complete.
 */
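/*
 * Typical usage, as an illustrative sketch (not part of this file): the
 * 256 KB manager size, the 4 KB / 256-byte request and the "fence"
 * protecting the last GPU use are made-up example values; only the four
 * entry points and their signatures are the ones defined below.
 *
 *	struct amdgpu_sa_manager mgr;
 *	struct amdgpu_sa_bo *sa_bo;
 *	int r;
 *
 *	r = amdgpu_sa_bo_manager_init(adev, &mgr, 256 * 1024,
 *				      AMDGPU_GPU_PAGE_SIZE,
 *				      AMDGPU_GEM_DOMAIN_GTT);
 *	if (r)
 *		return r;
 *
 *	r = amdgpu_sa_bo_new(&mgr, &sa_bo, 4096, 256);
 *	if (!r) {
 *		// the piece lives at mgr.gpu_addr + sa_bo->soffset
 *		// ...
 *		// queue the free; reclaimed once "fence" signals
 *		amdgpu_sa_bo_free(adev, &sa_bo, fence);
 *	}
 *
 *	amdgpu_sa_bo_manager_fini(adev, &mgr);
 */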
#include <drm/drmP.h>
#include "amdgpu.h"

static void amdgpu_sa_bo_remove_locked(struct amdgpu_sa_bo *sa_bo);
static void amdgpu_sa_bo_try_free(struct amdgpu_sa_manager *sa_manager);
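
/**
 * amdgpu_sa_bo_manager_init - set up a sub-allocation manager
 *
 * @adev: amdgpu device
 * @sa_manager: manager to initialize
 * @size: size of the backing buffer in bytes
 * @align: maximum alignment callers may request
 * @domain: memory domain (VRAM/GTT) for the backing buffer
 *
 * Initializes the wait queue, hole pointer and per-ring fence lists,
 * backs the manager with a single pinned, CPU-mapped kernel bo of
 * @size bytes, and zeroes it.
 */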
int amdgpu_sa_bo_manager_init(struct amdgpu_device *adev,
			      struct amdgpu_sa_manager *sa_manager,
			      unsigned size, u32 align, u32 domain)
{
	int i, r;

	init_waitqueue_head(&sa_manager->wq);
	sa_manager->bo = NULL;
	sa_manager->size = size;
	sa_manager->domain = domain;
	sa_manager->align = align;
	sa_manager->hole = &sa_manager->olist;
	INIT_LIST_HEAD(&sa_manager->olist);
	for (i = 0; i < AMDGPU_SA_NUM_FENCE_LISTS; ++i)
		INIT_LIST_HEAD(&sa_manager->flist[i]);

	r = amdgpu_bo_create_kernel(adev, size, align, domain, &sa_manager->bo,
				    &sa_manager->gpu_addr, &sa_manager->cpu_ptr);
	if (r) {
		dev_err(adev->dev, "(%d) failed to allocate bo for manager\n", r);
		return r;
	}

	memset(sa_manager->cpu_ptr, 0, sa_manager->size);
	return r;
}
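
/**
 * amdgpu_sa_bo_manager_fini - tear down a sub-allocation manager
 *
 * @adev: amdgpu device
 * @sa_manager: manager to tear down
 *
 * Reclaims all sub-allocations whose fences have signaled, complains if
 * live ones remain but removes them anyway, then releases the backing
 * kernel bo.
 */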
void amdgpu_sa_bo_manager_fini(struct amdgpu_device *adev,
			       struct amdgpu_sa_manager *sa_manager)
{
	struct amdgpu_sa_bo *sa_bo, *tmp;

	if (sa_manager->bo == NULL) {
		dev_err(adev->dev, "no bo for sa manager\n");
		return;
	}

	if (!list_empty(&sa_manager->olist)) {
		sa_manager->hole = &sa_manager->olist;
		amdgpu_sa_bo_try_free(sa_manager);
		if (!list_empty(&sa_manager->olist)) {
			dev_err(adev->dev, "sa_manager is not empty, clearing anyway\n");
		}
	}
	list_for_each_entry_safe(sa_bo, tmp, &sa_manager->olist, olist) {
		amdgpu_sa_bo_remove_locked(sa_bo);
	}

	amdgpu_bo_free_kernel(&sa_manager->bo, &sa_manager->gpu_addr,
			      &sa_manager->cpu_ptr);
	sa_manager->size = 0;
}
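
/*
 * Remove a sub-allocation from both tracking lists and free it.  If the
 * manager's hole pointer currently points at this bo, move it back to
 * the previous entry so it stays valid.  Called with sa_manager->wq.lock
 * held (except during teardown).
 */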
static void amdgpu_sa_bo_remove_locked(struct amdgpu_sa_bo *sa_bo)
{
	struct amdgpu_sa_manager *sa_manager = sa_bo->manager;

	if (sa_manager->hole == &sa_bo->olist) {
		sa_manager->hole = sa_bo->olist.prev;
	}
	list_del_init(&sa_bo->olist);
	list_del_init(&sa_bo->flist);
	dma_fence_put(sa_bo->fence);
	kfree(sa_bo);
}
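
/*
 * Starting right after the hole, free every consecutive sub-allocation
 * whose fence has already signaled.  Stops at the first entry whose
 * fence is missing or not yet signaled, so only the oldest finished bos
 * are reclaimed.
 */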
static void amdgpu_sa_bo_try_free(struct amdgpu_sa_manager *sa_manager)
{
	struct amdgpu_sa_bo *sa_bo, *tmp;

	if (sa_manager->hole->next == &sa_manager->olist)
		return;

	sa_bo = list_entry(sa_manager->hole->next, struct amdgpu_sa_bo, olist);
	list_for_each_entry_safe_from(sa_bo, tmp, &sa_manager->olist, olist) {
		if (sa_bo->fence == NULL ||
		    !dma_fence_is_signaled(sa_bo->fence)) {
			return;
		}
		amdgpu_sa_bo_remove_locked(sa_bo);
	}
}
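
/*
 * The free "hole" starts at the end offset of the entry that "hole"
 * points at (or at offset 0 when it points at the list head) and runs
 * to the start offset of the next entry (or to the end of the buffer).
 * For example, with allocations [0,4096) and [8192,12288) and "hole"
 * pointing at the first one, the hole spans [4096, 8192).
 */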
static inline unsigned amdgpu_sa_bo_hole_soffset(struct amdgpu_sa_manager *sa_manager)
{
	struct list_head *hole = sa_manager->hole;

	if (hole != &sa_manager->olist) {
		return list_entry(hole, struct amdgpu_sa_bo, olist)->eoffset;
	}
	return 0;
}

static inline unsigned amdgpu_sa_bo_hole_eoffset(struct amdgpu_sa_manager *sa_manager)
{
	struct list_head *hole = sa_manager->hole;

	if (hole->next != &sa_manager->olist) {
		return list_entry(hole->next, struct amdgpu_sa_bo, olist)->soffset;
	}
	return sa_manager->size;
}
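
/*
 * Try to place @sa_bo in the current hole.  "wasted" is the padding
 * needed to bring the hole's start up to @align: e.g. with soffset ==
 * 100 and align == 64, wasted = (64 - 100 % 64) % 64 = 28, so the
 * allocation would start at offset 128.
 */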
static bool amdgpu_sa_bo_try_alloc(struct amdgpu_sa_manager *sa_manager,
				   struct amdgpu_sa_bo *sa_bo,
				   unsigned size, unsigned align)
{
	unsigned soffset, eoffset, wasted;

	soffset = amdgpu_sa_bo_hole_soffset(sa_manager);
	eoffset = amdgpu_sa_bo_hole_eoffset(sa_manager);
	wasted = (align - (soffset % align)) % align;

	if ((eoffset - soffset) >= (size + wasted)) {
		soffset += wasted;

		sa_bo->manager = sa_manager;
		sa_bo->soffset = soffset;
		sa_bo->eoffset = soffset + size;
		list_add(&sa_bo->olist, sa_manager->hole);
		INIT_LIST_HEAD(&sa_bo->flist);
		sa_manager->hole = &sa_bo->olist;
		return true;
	}
	return false;
}
/**
 * amdgpu_sa_event - Check if we can stop waiting
 *
 * @sa_manager: pointer to the sa_manager
 * @size: number of bytes we want to allocate
 * @align: alignment we need to match
 *
 * Check if either there is a fence we can wait for or
 * enough free memory to satisfy the allocation directly.
 */
static bool amdgpu_sa_event(struct amdgpu_sa_manager *sa_manager,
			    unsigned size, unsigned align)
{
	unsigned soffset, eoffset, wasted;
	int i;

	for (i = 0; i < AMDGPU_SA_NUM_FENCE_LISTS; ++i)
		if (!list_empty(&sa_manager->flist[i]))
			return true;

	soffset = amdgpu_sa_bo_hole_soffset(sa_manager);
	eoffset = amdgpu_sa_bo_hole_eoffset(sa_manager);
	wasted = (align - (soffset % align)) % align;

	if ((eoffset - soffset) >= (size + wasted)) {
		return true;
	}

	return false;
}
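
/*
 * Advance the hole past already-signaled allocations.  Returns true if
 * the hole moved (so the caller should retry the allocation), false if
 * nothing could be reclaimed; in that case @fences holds the oldest
 * unsignaled fence of each ring for the caller to wait on.  Distances
 * are measured modulo the buffer size: with a 64 KB manager, a hole
 * start of 48 KB and a candidate at offset 8 KB, the candidate counts
 * as 8 KB + 64 KB - 48 KB = 24 KB ahead of the hole.
 */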
static bool amdgpu_sa_bo_next_hole(struct amdgpu_sa_manager *sa_manager,
				   struct dma_fence **fences,
				   unsigned *tries)
{
	struct amdgpu_sa_bo *best_bo = NULL;
	unsigned i, soffset, best, tmp;

	/* if hole points to the end of the buffer */
	if (sa_manager->hole->next == &sa_manager->olist) {
		/* try again with its beginning */
		sa_manager->hole = &sa_manager->olist;
		return true;
	}

	soffset = amdgpu_sa_bo_hole_soffset(sa_manager);
	/* to handle wrap around we add sa_manager->size */
	best = sa_manager->size * 2;
	/* go over all fence lists and try to find the closest sa_bo
	 * after the current last
	 */
	for (i = 0; i < AMDGPU_SA_NUM_FENCE_LISTS; ++i) {
		struct amdgpu_sa_bo *sa_bo;

		if (list_empty(&sa_manager->flist[i]))
			continue;

		sa_bo = list_first_entry(&sa_manager->flist[i],
					 struct amdgpu_sa_bo, flist);

		if (!dma_fence_is_signaled(sa_bo->fence)) {
			fences[i] = sa_bo->fence;
			continue;
		}

		/* limit the number of tries each ring gets */
		if (tries[i] > 2) {
			continue;
		}

		tmp = sa_bo->soffset;
		if (tmp < soffset) {
			/* wrap around, pretend it's after */
			tmp += sa_manager->size;
		}
		tmp -= soffset;
		if (tmp < best) {
			/* this sa bo is the closest one */
			best = tmp;
			best_bo = sa_bo;
		}
	}

	if (best_bo) {
		uint32_t idx = best_bo->fence->context;

		idx %= AMDGPU_SA_NUM_FENCE_LISTS;
		++tries[idx];
		sa_manager->hole = best_bo->olist.prev;

		/* we knew that this one is signaled,
		 * so it's safe to remove it
		 */
		amdgpu_sa_bo_remove_locked(best_bo);
		return true;
	}
	return false;
}
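
/**
 * amdgpu_sa_bo_new - allocate a piece of the manager's buffer
 *
 * @sa_manager: manager to allocate from
 * @sa_bo: filled with the new sub-allocation on success
 * @size: number of bytes to allocate
 * @align: required alignment, must not exceed the manager's alignment
 *
 * Tries the current hole first, then skips over signaled allocations;
 * if nothing fits, blocks (interruptibly) until a fence completes or
 * free space appears.  Returns 0 on success or a negative error code.
 */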
int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
		     struct amdgpu_sa_bo **sa_bo,
		     unsigned size, unsigned align)
{
	struct dma_fence *fences[AMDGPU_SA_NUM_FENCE_LISTS];
	unsigned tries[AMDGPU_SA_NUM_FENCE_LISTS];
	unsigned count;
	int i, r;
	signed long t;

	if (WARN_ON_ONCE(align > sa_manager->align))
		return -EINVAL;

	if (WARN_ON_ONCE(size > sa_manager->size))
		return -EINVAL;

	*sa_bo = kmalloc(sizeof(struct amdgpu_sa_bo), GFP_KERNEL);
	if (!(*sa_bo))
		return -ENOMEM;
	(*sa_bo)->manager = sa_manager;
	(*sa_bo)->fence = NULL;
	INIT_LIST_HEAD(&(*sa_bo)->olist);
	INIT_LIST_HEAD(&(*sa_bo)->flist);

	spin_lock(&sa_manager->wq.lock);
	do {
		for (i = 0; i < AMDGPU_SA_NUM_FENCE_LISTS; ++i) {
			fences[i] = NULL;
			tries[i] = 0;
		}

		do {
			amdgpu_sa_bo_try_free(sa_manager);

			if (amdgpu_sa_bo_try_alloc(sa_manager, *sa_bo,
						   size, align)) {
				spin_unlock(&sa_manager->wq.lock);
				return 0;
			}

			/* see if we can skip over some allocations */
		} while (amdgpu_sa_bo_next_hole(sa_manager, fences, tries));

		for (i = 0, count = 0; i < AMDGPU_SA_NUM_FENCE_LISTS; ++i)
			if (fences[i])
				fences[count++] = dma_fence_get(fences[i]);

		if (count) {
			spin_unlock(&sa_manager->wq.lock);
			t = dma_fence_wait_any_timeout(fences, count, false,
						       MAX_SCHEDULE_TIMEOUT,
						       NULL);
			for (i = 0; i < count; ++i)
				dma_fence_put(fences[i]);

			r = (t > 0) ? 0 : t;
			spin_lock(&sa_manager->wq.lock);
		} else {
			/* if we have nothing to wait for, block */
			r = wait_event_interruptible_locked(
				sa_manager->wq,
				amdgpu_sa_event(sa_manager, size, align)
			);
		}
	} while (!r);

	spin_unlock(&sa_manager->wq.lock);
	kfree(*sa_bo);
	*sa_bo = NULL;

	return r;
}
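
/**
 * amdgpu_sa_bo_free - free a sub-allocation, deferred until @fence signals
 *
 * @adev: amdgpu device (unused here, kept for API symmetry)
 * @sa_bo: sub-allocation to free; set to NULL on return
 * @fence: fence protecting the last use, or NULL if already idle
 *
 * If @fence has not signaled yet, the piece is queued on the fence list
 * of @fence's context and reclaimed later; otherwise it is freed
 * immediately.  Waiters in amdgpu_sa_bo_new() are woken either way.
 */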
void amdgpu_sa_bo_free(struct amdgpu_device *adev, struct amdgpu_sa_bo **sa_bo,
		       struct dma_fence *fence)
{
	struct amdgpu_sa_manager *sa_manager;

	if (sa_bo == NULL || *sa_bo == NULL) {
		return;
	}

	sa_manager = (*sa_bo)->manager;
	spin_lock(&sa_manager->wq.lock);
	if (fence && !dma_fence_is_signaled(fence)) {
		uint32_t idx;

		(*sa_bo)->fence = dma_fence_get(fence);
		idx = fence->context % AMDGPU_SA_NUM_FENCE_LISTS;
		list_add_tail(&(*sa_bo)->flist, &sa_manager->flist[idx]);
	} else {
		amdgpu_sa_bo_remove_locked(*sa_bo);
	}
	wake_up_all_locked(&sa_manager->wq);
	spin_unlock(&sa_manager->wq.lock);
	*sa_bo = NULL;
}

#if defined(CONFIG_DEBUG_FS)
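/*
 * Dump every tracked sub-allocation to a debugfs seq_file, marking the
 * current hole position with '>' and listing the protecting fence, if
 * any, for each entry.
 */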
void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
				  struct seq_file *m)
{
	struct amdgpu_sa_bo *i;

	spin_lock(&sa_manager->wq.lock);
	list_for_each_entry(i, &sa_manager->olist, olist) {
		uint64_t soffset = i->soffset + sa_manager->gpu_addr;
		uint64_t eoffset = i->eoffset + sa_manager->gpu_addr;

		if (&i->olist == sa_manager->hole) {
			seq_printf(m, ">");
		} else {
			seq_printf(m, " ");
		}
		seq_printf(m, "[0x%010llx 0x%010llx] size %8lld",
			   soffset, eoffset, eoffset - soffset);
		if (i->fence)
			seq_printf(m, " protected by 0x%08x on context %llu",
				   i->fence->seqno, i->fence->context);
		seq_printf(m, "\n");
	}
	spin_unlock(&sa_manager->wq.lock);
}
#endif