amdgpu_gtt_mgr.c 7.4 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300
  1. /*
  2. * Copyright 2016 Advanced Micro Devices, Inc.
  3. *
  4. * Permission is hereby granted, free of charge, to any person obtaining a
  5. * copy of this software and associated documentation files (the "Software"),
  6. * to deal in the Software without restriction, including without limitation
  7. * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8. * and/or sell copies of the Software, and to permit persons to whom the
  9. * Software is furnished to do so, subject to the following conditions:
  10. *
  11. * The above copyright notice and this permission notice shall be included in
  12. * all copies or substantial portions of the Software.
  13. *
  14. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
  17. * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  18. * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  19. * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  20. * OTHER DEALINGS IN THE SOFTWARE.
  21. *
  22. * Authors: Christian König
  23. */
  24. #include <drm/drmP.h>
  25. #include "amdgpu.h"
/*
 * struct amdgpu_gtt_mgr - per-device manager for GTT address space
 *
 * @mm:        DRM MM range allocator handing out GART page ranges
 * @lock:      protects @mm against concurrent insert/remove/print
 * @available: pages still available for allocation (counted in pages,
 *             not bytes; see amdgpu_gtt_mgr_usage())
 */
struct amdgpu_gtt_mgr {
	struct drm_mm mm;
	spinlock_t lock;
	atomic64_t available;
};
/*
 * struct amdgpu_gtt_node - a single GTT allocation
 *
 * @node: DRM MM node; node.start stays AMDGPU_BO_INVALID_OFFSET until an
 *        actual GART address is assigned by amdgpu_gtt_mgr_alloc()
 * @tbo:  TTM BO owning this range; walked by amdgpu_gtt_mgr_recover() to
 *        re-bind GART entries
 */
struct amdgpu_gtt_node {
	struct drm_mm_node node;
	struct ttm_buffer_object *tbo;
};
  35. /**
  36. * amdgpu_gtt_mgr_init - init GTT manager and DRM MM
  37. *
  38. * @man: TTM memory type manager
  39. * @p_size: maximum size of GTT
  40. *
  41. * Allocate and initialize the GTT manager.
  42. */
  43. static int amdgpu_gtt_mgr_init(struct ttm_mem_type_manager *man,
  44. unsigned long p_size)
  45. {
  46. struct amdgpu_device *adev = amdgpu_ttm_adev(man->bdev);
  47. struct amdgpu_gtt_mgr *mgr;
  48. uint64_t start, size;
  49. mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
  50. if (!mgr)
  51. return -ENOMEM;
  52. start = AMDGPU_GTT_MAX_TRANSFER_SIZE * AMDGPU_GTT_NUM_TRANSFER_WINDOWS;
  53. size = (adev->mc.gart_size >> PAGE_SHIFT) - start;
  54. drm_mm_init(&mgr->mm, start, size);
  55. spin_lock_init(&mgr->lock);
  56. atomic64_set(&mgr->available, p_size);
  57. man->priv = mgr;
  58. return 0;
  59. }
  60. /**
  61. * amdgpu_gtt_mgr_fini - free and destroy GTT manager
  62. *
  63. * @man: TTM memory type manager
  64. *
  65. * Destroy and free the GTT manager, returns -EBUSY if ranges are still
  66. * allocated inside it.
  67. */
  68. static int amdgpu_gtt_mgr_fini(struct ttm_mem_type_manager *man)
  69. {
  70. struct amdgpu_gtt_mgr *mgr = man->priv;
  71. drm_mm_takedown(&mgr->mm);
  72. spin_unlock(&mgr->lock);
  73. kfree(mgr);
  74. man->priv = NULL;
  75. return 0;
  76. }
  77. /**
  78. * amdgpu_gtt_mgr_has_gart_addr - Check if mem has address space
  79. *
  80. * @mem: the mem object to check
  81. *
  82. * Check if a mem object has already address space allocated.
  83. */
  84. bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_mem_reg *mem)
  85. {
  86. struct amdgpu_gtt_node *node = mem->mm_node;
  87. return (node->node.start != AMDGPU_BO_INVALID_OFFSET);
  88. }
  89. /**
  90. * amdgpu_gtt_mgr_alloc - allocate new ranges
  91. *
  92. * @man: TTM memory type manager
  93. * @tbo: TTM BO we need this range for
  94. * @place: placement flags and restrictions
  95. * @mem: the resulting mem object
  96. *
  97. * Allocate the address space for a node.
  98. */
  99. static int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man,
  100. struct ttm_buffer_object *tbo,
  101. const struct ttm_place *place,
  102. struct ttm_mem_reg *mem)
  103. {
  104. struct amdgpu_device *adev = amdgpu_ttm_adev(man->bdev);
  105. struct amdgpu_gtt_mgr *mgr = man->priv;
  106. struct amdgpu_gtt_node *node = mem->mm_node;
  107. enum drm_mm_insert_mode mode;
  108. unsigned long fpfn, lpfn;
  109. int r;
  110. if (amdgpu_gtt_mgr_has_gart_addr(mem))
  111. return 0;
  112. if (place)
  113. fpfn = place->fpfn;
  114. else
  115. fpfn = 0;
  116. if (place && place->lpfn)
  117. lpfn = place->lpfn;
  118. else
  119. lpfn = adev->gart.num_cpu_pages;
  120. mode = DRM_MM_INSERT_BEST;
  121. if (place && place->flags & TTM_PL_FLAG_TOPDOWN)
  122. mode = DRM_MM_INSERT_HIGH;
  123. spin_lock(&mgr->lock);
  124. r = drm_mm_insert_node_in_range(&mgr->mm, &node->node, mem->num_pages,
  125. mem->page_alignment, 0, fpfn, lpfn,
  126. mode);
  127. spin_unlock(&mgr->lock);
  128. if (!r)
  129. mem->start = node->node.start;
  130. return r;
  131. }
/**
 * amdgpu_gtt_mgr_new - allocate a new node
 *
 * @man: TTM memory type manager
 * @tbo: TTM BO we need this range for
 * @place: placement flags and restrictions
 * @mem: the resulting mem object
 *
 * Dummy, allocate the node but no space for it yet. A real GART address is
 * only assigned immediately when the placement restricts it; otherwise the
 * node keeps AMDGPU_BO_INVALID_OFFSET until amdgpu_gtt_mgr_alloc() runs.
 */
static int amdgpu_gtt_mgr_new(struct ttm_mem_type_manager *man,
			      struct ttm_buffer_object *tbo,
			      const struct ttm_place *place,
			      struct ttm_mem_reg *mem)
{
	struct amdgpu_gtt_mgr *mgr = man->priv;
	struct amdgpu_gtt_node *node;
	int r;

	spin_lock(&mgr->lock);
	/*
	 * Only enforce the page budget for fresh allocations or moves from
	 * outside TTM_PL_TT (&tbo->mem == mem, or source not TT); a TT-to-TT
	 * move already accounts for its pages. Returning 0 with mem->mm_node
	 * left NULL signals "no space" to the caller (NOTE(review): per TTM
	 * get_node convention — confirm against ttm_bo_mem_space()).
	 */
	if ((&tbo->mem == mem || tbo->mem.mem_type != TTM_PL_TT) &&
	    atomic64_read(&mgr->available) < mem->num_pages) {
		spin_unlock(&mgr->lock);
		return 0;
	}
	atomic64_sub(mem->num_pages, &mgr->available);
	spin_unlock(&mgr->lock);

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node) {
		r = -ENOMEM;
		goto err_out;
	}

	/* No GART address yet; filled in later by amdgpu_gtt_mgr_alloc(). */
	node->node.start = AMDGPU_BO_INVALID_OFFSET;
	node->node.size = mem->num_pages;
	node->tbo = tbo;
	mem->mm_node = node;

	/* Placement restrictions force immediate address assignment. */
	if (place->fpfn || place->lpfn || place->flags & TTM_PL_FLAG_TOPDOWN) {
		r = amdgpu_gtt_mgr_alloc(man, tbo, place, mem);
		if (unlikely(r)) {
			kfree(node);
			mem->mm_node = NULL;
			/* Deliberate: report "no space" (0) instead of the
			 * insert error, after releasing the reservation. */
			r = 0;
			goto err_out;
		}
	} else {
		mem->start = node->node.start;
	}

	return 0;

err_out:
	/* Return the reserved pages to the pool on any failure path. */
	atomic64_add(mem->num_pages, &mgr->available);

	return r;
}
  183. /**
  184. * amdgpu_gtt_mgr_del - free ranges
  185. *
  186. * @man: TTM memory type manager
  187. * @tbo: TTM BO we need this range for
  188. * @place: placement flags and restrictions
  189. * @mem: TTM memory object
  190. *
  191. * Free the allocated GTT again.
  192. */
  193. static void amdgpu_gtt_mgr_del(struct ttm_mem_type_manager *man,
  194. struct ttm_mem_reg *mem)
  195. {
  196. struct amdgpu_gtt_mgr *mgr = man->priv;
  197. struct amdgpu_gtt_node *node = mem->mm_node;
  198. if (!node)
  199. return;
  200. spin_lock(&mgr->lock);
  201. if (node->node.start != AMDGPU_BO_INVALID_OFFSET)
  202. drm_mm_remove_node(&node->node);
  203. spin_unlock(&mgr->lock);
  204. atomic64_add(mem->num_pages, &mgr->available);
  205. kfree(node);
  206. mem->mm_node = NULL;
  207. }
  208. /**
  209. * amdgpu_gtt_mgr_usage - return usage of GTT domain
  210. *
  211. * @man: TTM memory type manager
  212. *
  213. * Return how many bytes are used in the GTT domain
  214. */
  215. uint64_t amdgpu_gtt_mgr_usage(struct ttm_mem_type_manager *man)
  216. {
  217. struct amdgpu_gtt_mgr *mgr = man->priv;
  218. s64 result = man->size - atomic64_read(&mgr->available);
  219. return (result > 0 ? result : 0) * PAGE_SIZE;
  220. }
  221. int amdgpu_gtt_mgr_recover(struct ttm_mem_type_manager *man)
  222. {
  223. struct amdgpu_gtt_mgr *mgr = man->priv;
  224. struct amdgpu_gtt_node *node;
  225. struct drm_mm_node *mm_node;
  226. int r = 0;
  227. spin_lock(&mgr->lock);
  228. drm_mm_for_each_node(mm_node, &mgr->mm) {
  229. node = container_of(mm_node, struct amdgpu_gtt_node, node);
  230. r = amdgpu_ttm_recover_gart(node->tbo);
  231. if (r)
  232. break;
  233. }
  234. spin_unlock(&mgr->lock);
  235. return r;
  236. }
  237. /**
  238. * amdgpu_gtt_mgr_debug - dump VRAM table
  239. *
  240. * @man: TTM memory type manager
  241. * @printer: DRM printer to use
  242. *
  243. * Dump the table content using printk.
  244. */
  245. static void amdgpu_gtt_mgr_debug(struct ttm_mem_type_manager *man,
  246. struct drm_printer *printer)
  247. {
  248. struct amdgpu_gtt_mgr *mgr = man->priv;
  249. spin_lock(&mgr->lock);
  250. drm_mm_print(&mgr->mm, printer);
  251. spin_unlock(&mgr->lock);
  252. drm_printf(printer, "man size:%llu pages, gtt available:%lld pages, usage:%lluMB\n",
  253. man->size, (u64)atomic64_read(&mgr->available),
  254. amdgpu_gtt_mgr_usage(man) >> 20);
  255. }
/* TTM memory-type-manager callbacks for the GTT domain. */
const struct ttm_mem_type_manager_func amdgpu_gtt_mgr_func = {
	.init = amdgpu_gtt_mgr_init,
	.takedown = amdgpu_gtt_mgr_fini,
	.get_node = amdgpu_gtt_mgr_new,
	.put_node = amdgpu_gtt_mgr_del,
	.debug = amdgpu_gtt_mgr_debug
};