amdgpu_gtt_mgr.c
/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König
 */

#include <drm/drmP.h>
#include "amdgpu.h"
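
/*
 * The manager combines a drm_mm range allocator for GART address space with
 * an "available" counter that tracks how many GTT pages are still free for
 * new allocations.
 */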
struct amdgpu_gtt_mgr {
        struct drm_mm mm;
        spinlock_t lock;
        atomic64_t available;
};

/**
 * amdgpu_gtt_mgr_init - init GTT manager and DRM MM
 *
 * @man: TTM memory type manager
 * @p_size: maximum size of GTT
 *
 * Allocate and initialize the GTT manager.
 */
static int amdgpu_gtt_mgr_init(struct ttm_mem_type_manager *man,
                               unsigned long p_size)
{
        struct amdgpu_device *adev = amdgpu_ttm_adev(man->bdev);
        struct amdgpu_gtt_mgr *mgr;
        uint64_t start, size;

        mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
        if (!mgr)
                return -ENOMEM;
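
        /*
         * The start of the GART aperture is reserved for the SDMA transfer
         * windows used while moving buffers, so only the range behind that
         * reservation is handed to the drm_mm allocator.
         */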
        start = AMDGPU_GTT_MAX_TRANSFER_SIZE * AMDGPU_GTT_NUM_TRANSFER_WINDOWS;
        size = (adev->mc.gart_size >> PAGE_SHIFT) - start;
        drm_mm_init(&mgr->mm, start, size);
        spin_lock_init(&mgr->lock);
        atomic64_set(&mgr->available, p_size);
        man->priv = mgr;
        return 0;
}

/**
 * amdgpu_gtt_mgr_fini - free and destroy GTT manager
 *
 * @man: TTM memory type manager
 *
 * Destroy and free the GTT manager, returns -EBUSY if ranges are still
 * allocated inside it.
 */
static int amdgpu_gtt_mgr_fini(struct ttm_mem_type_manager *man)
{
        struct amdgpu_gtt_mgr *mgr = man->priv;

        spin_lock(&mgr->lock);
        if (!drm_mm_clean(&mgr->mm)) {
                spin_unlock(&mgr->lock);
                return -EBUSY;
        }

        drm_mm_takedown(&mgr->mm);
        spin_unlock(&mgr->lock);
        kfree(mgr);
        man->priv = NULL;
        return 0;
}

/**
 * amdgpu_gtt_mgr_is_allocated - Check if mem has address space
 *
 * @mem: the mem object to check
 *
 * Check if a mem object already has address space allocated.
 */
bool amdgpu_gtt_mgr_is_allocated(struct ttm_mem_reg *mem)
{
        struct drm_mm_node *node = mem->mm_node;

        return (node->start != AMDGPU_BO_INVALID_OFFSET);
}

/**
 * amdgpu_gtt_mgr_alloc - allocate new ranges
 *
 * @man: TTM memory type manager
 * @tbo: TTM BO we need this range for
 * @place: placement flags and restrictions
 * @mem: the resulting mem object
 *
 * Allocate the address space for a node.
 */
static int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man,
                                struct ttm_buffer_object *tbo,
                                const struct ttm_place *place,
                                struct ttm_mem_reg *mem)
{
        struct amdgpu_device *adev = amdgpu_ttm_adev(man->bdev);
        struct amdgpu_gtt_mgr *mgr = man->priv;
        struct drm_mm_node *node = mem->mm_node;
        enum drm_mm_insert_mode mode;
        unsigned long fpfn, lpfn;
        int r;

        if (amdgpu_gtt_mgr_is_allocated(mem))
                return 0;
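
        /* Honour the placement limits, falling back to the whole GART range. */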
        if (place)
                fpfn = place->fpfn;
        else
                fpfn = 0;

        if (place && place->lpfn)
                lpfn = place->lpfn;
        else
                lpfn = adev->gart.num_cpu_pages;

        mode = DRM_MM_INSERT_BEST;
        if (place && place->flags & TTM_PL_FLAG_TOPDOWN)
                mode = DRM_MM_INSERT_HIGH;

        spin_lock(&mgr->lock);
        r = drm_mm_insert_node_in_range(&mgr->mm, node,
                                        mem->num_pages, mem->page_alignment, 0,
                                        fpfn, lpfn, mode);
        spin_unlock(&mgr->lock);

        if (!r)
                mem->start = node->start;

        return r;
}

/**
 * amdgpu_gtt_mgr_new - allocate a new node
 *
 * @man: TTM memory type manager
 * @tbo: TTM BO we need this range for
 * @place: placement flags and restrictions
 * @mem: the resulting mem object
 *
 * Allocate the node, but don't assign address space for it yet unless the
 * placement requires it.
 */
static int amdgpu_gtt_mgr_new(struct ttm_mem_type_manager *man,
                              struct ttm_buffer_object *tbo,
                              const struct ttm_place *place,
                              struct ttm_mem_reg *mem)
{
        struct amdgpu_gtt_mgr *mgr = man->priv;
        struct drm_mm_node *node;
        int r;

        spin_lock(&mgr->lock);
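        /*
         * A BO that is already backed by GTT may over-commit, since its old
         * range is released once the move completes.  Otherwise, when not
         * enough pages are available, return 0 with mem->mm_node left NULL so
         * TTM treats the domain as full and tries eviction instead of failing.
         */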
        if ((&tbo->mem == mem || tbo->mem.mem_type != TTM_PL_TT) &&
            atomic64_read(&mgr->available) < mem->num_pages) {
                spin_unlock(&mgr->lock);
                return 0;
        }
        atomic64_sub(mem->num_pages, &mgr->available);
        spin_unlock(&mgr->lock);

        node = kzalloc(sizeof(*node), GFP_KERNEL);
        if (!node) {
                r = -ENOMEM;
                goto err_out;
        }

        node->start = AMDGPU_BO_INVALID_OFFSET;
        node->size = mem->num_pages;
        mem->mm_node = node;
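
        /*
         * Only placements with range restrictions or TOPDOWN need GART address
         * space right away; everything else gets its offset on demand when the
         * BO is actually bound to GART.
         */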
        if (place->fpfn || place->lpfn || place->flags & TTM_PL_FLAG_TOPDOWN) {
                r = amdgpu_gtt_mgr_alloc(man, tbo, place, mem);
                if (unlikely(r)) {
                        kfree(node);
                        mem->mm_node = NULL;
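                        /* Report "out of space" so TTM can evict and retry. */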
                        r = 0;
                        goto err_out;
                }
        } else {
                mem->start = node->start;
        }

        return 0;

err_out:
        atomic64_add(mem->num_pages, &mgr->available);

        return r;
}

/**
 * amdgpu_gtt_mgr_del - free ranges
 *
 * @man: TTM memory type manager
 * @mem: TTM memory object
 *
 * Free the allocated GTT again.
 */
static void amdgpu_gtt_mgr_del(struct ttm_mem_type_manager *man,
                               struct ttm_mem_reg *mem)
{
        struct amdgpu_gtt_mgr *mgr = man->priv;
        struct drm_mm_node *node = mem->mm_node;

        if (!node)
                return;

        spin_lock(&mgr->lock);
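        /* Nodes which never received GART address space aren't in the mm. */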
        if (node->start != AMDGPU_BO_INVALID_OFFSET)
                drm_mm_remove_node(node);
        spin_unlock(&mgr->lock);
        atomic64_add(mem->num_pages, &mgr->available);

        kfree(node);
        mem->mm_node = NULL;
}

/**
 * amdgpu_gtt_mgr_usage - return usage of GTT domain
 *
 * @man: TTM memory type manager
 *
 * Return how many bytes are used in the GTT domain
 */
uint64_t amdgpu_gtt_mgr_usage(struct ttm_mem_type_manager *man)
{
        struct amdgpu_gtt_mgr *mgr = man->priv;
        s64 result = man->size - atomic64_read(&mgr->available);

        return (result > 0 ? result : 0) * PAGE_SIZE;
}

/**
 * amdgpu_gtt_mgr_debug - dump GTT table
 *
 * @man: TTM memory type manager
 * @printer: DRM printer to use
 *
 * Dump the table content and usage to the given DRM printer.
 */
static void amdgpu_gtt_mgr_debug(struct ttm_mem_type_manager *man,
                                 struct drm_printer *printer)
{
        struct amdgpu_gtt_mgr *mgr = man->priv;

        spin_lock(&mgr->lock);
        drm_mm_print(&mgr->mm, printer);
        spin_unlock(&mgr->lock);

        drm_printf(printer, "man size:%llu pages, gtt available:%lld pages, usage:%lluMB\n",
                   man->size, (u64)atomic64_read(&mgr->available),
                   amdgpu_gtt_mgr_usage(man) >> 20);
}

const struct ttm_mem_type_manager_func amdgpu_gtt_mgr_func = {
        .init = amdgpu_gtt_mgr_init,
        .takedown = amdgpu_gtt_mgr_fini,
        .get_node = amdgpu_gtt_mgr_new,
        .put_node = amdgpu_gtt_mgr_del,
        .debug = amdgpu_gtt_mgr_debug
};