/* amdgpu_gtt_mgr.c - GTT (GART) address space manager for amdgpu/TTM */
/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König
 */
  24. #include <drm/drmP.h>
  25. #include "amdgpu.h"
  26. struct amdgpu_gtt_mgr {
  27. struct drm_mm mm;
  28. spinlock_t lock;
  29. atomic64_t available;
  30. };
  31. /**
  32. * amdgpu_gtt_mgr_init - init GTT manager and DRM MM
  33. *
  34. * @man: TTM memory type manager
  35. * @p_size: maximum size of GTT
  36. *
  37. * Allocate and initialize the GTT manager.
  38. */
  39. static int amdgpu_gtt_mgr_init(struct ttm_mem_type_manager *man,
  40. unsigned long p_size)
  41. {
  42. struct amdgpu_device *adev = amdgpu_ttm_adev(man->bdev);
  43. struct amdgpu_gtt_mgr *mgr;
  44. uint64_t start, size;
  45. mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
  46. if (!mgr)
  47. return -ENOMEM;
  48. start = AMDGPU_GTT_MAX_TRANSFER_SIZE * AMDGPU_GTT_NUM_TRANSFER_WINDOWS;
  49. size = (adev->mc.gart_size >> PAGE_SHIFT) - start;
  50. drm_mm_init(&mgr->mm, start, size);
  51. spin_lock_init(&mgr->lock);
  52. atomic64_set(&mgr->available, p_size);
  53. man->priv = mgr;
  54. return 0;
  55. }
  56. /**
  57. * amdgpu_gtt_mgr_fini - free and destroy GTT manager
  58. *
  59. * @man: TTM memory type manager
  60. *
  61. * Destroy and free the GTT manager, returns -EBUSY if ranges are still
  62. * allocated inside it.
  63. */
  64. static int amdgpu_gtt_mgr_fini(struct ttm_mem_type_manager *man)
  65. {
  66. struct amdgpu_gtt_mgr *mgr = man->priv;
  67. spin_lock(&mgr->lock);
  68. if (!drm_mm_clean(&mgr->mm)) {
  69. spin_unlock(&mgr->lock);
  70. return -EBUSY;
  71. }
  72. drm_mm_takedown(&mgr->mm);
  73. spin_unlock(&mgr->lock);
  74. kfree(mgr);
  75. man->priv = NULL;
  76. return 0;
  77. }
  78. /**
  79. * amdgpu_gtt_mgr_is_allocated - Check if mem has address space
  80. *
  81. * @mem: the mem object to check
  82. *
  83. * Check if a mem object has already address space allocated.
  84. */
  85. bool amdgpu_gtt_mgr_is_allocated(struct ttm_mem_reg *mem)
  86. {
  87. struct drm_mm_node *node = mem->mm_node;
  88. return (node->start != AMDGPU_BO_INVALID_OFFSET);
  89. }
  90. /**
  91. * amdgpu_gtt_mgr_alloc - allocate new ranges
  92. *
  93. * @man: TTM memory type manager
  94. * @tbo: TTM BO we need this range for
  95. * @place: placement flags and restrictions
  96. * @mem: the resulting mem object
  97. *
  98. * Allocate the address space for a node.
  99. */
  100. int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man,
  101. struct ttm_buffer_object *tbo,
  102. const struct ttm_place *place,
  103. struct ttm_mem_reg *mem)
  104. {
  105. struct amdgpu_device *adev = amdgpu_ttm_adev(man->bdev);
  106. struct amdgpu_gtt_mgr *mgr = man->priv;
  107. struct drm_mm_node *node = mem->mm_node;
  108. enum drm_mm_insert_mode mode;
  109. unsigned long fpfn, lpfn;
  110. int r;
  111. if (amdgpu_gtt_mgr_is_allocated(mem))
  112. return 0;
  113. if (place)
  114. fpfn = place->fpfn;
  115. else
  116. fpfn = 0;
  117. if (place && place->lpfn)
  118. lpfn = place->lpfn;
  119. else
  120. lpfn = adev->gart.num_cpu_pages;
  121. mode = DRM_MM_INSERT_BEST;
  122. if (place && place->flags & TTM_PL_FLAG_TOPDOWN)
  123. mode = DRM_MM_INSERT_HIGH;
  124. spin_lock(&mgr->lock);
  125. r = drm_mm_insert_node_in_range(&mgr->mm, node,
  126. mem->num_pages, mem->page_alignment, 0,
  127. fpfn, lpfn, mode);
  128. spin_unlock(&mgr->lock);
  129. if (!r) {
  130. mem->start = node->start;
  131. if (&tbo->mem == mem)
  132. tbo->offset = (tbo->mem.start << PAGE_SHIFT) +
  133. tbo->bdev->man[tbo->mem.mem_type].gpu_offset;
  134. }
  135. return r;
  136. }
  137. /**
  138. * amdgpu_gtt_mgr_new - allocate a new node
  139. *
  140. * @man: TTM memory type manager
  141. * @tbo: TTM BO we need this range for
  142. * @place: placement flags and restrictions
  143. * @mem: the resulting mem object
  144. *
  145. * Dummy, allocate the node but no space for it yet.
  146. */
  147. static int amdgpu_gtt_mgr_new(struct ttm_mem_type_manager *man,
  148. struct ttm_buffer_object *tbo,
  149. const struct ttm_place *place,
  150. struct ttm_mem_reg *mem)
  151. {
  152. struct amdgpu_gtt_mgr *mgr = man->priv;
  153. struct drm_mm_node *node;
  154. int r;
  155. spin_lock(&mgr->lock);
  156. if (atomic64_read(&mgr->available) < mem->num_pages) {
  157. spin_unlock(&mgr->lock);
  158. return 0;
  159. }
  160. atomic64_sub(mem->num_pages, &mgr->available);
  161. spin_unlock(&mgr->lock);
  162. node = kzalloc(sizeof(*node), GFP_KERNEL);
  163. if (!node) {
  164. r = -ENOMEM;
  165. goto err_out;
  166. }
  167. node->start = AMDGPU_BO_INVALID_OFFSET;
  168. node->size = mem->num_pages;
  169. mem->mm_node = node;
  170. if (place->fpfn || place->lpfn || place->flags & TTM_PL_FLAG_TOPDOWN) {
  171. r = amdgpu_gtt_mgr_alloc(man, tbo, place, mem);
  172. if (unlikely(r)) {
  173. kfree(node);
  174. mem->mm_node = NULL;
  175. r = 0;
  176. goto err_out;
  177. }
  178. } else {
  179. mem->start = node->start;
  180. }
  181. return 0;
  182. err_out:
  183. atomic64_add(mem->num_pages, &mgr->available);
  184. return r;
  185. }
  186. /**
  187. * amdgpu_gtt_mgr_del - free ranges
  188. *
  189. * @man: TTM memory type manager
  190. * @tbo: TTM BO we need this range for
  191. * @place: placement flags and restrictions
  192. * @mem: TTM memory object
  193. *
  194. * Free the allocated GTT again.
  195. */
  196. static void amdgpu_gtt_mgr_del(struct ttm_mem_type_manager *man,
  197. struct ttm_mem_reg *mem)
  198. {
  199. struct amdgpu_gtt_mgr *mgr = man->priv;
  200. struct drm_mm_node *node = mem->mm_node;
  201. if (!node)
  202. return;
  203. spin_lock(&mgr->lock);
  204. if (node->start != AMDGPU_BO_INVALID_OFFSET)
  205. drm_mm_remove_node(node);
  206. spin_unlock(&mgr->lock);
  207. atomic64_add(mem->num_pages, &mgr->available);
  208. kfree(node);
  209. mem->mm_node = NULL;
  210. }
  211. /**
  212. * amdgpu_gtt_mgr_usage - return usage of GTT domain
  213. *
  214. * @man: TTM memory type manager
  215. *
  216. * Return how many bytes are used in the GTT domain
  217. */
  218. uint64_t amdgpu_gtt_mgr_usage(struct ttm_mem_type_manager *man)
  219. {
  220. struct amdgpu_gtt_mgr *mgr = man->priv;
  221. return (u64)(man->size - atomic64_read(&mgr->available)) * PAGE_SIZE;
  222. }
  223. /**
  224. * amdgpu_gtt_mgr_debug - dump VRAM table
  225. *
  226. * @man: TTM memory type manager
  227. * @printer: DRM printer to use
  228. *
  229. * Dump the table content using printk.
  230. */
  231. static void amdgpu_gtt_mgr_debug(struct ttm_mem_type_manager *man,
  232. struct drm_printer *printer)
  233. {
  234. struct amdgpu_gtt_mgr *mgr = man->priv;
  235. spin_lock(&mgr->lock);
  236. drm_mm_print(&mgr->mm, printer);
  237. spin_unlock(&mgr->lock);
  238. drm_printf(printer, "man size:%llu pages, gtt available:%llu pages, usage:%lluMB\n",
  239. man->size, (u64)atomic64_read(&mgr->available),
  240. amdgpu_gtt_mgr_usage(man) >> 20);
  241. }
  242. const struct ttm_mem_type_manager_func amdgpu_gtt_mgr_func = {
  243. .init = amdgpu_gtt_mgr_init,
  244. .takedown = amdgpu_gtt_mgr_fini,
  245. .get_node = amdgpu_gtt_mgr_new,
  246. .put_node = amdgpu_gtt_mgr_del,
  247. .debug = amdgpu_gtt_mgr_debug
  248. };