amdgpu_gtt_mgr.c
  1. /*
  2. * Copyright 2016 Advanced Micro Devices, Inc.
  3. *
  4. * Permission is hereby granted, free of charge, to any person obtaining a
  5. * copy of this software and associated documentation files (the "Software"),
  6. * to deal in the Software without restriction, including without limitation
  7. * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8. * and/or sell copies of the Software, and to permit persons to whom the
  9. * Software is furnished to do so, subject to the following conditions:
  10. *
  11. * The above copyright notice and this permission notice shall be included in
  12. * all copies or substantial portions of the Software.
  13. *
  14. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
  17. * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  18. * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  19. * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  20. * OTHER DEALINGS IN THE SOFTWARE.
  21. *
  22. * Authors: Christian König
  23. */
  24. #include <drm/drmP.h>
  25. #include "amdgpu.h"
/*
 * Per-manager state for the GTT domain: a drm_mm range allocator over the
 * GTT address space plus an availability counter, both guarded by @lock.
 */
struct amdgpu_gtt_mgr {
	struct drm_mm mm;	/* range allocator for GTT address space */
	spinlock_t lock;	/* protects mm and available */
	uint64_t available;	/* pages not yet reserved by any node */
};
  31. /**
  32. * amdgpu_gtt_mgr_init - init GTT manager and DRM MM
  33. *
  34. * @man: TTM memory type manager
  35. * @p_size: maximum size of GTT
  36. *
  37. * Allocate and initialize the GTT manager.
  38. */
  39. static int amdgpu_gtt_mgr_init(struct ttm_mem_type_manager *man,
  40. unsigned long p_size)
  41. {
  42. struct amdgpu_gtt_mgr *mgr;
  43. mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
  44. if (!mgr)
  45. return -ENOMEM;
  46. drm_mm_init(&mgr->mm, 0, p_size);
  47. spin_lock_init(&mgr->lock);
  48. mgr->available = p_size;
  49. man->priv = mgr;
  50. return 0;
  51. }
  52. /**
  53. * amdgpu_gtt_mgr_fini - free and destroy GTT manager
  54. *
  55. * @man: TTM memory type manager
  56. *
  57. * Destroy and free the GTT manager, returns -EBUSY if ranges are still
  58. * allocated inside it.
  59. */
  60. static int amdgpu_gtt_mgr_fini(struct ttm_mem_type_manager *man)
  61. {
  62. struct amdgpu_gtt_mgr *mgr = man->priv;
  63. spin_lock(&mgr->lock);
  64. if (!drm_mm_clean(&mgr->mm)) {
  65. spin_unlock(&mgr->lock);
  66. return -EBUSY;
  67. }
  68. drm_mm_takedown(&mgr->mm);
  69. spin_unlock(&mgr->lock);
  70. kfree(mgr);
  71. man->priv = NULL;
  72. return 0;
  73. }
/**
 * amdgpu_gtt_mgr_alloc - allocate new ranges
 *
 * @man: TTM memory type manager
 * @tbo: TTM BO we need this range for
 * @place: placement flags and restrictions (may be NULL)
 * @mem: the resulting mem object
 *
 * Allocate the address space for a node.  A no-op when the node already
 * has a valid start offset.  Returns 0 on success or the error from
 * drm_mm_insert_node_in_range() (e.g. -ENOSPC).
 */
int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man,
			 struct ttm_buffer_object *tbo,
			 const struct ttm_place *place,
			 struct ttm_mem_reg *mem)
{
	struct amdgpu_gtt_mgr *mgr = man->priv;
	struct drm_mm_node *node = mem->mm_node;
	enum drm_mm_insert_mode mode;
	unsigned long fpfn, lpfn;
	int r;

	/* Address space already assigned; nothing to do. */
	if (node->start != AMDGPU_BO_INVALID_OFFSET)
		return 0;

	/* Derive the search window [fpfn, lpfn) from the placement,
	 * falling back to the whole GTT when unrestricted.
	 */
	if (place)
		fpfn = place->fpfn;
	else
		fpfn = 0;

	if (place && place->lpfn)
		lpfn = place->lpfn;
	else
		lpfn = man->size;

	mode = DRM_MM_INSERT_BEST;
	if (place && place->flags & TTM_PL_FLAG_TOPDOWN)
		mode = DRM_MM_INSERT_HIGH;

	spin_lock(&mgr->lock);
	r = drm_mm_insert_node_in_range(&mgr->mm, node,
					mem->num_pages, mem->page_alignment, 0,
					fpfn, lpfn, mode);
	spin_unlock(&mgr->lock);

	if (!r) {
		mem->start = node->start;
		/* If @mem is the BO's current placement, refresh the BO's
		 * GPU offset to reflect the newly assigned start.
		 */
		if (&tbo->mem == mem)
			tbo->offset = (tbo->mem.start << PAGE_SHIFT) +
			    tbo->bdev->man[tbo->mem.mem_type].gpu_offset;
	}

	return r;
}
  120. /**
  121. * amdgpu_gtt_mgr_new - allocate a new node
  122. *
  123. * @man: TTM memory type manager
  124. * @tbo: TTM BO we need this range for
  125. * @place: placement flags and restrictions
  126. * @mem: the resulting mem object
  127. *
  128. * Dummy, allocate the node but no space for it yet.
  129. */
  130. static int amdgpu_gtt_mgr_new(struct ttm_mem_type_manager *man,
  131. struct ttm_buffer_object *tbo,
  132. const struct ttm_place *place,
  133. struct ttm_mem_reg *mem)
  134. {
  135. struct amdgpu_gtt_mgr *mgr = man->priv;
  136. struct drm_mm_node *node;
  137. int r;
  138. spin_lock(&mgr->lock);
  139. if (mgr->available < mem->num_pages) {
  140. spin_unlock(&mgr->lock);
  141. return 0;
  142. }
  143. mgr->available -= mem->num_pages;
  144. spin_unlock(&mgr->lock);
  145. node = kzalloc(sizeof(*node), GFP_KERNEL);
  146. if (!node) {
  147. r = -ENOMEM;
  148. goto err_out;
  149. }
  150. node->start = AMDGPU_BO_INVALID_OFFSET;
  151. node->size = mem->num_pages;
  152. mem->mm_node = node;
  153. if (place->fpfn || place->lpfn || place->flags & TTM_PL_FLAG_TOPDOWN) {
  154. r = amdgpu_gtt_mgr_alloc(man, tbo, place, mem);
  155. if (unlikely(r)) {
  156. kfree(node);
  157. mem->mm_node = NULL;
  158. r = 0;
  159. goto err_out;
  160. }
  161. } else {
  162. mem->start = node->start;
  163. }
  164. return 0;
  165. err_out:
  166. spin_lock(&mgr->lock);
  167. mgr->available += mem->num_pages;
  168. spin_unlock(&mgr->lock);
  169. return r;
  170. }
  171. /**
  172. * amdgpu_gtt_mgr_del - free ranges
  173. *
  174. * @man: TTM memory type manager
  175. * @tbo: TTM BO we need this range for
  176. * @place: placement flags and restrictions
  177. * @mem: TTM memory object
  178. *
  179. * Free the allocated GTT again.
  180. */
  181. static void amdgpu_gtt_mgr_del(struct ttm_mem_type_manager *man,
  182. struct ttm_mem_reg *mem)
  183. {
  184. struct amdgpu_gtt_mgr *mgr = man->priv;
  185. struct drm_mm_node *node = mem->mm_node;
  186. if (!node)
  187. return;
  188. spin_lock(&mgr->lock);
  189. if (node->start != AMDGPU_BO_INVALID_OFFSET)
  190. drm_mm_remove_node(node);
  191. mgr->available += mem->num_pages;
  192. spin_unlock(&mgr->lock);
  193. kfree(node);
  194. mem->mm_node = NULL;
  195. }
/**
 * amdgpu_gtt_mgr_debug - dump GTT table
 *
 * @man: TTM memory type manager
 * @prefix: text prefix
 *
 * Dump the GTT drm_mm table content using printk.
 */
static void amdgpu_gtt_mgr_debug(struct ttm_mem_type_manager *man,
				 const char *prefix)
{
	struct amdgpu_gtt_mgr *mgr = man->priv;
	struct drm_printer p = drm_debug_printer(prefix);

	spin_lock(&mgr->lock);
	drm_mm_print(&mgr->mm, &p);
	spin_unlock(&mgr->lock);
}
/* TTM memory type manager callbacks for the GTT domain. */
const struct ttm_mem_type_manager_func amdgpu_gtt_mgr_func = {
	.init = amdgpu_gtt_mgr_init,
	.takedown = amdgpu_gtt_mgr_fini,
	.get_node = amdgpu_gtt_mgr_new,
	.put_node = amdgpu_gtt_mgr_del,
	.debug = amdgpu_gtt_mgr_debug
};