/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_page_alloc.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/swiotlb.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/debugfs.h>
#include "amdgpu.h"
#include "bif/bif_4_1_d.h"

#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)

static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
                             struct ttm_mem_reg *mem, unsigned num_pages,
                             uint64_t offset, unsigned window,
                             struct amdgpu_ring *ring,
                             uint64_t *addr);

static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev);
static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev);

/*
 * Global memory.
 */
static int amdgpu_ttm_mem_global_init(struct drm_global_reference *ref)
{
        return ttm_mem_global_init(ref->object);
}

static void amdgpu_ttm_mem_global_release(struct drm_global_reference *ref)
{
        ttm_mem_global_release(ref->object);
}
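
/*
 * amdgpu_ttm_global_init - set up the driver wide TTM state.
 *
 * References the global TTM memory accounting and BO objects, initializes
 * the GTT window lock and creates the scheduler entity used to submit
 * buffer moves on the copy ring.
 */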
static int amdgpu_ttm_global_init(struct amdgpu_device *adev)
{
        struct drm_global_reference *global_ref;
        struct amdgpu_ring *ring;
        struct amd_sched_rq *rq;
        int r;

        adev->mman.mem_global_referenced = false;
        global_ref = &adev->mman.mem_global_ref;
        global_ref->global_type = DRM_GLOBAL_TTM_MEM;
        global_ref->size = sizeof(struct ttm_mem_global);
        global_ref->init = &amdgpu_ttm_mem_global_init;
        global_ref->release = &amdgpu_ttm_mem_global_release;
        r = drm_global_item_ref(global_ref);
        if (r) {
                DRM_ERROR("Failed setting up TTM memory accounting subsystem.\n");
                goto error_mem;
        }

        adev->mman.bo_global_ref.mem_glob =
                adev->mman.mem_global_ref.object;
        global_ref = &adev->mman.bo_global_ref.ref;
        global_ref->global_type = DRM_GLOBAL_TTM_BO;
        global_ref->size = sizeof(struct ttm_bo_global);
        global_ref->init = &ttm_bo_global_init;
        global_ref->release = &ttm_bo_global_release;
        r = drm_global_item_ref(global_ref);
        if (r) {
                DRM_ERROR("Failed setting up TTM BO subsystem.\n");
                goto error_bo;
        }

        mutex_init(&adev->mman.gtt_window_lock);

        ring = adev->mman.buffer_funcs_ring;
        rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_KERNEL];
        r = amd_sched_entity_init(&ring->sched, &adev->mman.entity,
                                  rq, amdgpu_sched_jobs);
        if (r) {
                DRM_ERROR("Failed setting up TTM BO move run queue.\n");
                goto error_entity;
        }

        adev->mman.mem_global_referenced = true;

        return 0;

error_entity:
        drm_global_item_unref(&adev->mman.bo_global_ref.ref);
error_bo:
        drm_global_item_unref(&adev->mman.mem_global_ref);
error_mem:
        return r;
}

static void amdgpu_ttm_global_fini(struct amdgpu_device *adev)
{
        if (adev->mman.mem_global_referenced) {
                amd_sched_entity_fini(adev->mman.entity.sched,
                                      &adev->mman.entity);
                mutex_destroy(&adev->mman.gtt_window_lock);
                drm_global_item_unref(&adev->mman.bo_global_ref.ref);
                drm_global_item_unref(&adev->mman.mem_global_ref);
                adev->mman.mem_global_referenced = false;
        }
}

static int amdgpu_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
        return 0;
}

static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
                                struct ttm_mem_type_manager *man)
{
        struct amdgpu_device *adev;

        adev = amdgpu_ttm_adev(bdev);

        switch (type) {
        case TTM_PL_SYSTEM:
                /* System memory */
                man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
                man->available_caching = TTM_PL_MASK_CACHING;
                man->default_caching = TTM_PL_FLAG_CACHED;
                break;
        case TTM_PL_TT:
                man->func = &amdgpu_gtt_mgr_func;
                man->gpu_offset = adev->mc.gtt_start;
                man->available_caching = TTM_PL_MASK_CACHING;
                man->default_caching = TTM_PL_FLAG_CACHED;
                man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA;
                break;
        case TTM_PL_VRAM:
                /* "On-card" video ram */
                man->func = &amdgpu_vram_mgr_func;
                man->gpu_offset = adev->mc.vram_start;
                man->flags = TTM_MEMTYPE_FLAG_FIXED |
                             TTM_MEMTYPE_FLAG_MAPPABLE;
                man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
                man->default_caching = TTM_PL_FLAG_WC;
                break;
        case AMDGPU_PL_GDS:
        case AMDGPU_PL_GWS:
        case AMDGPU_PL_OA:
                /* On-chip GDS memory */
                man->func = &ttm_bo_manager_func;
                man->gpu_offset = 0;
                man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_CMA;
                man->available_caching = TTM_PL_FLAG_UNCACHED;
                man->default_caching = TTM_PL_FLAG_UNCACHED;
                break;
        default:
                DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
                return -EINVAL;
        }
        return 0;
}
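
/*
 * amdgpu_evict_flags - compute the placements used when a BO is evicted.
 *
 * VRAM BOs are normally evicted to GTT (or to the CPU domain while the
 * buffer move ring is not ready); everything else goes to the CPU domain.
 */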
static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
                               struct ttm_placement *placement)
{
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
        struct amdgpu_bo *abo;
        static const struct ttm_place placements = {
                .fpfn = 0,
                .lpfn = 0,
                .flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM
        };

        if (!amdgpu_ttm_bo_is_amdgpu_bo(bo)) {
                placement->placement = &placements;
                placement->busy_placement = &placements;
                placement->num_placement = 1;
                placement->num_busy_placement = 1;
                return;
        }
        abo = container_of(bo, struct amdgpu_bo, tbo);
        switch (bo->mem.mem_type) {
        case TTM_PL_VRAM:
                if (adev->mman.buffer_funcs &&
                    adev->mman.buffer_funcs_ring &&
                    adev->mman.buffer_funcs_ring->ready == false) {
                        amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
                } else {
                        amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT);
                }
                break;
        case TTM_PL_TT:
        default:
                amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
        }
        *placement = abo->placement;
}

static int amdgpu_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
        struct amdgpu_bo *abo = container_of(bo, struct amdgpu_bo, tbo);

        if (amdgpu_ttm_tt_get_usermm(bo->ttm))
                return -EPERM;
        return drm_vma_node_verify_access(&abo->gem_base.vma_node,
                                          filp->private_data);
}

static void amdgpu_move_null(struct ttm_buffer_object *bo,
                             struct ttm_mem_reg *new_mem)
{
        struct ttm_mem_reg *old_mem = &bo->mem;

        BUG_ON(old_mem->mm_node != NULL);
        *old_mem = *new_mem;
        new_mem->mm_node = NULL;
}

static uint64_t amdgpu_mm_node_addr(struct ttm_buffer_object *bo,
                                    struct drm_mm_node *mm_node,
                                    struct ttm_mem_reg *mem)
{
        uint64_t addr = 0;

        if (mem->mem_type != TTM_PL_TT ||
            amdgpu_gtt_mgr_is_allocated(mem)) {
                addr = mm_node->start << PAGE_SHIFT;
                addr += bo->bdev->man[mem->mem_type].gpu_offset;
        }
        return addr;
}
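
/*
 * amdgpu_move_blit - move a BO with the copy engine.
 *
 * Walks the drm_mm nodes of the old and new placement and copies them in
 * chunks of at most AMDGPU_GTT_MAX_TRANSFER_SIZE pages.  GTT nodes that are
 * not yet allocated in the GART are mapped through one of the two transfer
 * windows by amdgpu_map_buffer() first.
 */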
static int amdgpu_move_blit(struct ttm_buffer_object *bo,
                            bool evict, bool no_wait_gpu,
                            struct ttm_mem_reg *new_mem,
                            struct ttm_mem_reg *old_mem)
{
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
        struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;

        struct drm_mm_node *old_mm, *new_mm;
        uint64_t old_start, old_size, new_start, new_size;
        unsigned long num_pages;
        struct dma_fence *fence = NULL;
        int r;

        BUILD_BUG_ON((PAGE_SIZE % AMDGPU_GPU_PAGE_SIZE) != 0);

        if (!ring->ready) {
                DRM_ERROR("Trying to move memory with ring turned off.\n");
                return -EINVAL;
        }

        old_mm = old_mem->mm_node;
        old_size = old_mm->size;
        old_start = amdgpu_mm_node_addr(bo, old_mm, old_mem);

        new_mm = new_mem->mm_node;
        new_size = new_mm->size;
        new_start = amdgpu_mm_node_addr(bo, new_mm, new_mem);

        num_pages = new_mem->num_pages;
        mutex_lock(&adev->mman.gtt_window_lock);
        while (num_pages) {
                unsigned long cur_pages = min(min(old_size, new_size),
                                              (u64)AMDGPU_GTT_MAX_TRANSFER_SIZE);
                uint64_t from = old_start, to = new_start;
                struct dma_fence *next;

                if (old_mem->mem_type == TTM_PL_TT &&
                    !amdgpu_gtt_mgr_is_allocated(old_mem)) {
                        r = amdgpu_map_buffer(bo, old_mem, cur_pages,
                                              old_start, 0, ring, &from);
                        if (r)
                                goto error;
                }

                if (new_mem->mem_type == TTM_PL_TT &&
                    !amdgpu_gtt_mgr_is_allocated(new_mem)) {
                        r = amdgpu_map_buffer(bo, new_mem, cur_pages,
                                              new_start, 1, ring, &to);
                        if (r)
                                goto error;
                }

                r = amdgpu_copy_buffer(ring, from, to,
                                       cur_pages * PAGE_SIZE,
                                       bo->resv, &next, false, true);
                if (r)
                        goto error;

                dma_fence_put(fence);
                fence = next;

                num_pages -= cur_pages;
                if (!num_pages)
                        break;

                old_size -= cur_pages;
                if (!old_size) {
                        old_start = amdgpu_mm_node_addr(bo, ++old_mm, old_mem);
                        old_size = old_mm->size;
                } else {
                        old_start += cur_pages * PAGE_SIZE;
                }

                new_size -= cur_pages;
                if (!new_size) {
                        new_start = amdgpu_mm_node_addr(bo, ++new_mm, new_mem);
                        new_size = new_mm->size;
                } else {
                        new_start += cur_pages * PAGE_SIZE;
                }
        }
        mutex_unlock(&adev->mman.gtt_window_lock);

        r = ttm_bo_pipeline_move(bo, fence, evict, new_mem);
        dma_fence_put(fence);
        return r;

error:
        mutex_unlock(&adev->mman.gtt_window_lock);

        if (fence)
                dma_fence_wait(fence, false);
        dma_fence_put(fence);
        return r;
}

static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo,
                                bool evict, bool interruptible,
                                bool no_wait_gpu,
                                struct ttm_mem_reg *new_mem)
{
        struct amdgpu_device *adev;
        struct ttm_mem_reg *old_mem = &bo->mem;
        struct ttm_mem_reg tmp_mem;
        struct ttm_place placements;
        struct ttm_placement placement;
        int r;

        adev = amdgpu_ttm_adev(bo->bdev);
        tmp_mem = *new_mem;
        tmp_mem.mm_node = NULL;
        placement.num_placement = 1;
        placement.placement = &placements;
        placement.num_busy_placement = 1;
        placement.busy_placement = &placements;
        placements.fpfn = 0;
        placements.lpfn = 0;
        placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
        r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
                             interruptible, no_wait_gpu);
        if (unlikely(r)) {
                return r;
        }

        r = ttm_tt_set_placement_caching(bo->ttm, tmp_mem.placement);
        if (unlikely(r)) {
                goto out_cleanup;
        }

        r = ttm_tt_bind(bo->ttm, &tmp_mem);
        if (unlikely(r)) {
                goto out_cleanup;
        }
        r = amdgpu_move_blit(bo, true, no_wait_gpu, &tmp_mem, old_mem);
        if (unlikely(r)) {
                goto out_cleanup;
        }
        r = ttm_bo_move_ttm(bo, interruptible, no_wait_gpu, new_mem);
out_cleanup:
        ttm_bo_mem_put(bo, &tmp_mem);
        return r;
}

static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo,
                                bool evict, bool interruptible,
                                bool no_wait_gpu,
                                struct ttm_mem_reg *new_mem)
{
        struct amdgpu_device *adev;
        struct ttm_mem_reg *old_mem = &bo->mem;
        struct ttm_mem_reg tmp_mem;
        struct ttm_placement placement;
        struct ttm_place placements;
        int r;

        adev = amdgpu_ttm_adev(bo->bdev);
        tmp_mem = *new_mem;
        tmp_mem.mm_node = NULL;
        placement.num_placement = 1;
        placement.placement = &placements;
        placement.num_busy_placement = 1;
        placement.busy_placement = &placements;
        placements.fpfn = 0;
        placements.lpfn = 0;
        placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
        r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
                             interruptible, no_wait_gpu);
        if (unlikely(r)) {
                return r;
        }
        r = ttm_bo_move_ttm(bo, interruptible, no_wait_gpu, &tmp_mem);
        if (unlikely(r)) {
                goto out_cleanup;
        }
        r = amdgpu_move_blit(bo, true, no_wait_gpu, new_mem, old_mem);
        if (unlikely(r)) {
                goto out_cleanup;
        }
out_cleanup:
        ttm_bo_mem_put(bo, &tmp_mem);
        return r;
}
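
/*
 * amdgpu_bo_move - TTM move callback.
 *
 * System<->GTT transitions only need a bind/unbind, moves involving VRAM
 * use the copy engine when it is available, and everything falls back to
 * ttm_bo_move_memcpy() otherwise.
 */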
static int amdgpu_bo_move(struct ttm_buffer_object *bo,
                          bool evict, bool interruptible,
                          bool no_wait_gpu,
                          struct ttm_mem_reg *new_mem)
{
        struct amdgpu_device *adev;
        struct amdgpu_bo *abo;
        struct ttm_mem_reg *old_mem = &bo->mem;
        int r;

        /* Can't move a pinned BO */
        abo = container_of(bo, struct amdgpu_bo, tbo);
        if (WARN_ON_ONCE(abo->pin_count > 0))
                return -EINVAL;

        adev = amdgpu_ttm_adev(bo->bdev);

        if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
                amdgpu_move_null(bo, new_mem);
                return 0;
        }
        if ((old_mem->mem_type == TTM_PL_TT &&
             new_mem->mem_type == TTM_PL_SYSTEM) ||
            (old_mem->mem_type == TTM_PL_SYSTEM &&
             new_mem->mem_type == TTM_PL_TT)) {
                /* bind is enough */
                amdgpu_move_null(bo, new_mem);
                return 0;
        }
        if (adev->mman.buffer_funcs == NULL ||
            adev->mman.buffer_funcs_ring == NULL ||
            !adev->mman.buffer_funcs_ring->ready) {
                /* use memcpy */
                goto memcpy;
        }

        if (old_mem->mem_type == TTM_PL_VRAM &&
            new_mem->mem_type == TTM_PL_SYSTEM) {
                r = amdgpu_move_vram_ram(bo, evict, interruptible,
                                         no_wait_gpu, new_mem);
        } else if (old_mem->mem_type == TTM_PL_SYSTEM &&
                   new_mem->mem_type == TTM_PL_VRAM) {
                r = amdgpu_move_ram_vram(bo, evict, interruptible,
                                         no_wait_gpu, new_mem);
        } else {
                r = amdgpu_move_blit(bo, evict, no_wait_gpu, new_mem, old_mem);
        }

        if (r) {
memcpy:
                r = ttm_bo_move_memcpy(bo, interruptible, no_wait_gpu, new_mem);
                if (r) {
                        return r;
                }
        }

        /* update statistics */
        atomic64_add((u64)bo->num_pages << PAGE_SHIFT, &adev->num_bytes_moved);
        return 0;
}
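
/*
 * amdgpu_ttm_io_mem_reserve - fill in the bus placement used for CPU mapping.
 *
 * Only VRAM that lies inside the CPU visible aperture can be mapped as
 * I/O memory.
 */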
static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
        struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);

        mem->bus.addr = NULL;
        mem->bus.offset = 0;
        mem->bus.size = mem->num_pages << PAGE_SHIFT;
        mem->bus.base = 0;
        mem->bus.is_iomem = false;
        if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
                return -EINVAL;
        switch (mem->mem_type) {
        case TTM_PL_SYSTEM:
                /* system memory */
                return 0;
        case TTM_PL_TT:
                break;
        case TTM_PL_VRAM:
                mem->bus.offset = mem->start << PAGE_SHIFT;
                /* check if it's visible */
                if ((mem->bus.offset + mem->bus.size) > adev->mc.visible_vram_size)
                        return -EINVAL;
                mem->bus.base = adev->mc.aper_base;
                mem->bus.is_iomem = true;
                break;
        default:
                return -EINVAL;
        }
        return 0;
}

static void amdgpu_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
}

static unsigned long amdgpu_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
                                           unsigned long page_offset)
{
        struct drm_mm_node *mm = bo->mem.mm_node;
        uint64_t size = mm->size;
        uint64_t offset = page_offset;

        page_offset = do_div(offset, size);
        mm += offset;
        return (bo->mem.bus.base >> PAGE_SHIFT) + mm->start + page_offset;
}

/*
 * TTM backend functions.
 */
struct amdgpu_ttm_gup_task_list {
        struct list_head        list;
        struct task_struct      *task;
};

struct amdgpu_ttm_tt {
        struct ttm_dma_tt       ttm;
        struct amdgpu_device    *adev;
        u64                     offset;
        uint64_t                userptr;
        struct mm_struct        *usermm;
        uint32_t                userflags;
        spinlock_t              guptasklock;
        struct list_head        guptasks;
        atomic_t                mmu_invalidations;
        struct list_head        list;
};
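
/*
 * amdgpu_ttm_tt_get_user_pages - pin the pages backing a userptr BO.
 *
 * The calling task is added to gtt->guptasks while get_user_pages() runs,
 * so that an invalidation coming from this same task can be ignored in
 * amdgpu_ttm_tt_affect_userptr().
 */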
int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages)
{
        struct amdgpu_ttm_tt *gtt = (void *)ttm;
        unsigned int flags = 0;
        unsigned pinned = 0;
        int r;

        if (!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY))
                flags |= FOLL_WRITE;

        if (gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) {
                /* check that we only use anonymous memory
                   to prevent problems with writeback */
                unsigned long end = gtt->userptr + ttm->num_pages * PAGE_SIZE;
                struct vm_area_struct *vma;

                vma = find_vma(gtt->usermm, gtt->userptr);
                if (!vma || vma->vm_file || vma->vm_end < end)
                        return -EPERM;
        }

        do {
                unsigned num_pages = ttm->num_pages - pinned;
                uint64_t userptr = gtt->userptr + pinned * PAGE_SIZE;
                struct page **p = pages + pinned;
                struct amdgpu_ttm_gup_task_list guptask;

                guptask.task = current;
                spin_lock(&gtt->guptasklock);
                list_add(&guptask.list, &gtt->guptasks);
                spin_unlock(&gtt->guptasklock);

                r = get_user_pages(userptr, num_pages, flags, p, NULL);

                spin_lock(&gtt->guptasklock);
                list_del(&guptask.list);
                spin_unlock(&gtt->guptasklock);

                if (r < 0)
                        goto release_pages;

                pinned += r;

        } while (pinned < ttm->num_pages);

        return 0;

release_pages:
        release_pages(pages, pinned, 0);
        return r;
}

/* prepare the sg table with the user pages */
static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm)
{
        struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
        struct amdgpu_ttm_tt *gtt = (void *)ttm;
        unsigned nents;
        int r;

        int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
        enum dma_data_direction direction = write ?
                DMA_BIDIRECTIONAL : DMA_TO_DEVICE;

        r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0,
                                      ttm->num_pages << PAGE_SHIFT,
                                      GFP_KERNEL);
        if (r)
                goto release_sg;

        r = -ENOMEM;
        nents = dma_map_sg(adev->dev, ttm->sg->sgl, ttm->sg->nents, direction);
        if (nents != ttm->sg->nents)
                goto release_sg;

        drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
                                         gtt->ttm.dma_address, ttm->num_pages);

        return 0;

release_sg:
        kfree(ttm->sg);
        return r;
}

static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
{
        struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
        struct amdgpu_ttm_tt *gtt = (void *)ttm;
        struct sg_page_iter sg_iter;

        int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
        enum dma_data_direction direction = write ?
                DMA_BIDIRECTIONAL : DMA_TO_DEVICE;

        /* double check that we don't free the table twice */
        if (!ttm->sg->sgl)
                return;

        /* free the sg table and pages again */
        dma_unmap_sg(adev->dev, ttm->sg->sgl, ttm->sg->nents, direction);

        for_each_sg_page(ttm->sg->sgl, &sg_iter, ttm->sg->nents, 0) {
                struct page *page = sg_page_iter_page(&sg_iter);

                if (!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY))
                        set_page_dirty(page);

                mark_page_accessed(page);
                put_page(page);
        }

        sg_free_table(ttm->sg);
}

static int amdgpu_ttm_do_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
{
        struct amdgpu_ttm_tt *gtt = (void *)ttm;
        uint64_t flags;
        int r;

        spin_lock(&gtt->adev->gtt_list_lock);
        flags = amdgpu_ttm_tt_pte_flags(gtt->adev, ttm, mem);
        gtt->offset = (u64)mem->start << PAGE_SHIFT;
        r = amdgpu_gart_bind(gtt->adev, gtt->offset, ttm->num_pages,
                             ttm->pages, gtt->ttm.dma_address, flags);
        if (r) {
                DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
                          ttm->num_pages, gtt->offset);
                goto error_gart_bind;
        }

        list_add_tail(&gtt->list, &gtt->adev->gtt_list);
error_gart_bind:
        spin_unlock(&gtt->adev->gtt_list_lock);
        return r;
}
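
/*
 * amdgpu_ttm_backend_bind - TTM backend bind callback.
 *
 * Pins userptr pages if needed and binds the pages into the GART, but only
 * once the GTT manager has assigned a real GTT address for them.
 */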
static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm,
                                   struct ttm_mem_reg *bo_mem)
{
        struct amdgpu_ttm_tt *gtt = (void*)ttm;
        int r = 0;

        if (gtt->userptr) {
                r = amdgpu_ttm_tt_pin_userptr(ttm);
                if (r) {
                        DRM_ERROR("failed to pin userptr\n");
                        return r;
                }
        }
        if (!ttm->num_pages) {
                WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
                     ttm->num_pages, bo_mem, ttm);
        }

        if (bo_mem->mem_type == AMDGPU_PL_GDS ||
            bo_mem->mem_type == AMDGPU_PL_GWS ||
            bo_mem->mem_type == AMDGPU_PL_OA)
                return -EINVAL;

        if (amdgpu_gtt_mgr_is_allocated(bo_mem))
                r = amdgpu_ttm_do_bind(ttm, bo_mem);

        return r;
}

bool amdgpu_ttm_is_bound(struct ttm_tt *ttm)
{
        struct amdgpu_ttm_tt *gtt = (void *)ttm;

        return gtt && !list_empty(&gtt->list);
}

int amdgpu_ttm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *bo_mem)
{
        struct ttm_tt *ttm = bo->ttm;
        int r;

        if (!ttm || amdgpu_ttm_is_bound(ttm))
                return 0;

        r = amdgpu_gtt_mgr_alloc(&bo->bdev->man[TTM_PL_TT], bo,
                                 NULL, bo_mem);
        if (r) {
                DRM_ERROR("Failed to allocate GTT address space (%d)\n", r);
                return r;
        }

        return amdgpu_ttm_do_bind(ttm, bo_mem);
}
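
/*
 * amdgpu_ttm_recover_gart - re-bind every ttm_tt on adev->gtt_list so the
 * GART table matches the tracked mappings again, used when the GART
 * contents need to be restored (for example after a GPU reset).
 */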
int amdgpu_ttm_recover_gart(struct amdgpu_device *adev)
{
        struct amdgpu_ttm_tt *gtt, *tmp;
        struct ttm_mem_reg bo_mem;
        uint64_t flags;
        int r;

        bo_mem.mem_type = TTM_PL_TT;
        spin_lock(&adev->gtt_list_lock);
        list_for_each_entry_safe(gtt, tmp, &adev->gtt_list, list) {
                flags = amdgpu_ttm_tt_pte_flags(gtt->adev, &gtt->ttm.ttm, &bo_mem);
                r = amdgpu_gart_bind(adev, gtt->offset, gtt->ttm.ttm.num_pages,
                                     gtt->ttm.ttm.pages, gtt->ttm.dma_address,
                                     flags);
                if (r) {
                        spin_unlock(&adev->gtt_list_lock);
                        DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
                                  gtt->ttm.ttm.num_pages, gtt->offset);
                        return r;
                }
        }
        spin_unlock(&adev->gtt_list_lock);
        return 0;
}

static int amdgpu_ttm_backend_unbind(struct ttm_tt *ttm)
{
        struct amdgpu_ttm_tt *gtt = (void *)ttm;
        int r;

        if (gtt->userptr)
                amdgpu_ttm_tt_unpin_userptr(ttm);

        if (!amdgpu_ttm_is_bound(ttm))
                return 0;

        /* unbind shouldn't be done for GDS/GWS/OA in ttm_bo_clean_mm */
        spin_lock(&gtt->adev->gtt_list_lock);
        r = amdgpu_gart_unbind(gtt->adev, gtt->offset, ttm->num_pages);
        if (r) {
                DRM_ERROR("failed to unbind %lu pages at 0x%08llX\n",
                          gtt->ttm.ttm.num_pages, gtt->offset);
                goto error_unbind;
        }
        list_del_init(&gtt->list);
error_unbind:
        spin_unlock(&gtt->adev->gtt_list_lock);
        return r;
}

static void amdgpu_ttm_backend_destroy(struct ttm_tt *ttm)
{
        struct amdgpu_ttm_tt *gtt = (void *)ttm;

        ttm_dma_tt_fini(&gtt->ttm);
        kfree(gtt);
}

static struct ttm_backend_func amdgpu_backend_func = {
        .bind = &amdgpu_ttm_backend_bind,
        .unbind = &amdgpu_ttm_backend_unbind,
        .destroy = &amdgpu_ttm_backend_destroy,
};

static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_bo_device *bdev,
                                           unsigned long size, uint32_t page_flags,
                                           struct page *dummy_read_page)
{
        struct amdgpu_device *adev;
        struct amdgpu_ttm_tt *gtt;

        adev = amdgpu_ttm_adev(bdev);

        gtt = kzalloc(sizeof(struct amdgpu_ttm_tt), GFP_KERNEL);
        if (gtt == NULL) {
                return NULL;
        }
        gtt->ttm.ttm.func = &amdgpu_backend_func;
        gtt->adev = adev;
        if (ttm_dma_tt_init(&gtt->ttm, bdev, size, page_flags, dummy_read_page)) {
                kfree(gtt);
                return NULL;
        }
        INIT_LIST_HEAD(&gtt->list);
        return &gtt->ttm.ttm;
}
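
/*
 * amdgpu_ttm_tt_populate - allocate and DMA-map the backing pages.
 *
 * Userptr and SG (imported) ttm_tt objects take their addresses from the
 * provided sg table instead of the page pools.
 */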
static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm)
{
        struct amdgpu_device *adev;
        struct amdgpu_ttm_tt *gtt = (void *)ttm;
        unsigned i;
        int r;
        bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

        if (ttm->state != tt_unpopulated)
                return 0;

        if (gtt && gtt->userptr) {
                ttm->sg = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
                if (!ttm->sg)
                        return -ENOMEM;

                ttm->page_flags |= TTM_PAGE_FLAG_SG;
                ttm->state = tt_unbound;
                return 0;
        }

        if (slave && ttm->sg) {
                drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
                                                 gtt->ttm.dma_address, ttm->num_pages);
                ttm->state = tt_unbound;
                return 0;
        }

        adev = amdgpu_ttm_adev(ttm->bdev);

#ifdef CONFIG_SWIOTLB
        if (swiotlb_nr_tbl()) {
                return ttm_dma_populate(&gtt->ttm, adev->dev);
        }
#endif

        r = ttm_pool_populate(ttm);
        if (r) {
                return r;
        }

        for (i = 0; i < ttm->num_pages; i++) {
                gtt->ttm.dma_address[i] = pci_map_page(adev->pdev, ttm->pages[i],
                                                       0, PAGE_SIZE,
                                                       PCI_DMA_BIDIRECTIONAL);
                if (pci_dma_mapping_error(adev->pdev, gtt->ttm.dma_address[i])) {
                        while (i--) {
                                pci_unmap_page(adev->pdev, gtt->ttm.dma_address[i],
                                               PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
                                gtt->ttm.dma_address[i] = 0;
                        }
                        ttm_pool_unpopulate(ttm);
                        return -EFAULT;
                }
        }
        return 0;
}

static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
        struct amdgpu_device *adev;
        struct amdgpu_ttm_tt *gtt = (void *)ttm;
        unsigned i;
        bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

        if (gtt && gtt->userptr) {
                kfree(ttm->sg);
                ttm->page_flags &= ~TTM_PAGE_FLAG_SG;
                return;
        }

        if (slave)
                return;

        adev = amdgpu_ttm_adev(ttm->bdev);

#ifdef CONFIG_SWIOTLB
        if (swiotlb_nr_tbl()) {
                ttm_dma_unpopulate(&gtt->ttm, adev->dev);
                return;
        }
#endif

        for (i = 0; i < ttm->num_pages; i++) {
                if (gtt->ttm.dma_address[i]) {
                        pci_unmap_page(adev->pdev, gtt->ttm.dma_address[i],
                                       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
                }
        }

        ttm_pool_unpopulate(ttm);
}

int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
                              uint32_t flags)
{
        struct amdgpu_ttm_tt *gtt = (void *)ttm;

        if (gtt == NULL)
                return -EINVAL;

        gtt->userptr = addr;
        gtt->usermm = current->mm;
        gtt->userflags = flags;
        spin_lock_init(&gtt->guptasklock);
        INIT_LIST_HEAD(&gtt->guptasks);
        atomic_set(&gtt->mmu_invalidations, 0);

        return 0;
}

struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm)
{
        struct amdgpu_ttm_tt *gtt = (void *)ttm;

        if (gtt == NULL)
                return NULL;

        return gtt->usermm;
}

bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
                                  unsigned long end)
{
        struct amdgpu_ttm_tt *gtt = (void *)ttm;
        struct amdgpu_ttm_gup_task_list *entry;
        unsigned long size;

        if (gtt == NULL || !gtt->userptr)
                return false;

        size = (unsigned long)gtt->ttm.ttm.num_pages * PAGE_SIZE;
        if (gtt->userptr > end || gtt->userptr + size <= start)
                return false;

        spin_lock(&gtt->guptasklock);
        list_for_each_entry(entry, &gtt->guptasks, list) {
                if (entry->task == current) {
                        spin_unlock(&gtt->guptasklock);
                        return false;
                }
        }
        spin_unlock(&gtt->guptasklock);

        atomic_inc(&gtt->mmu_invalidations);

        return true;
}

bool amdgpu_ttm_tt_userptr_invalidated(struct ttm_tt *ttm,
                                       int *last_invalidated)
{
        struct amdgpu_ttm_tt *gtt = (void *)ttm;
        int prev_invalidated = *last_invalidated;

        *last_invalidated = atomic_read(&gtt->mmu_invalidations);
        return prev_invalidated != *last_invalidated;
}

bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm)
{
        struct amdgpu_ttm_tt *gtt = (void *)ttm;

        if (gtt == NULL)
                return false;

        return !!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
}
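
/*
 * amdgpu_ttm_tt_pte_flags - compute the GART/VM PTE flags for a placement.
 */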
uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
                                 struct ttm_mem_reg *mem)
{
        uint64_t flags = 0;

        if (mem && mem->mem_type != TTM_PL_SYSTEM)
                flags |= AMDGPU_PTE_VALID;

        if (mem && mem->mem_type == TTM_PL_TT) {
                flags |= AMDGPU_PTE_SYSTEM;

                if (ttm->caching_state == tt_cached)
                        flags |= AMDGPU_PTE_SNOOPED;
        }

        flags |= adev->gart.gart_pte_flags;
        flags |= AMDGPU_PTE_READABLE;

        if (!amdgpu_ttm_tt_is_readonly(ttm))
                flags |= AMDGPU_PTE_WRITEABLE;

        return flags;
}

static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
                                            const struct ttm_place *place)
{
        unsigned long num_pages = bo->mem.num_pages;
        struct drm_mm_node *node = bo->mem.mm_node;

        if (bo->mem.start != AMDGPU_BO_INVALID_OFFSET)
                return ttm_bo_eviction_valuable(bo, place);

        switch (bo->mem.mem_type) {
        case TTM_PL_TT:
                return true;

        case TTM_PL_VRAM:
                /* Check each drm MM node individually */
                while (num_pages) {
                        if (place->fpfn < (node->start + node->size) &&
                            !(place->lpfn && place->lpfn <= node->start))
                                return true;

                        num_pages -= node->size;
                        ++node;
                }
                break;

        default:
                break;
        }

        return ttm_bo_eviction_valuable(bo, place);
}

static struct ttm_bo_driver amdgpu_bo_driver = {
        .ttm_tt_create = &amdgpu_ttm_tt_create,
        .ttm_tt_populate = &amdgpu_ttm_tt_populate,
        .ttm_tt_unpopulate = &amdgpu_ttm_tt_unpopulate,
        .invalidate_caches = &amdgpu_invalidate_caches,
        .init_mem_type = &amdgpu_init_mem_type,
        .eviction_valuable = amdgpu_ttm_bo_eviction_valuable,
        .evict_flags = &amdgpu_evict_flags,
        .move = &amdgpu_bo_move,
        .verify_access = &amdgpu_verify_access,
        .move_notify = &amdgpu_bo_move_notify,
        .fault_reserve_notify = &amdgpu_bo_fault_reserve_notify,
        .io_mem_reserve = &amdgpu_ttm_io_mem_reserve,
        .io_mem_free = &amdgpu_ttm_io_mem_free,
        .io_mem_pfn = amdgpu_ttm_io_mem_pfn,
};
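
/*
 * amdgpu_ttm_init - initialize the memory manager: the TTM device, the
 * VRAM and GTT heaps, the stolen VGA memory BO, the GDS/GWS/OA domains
 * and the debugfs entries.
 */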
int amdgpu_ttm_init(struct amdgpu_device *adev)
{
        int r;

        r = amdgpu_ttm_global_init(adev);
        if (r) {
                return r;
        }
        /* No other user of the address space, so set it to 0 */
        r = ttm_bo_device_init(&adev->mman.bdev,
                               adev->mman.bo_global_ref.ref.object,
                               &amdgpu_bo_driver,
                               adev->ddev->anon_inode->i_mapping,
                               DRM_FILE_PAGE_OFFSET,
                               adev->need_dma32);
        if (r) {
                DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
                return r;
        }
        adev->mman.initialized = true;
        r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_VRAM,
                           adev->mc.real_vram_size >> PAGE_SHIFT);
        if (r) {
                DRM_ERROR("Failed initializing VRAM heap.\n");
                return r;
        }
        /* Change the size here instead of the init above so only lpfn is affected */
        amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size);

        r = amdgpu_bo_create(adev, adev->mc.stolen_size, PAGE_SIZE, true,
                             AMDGPU_GEM_DOMAIN_VRAM,
                             AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
                             AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
                             NULL, NULL, &adev->stollen_vga_memory);
        if (r) {
                return r;
        }
        r = amdgpu_bo_reserve(adev->stollen_vga_memory, false);
        if (r)
                return r;
        r = amdgpu_bo_pin(adev->stollen_vga_memory, AMDGPU_GEM_DOMAIN_VRAM, NULL);
        amdgpu_bo_unreserve(adev->stollen_vga_memory);
        if (r) {
                amdgpu_bo_unref(&adev->stollen_vga_memory);
                return r;
        }
        DRM_INFO("amdgpu: %uM of VRAM memory ready\n",
                 (unsigned) (adev->mc.real_vram_size / (1024 * 1024)));
        r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_TT,
                           adev->mc.gtt_size >> PAGE_SHIFT);
        if (r) {
                DRM_ERROR("Failed initializing GTT heap.\n");
                return r;
        }
        DRM_INFO("amdgpu: %uM of GTT memory ready.\n",
                 (unsigned)(adev->mc.gtt_size / (1024 * 1024)));

        adev->gds.mem.total_size = adev->gds.mem.total_size << AMDGPU_GDS_SHIFT;
        adev->gds.mem.gfx_partition_size = adev->gds.mem.gfx_partition_size << AMDGPU_GDS_SHIFT;
        adev->gds.mem.cs_partition_size = adev->gds.mem.cs_partition_size << AMDGPU_GDS_SHIFT;
        adev->gds.gws.total_size = adev->gds.gws.total_size << AMDGPU_GWS_SHIFT;
        adev->gds.gws.gfx_partition_size = adev->gds.gws.gfx_partition_size << AMDGPU_GWS_SHIFT;
        adev->gds.gws.cs_partition_size = adev->gds.gws.cs_partition_size << AMDGPU_GWS_SHIFT;
        adev->gds.oa.total_size = adev->gds.oa.total_size << AMDGPU_OA_SHIFT;
        adev->gds.oa.gfx_partition_size = adev->gds.oa.gfx_partition_size << AMDGPU_OA_SHIFT;
        adev->gds.oa.cs_partition_size = adev->gds.oa.cs_partition_size << AMDGPU_OA_SHIFT;
        /* GDS Memory */
        if (adev->gds.mem.total_size) {
                r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_GDS,
                                   adev->gds.mem.total_size >> PAGE_SHIFT);
                if (r) {
                        DRM_ERROR("Failed initializing GDS heap.\n");
                        return r;
                }
        }

        /* GWS */
        if (adev->gds.gws.total_size) {
                r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_GWS,
                                   adev->gds.gws.total_size >> PAGE_SHIFT);
                if (r) {
                        DRM_ERROR("Failed initializing gws heap.\n");
                        return r;
                }
        }

        /* OA */
        if (adev->gds.oa.total_size) {
                r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_OA,
                                   adev->gds.oa.total_size >> PAGE_SHIFT);
                if (r) {
                        DRM_ERROR("Failed initializing oa heap.\n");
                        return r;
                }
        }

        r = amdgpu_ttm_debugfs_init(adev);
        if (r) {
                DRM_ERROR("Failed to init debugfs\n");
                return r;
        }
        return 0;
}

void amdgpu_ttm_fini(struct amdgpu_device *adev)
{
        int r;

        if (!adev->mman.initialized)
                return;
        amdgpu_ttm_debugfs_fini(adev);
        if (adev->stollen_vga_memory) {
                r = amdgpu_bo_reserve(adev->stollen_vga_memory, true);
                if (r == 0) {
                        amdgpu_bo_unpin(adev->stollen_vga_memory);
                        amdgpu_bo_unreserve(adev->stollen_vga_memory);
                }
                amdgpu_bo_unref(&adev->stollen_vga_memory);
        }
        ttm_bo_clean_mm(&adev->mman.bdev, TTM_PL_VRAM);
        ttm_bo_clean_mm(&adev->mman.bdev, TTM_PL_TT);
        if (adev->gds.mem.total_size)
                ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_GDS);
        if (adev->gds.gws.total_size)
                ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_GWS);
        if (adev->gds.oa.total_size)
                ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_OA);
        ttm_bo_device_release(&adev->mman.bdev);
        amdgpu_gart_fini(adev);
        amdgpu_ttm_global_fini(adev);
        adev->mman.initialized = false;
        DRM_INFO("amdgpu: ttm finalized\n");
}

/* this should only be called at bootup or when userspace
 * isn't running */
void amdgpu_ttm_set_active_vram_size(struct amdgpu_device *adev, u64 size)
{
        struct ttm_mem_type_manager *man;

        if (!adev->mman.initialized)
                return;

        man = &adev->mman.bdev.man[TTM_PL_VRAM];
        /* this just adjusts TTM's size idea, which sets lpfn to the correct value */
        man->size = size >> PAGE_SHIFT;
}

int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct drm_file *file_priv;
        struct amdgpu_device *adev;

        if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
                return -EINVAL;

        file_priv = filp->private_data;
        adev = file_priv->minor->dev->dev_private;
        if (adev == NULL)
                return -EINVAL;

        return ttm_bo_mmap(filp, vma, &adev->mman.bdev);
}
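
/*
 * amdgpu_map_buffer - map part of an unbound GTT BO through a GART window.
 *
 * Builds the page table entries for num_pages pages, copies them into one
 * of the two transfer windows of the GART table and returns the GPU
 * address of that window in *addr, so amdgpu_move_blit() can access pages
 * that do not have a GTT address yet.
 */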
static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
                             struct ttm_mem_reg *mem, unsigned num_pages,
                             uint64_t offset, unsigned window,
                             struct amdgpu_ring *ring,
                             uint64_t *addr)
{
        struct amdgpu_ttm_tt *gtt = (void *)bo->ttm;
        struct amdgpu_device *adev = ring->adev;
        struct ttm_tt *ttm = bo->ttm;
        struct amdgpu_job *job;
        unsigned num_dw, num_bytes;
        dma_addr_t *dma_address;
        struct dma_fence *fence;
        uint64_t src_addr, dst_addr;
        uint64_t flags;
        int r;

        BUG_ON(adev->mman.buffer_funcs->copy_max_bytes <
               AMDGPU_GTT_MAX_TRANSFER_SIZE * 8);

        *addr = adev->mc.gtt_start;
        *addr += (u64)window * AMDGPU_GTT_MAX_TRANSFER_SIZE *
                AMDGPU_GPU_PAGE_SIZE;

        num_dw = adev->mman.buffer_funcs->copy_num_dw;
        while (num_dw & 0x7)
                num_dw++;

        num_bytes = num_pages * 8;

        r = amdgpu_job_alloc_with_ib(adev, num_dw * 4 + num_bytes, &job);
        if (r)
                return r;

        src_addr = num_dw * 4;
        src_addr += job->ibs[0].gpu_addr;

        dst_addr = adev->gart.table_addr;
        dst_addr += window * AMDGPU_GTT_MAX_TRANSFER_SIZE * 8;
        amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr,
                                dst_addr, num_bytes);

        amdgpu_ring_pad_ib(ring, &job->ibs[0]);
        WARN_ON(job->ibs[0].length_dw > num_dw);

        dma_address = &gtt->ttm.dma_address[offset >> PAGE_SHIFT];
        flags = amdgpu_ttm_tt_pte_flags(adev, ttm, mem);
        r = amdgpu_gart_map(adev, 0, num_pages, dma_address, flags,
                            &job->ibs[0].ptr[num_dw]);
        if (r)
                goto error_free;

        r = amdgpu_job_submit(job, ring, &adev->mman.entity,
                              AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
        if (r)
                goto error_free;

        dma_fence_put(fence);

        return r;

error_free:
        amdgpu_job_free(job);
        return r;
}
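
/*
 * amdgpu_copy_buffer - schedule a copy between two GPU addresses on @ring.
 *
 * The copy is split into chunks of at most copy_max_bytes.  With
 * direct_submit the IB is executed immediately on the ring, otherwise it
 * is handed to the scheduler entity and *fence signals completion.
 * A rough usage sketch (error handling omitted):
 *
 *	r = amdgpu_copy_buffer(ring, src, dst, size, resv, &f, false, false);
 *	if (!r)
 *		dma_fence_wait(f, false);
 */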
int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
                       uint64_t dst_offset, uint32_t byte_count,
                       struct reservation_object *resv,
                       struct dma_fence **fence, bool direct_submit,
                       bool vm_needs_flush)
{
        struct amdgpu_device *adev = ring->adev;
        struct amdgpu_job *job;

        uint32_t max_bytes;
        unsigned num_loops, num_dw;
        unsigned i;
        int r;

        max_bytes = adev->mman.buffer_funcs->copy_max_bytes;
        num_loops = DIV_ROUND_UP(byte_count, max_bytes);
        num_dw = num_loops * adev->mman.buffer_funcs->copy_num_dw;

        /* for IB padding */
        while (num_dw & 0x7)
                num_dw++;

        r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, &job);
        if (r)
                return r;

        job->vm_needs_flush = vm_needs_flush;
        if (resv) {
                r = amdgpu_sync_resv(adev, &job->sync, resv,
                                     AMDGPU_FENCE_OWNER_UNDEFINED);
                if (r) {
                        DRM_ERROR("sync failed (%d).\n", r);
                        goto error_free;
                }
        }

        for (i = 0; i < num_loops; i++) {
                uint32_t cur_size_in_bytes = min(byte_count, max_bytes);

                amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_offset,
                                        dst_offset, cur_size_in_bytes);

                src_offset += cur_size_in_bytes;
                dst_offset += cur_size_in_bytes;
                byte_count -= cur_size_in_bytes;
        }

        amdgpu_ring_pad_ib(ring, &job->ibs[0]);
        WARN_ON(job->ibs[0].length_dw > num_dw);
        if (direct_submit) {
                r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs,
                                       NULL, fence);
                job->fence = dma_fence_get(*fence);
                if (r)
                        DRM_ERROR("Error scheduling IBs (%d)\n", r);
                amdgpu_job_free(job);
        } else {
                r = amdgpu_job_submit(job, ring, &adev->mman.entity,
                                      AMDGPU_FENCE_OWNER_UNDEFINED, fence);
                if (r)
                        goto error_free;
        }

        return r;

error_free:
        amdgpu_job_free(job);
        return r;
}
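
/*
 * amdgpu_fill_buffer - fill a BO with a 32 bit value using the copy engine.
 *
 * Walks the drm_mm nodes of the current placement; GTT BOs are bound first
 * so that every node has a valid GPU address.
 */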
int amdgpu_fill_buffer(struct amdgpu_bo *bo,
                       uint32_t src_data,
                       struct reservation_object *resv,
                       struct dma_fence **fence)
{
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
        uint32_t max_bytes = adev->mman.buffer_funcs->fill_max_bytes;
        struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;

        struct drm_mm_node *mm_node;
        unsigned long num_pages;
        unsigned int num_loops, num_dw;

        struct amdgpu_job *job;
        int r;

        if (!ring->ready) {
                DRM_ERROR("Trying to clear memory with ring turned off.\n");
                return -EINVAL;
        }

        if (bo->tbo.mem.mem_type == TTM_PL_TT) {
                r = amdgpu_ttm_bind(&bo->tbo, &bo->tbo.mem);
                if (r)
                        return r;
        }

        num_pages = bo->tbo.num_pages;
        mm_node = bo->tbo.mem.mm_node;
        num_loops = 0;
        while (num_pages) {
                uint32_t byte_count = mm_node->size << PAGE_SHIFT;

                num_loops += DIV_ROUND_UP(byte_count, max_bytes);
                num_pages -= mm_node->size;
                ++mm_node;
        }
        num_dw = num_loops * adev->mman.buffer_funcs->fill_num_dw;

        /* for IB padding */
        num_dw += 64;

        r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, &job);
        if (r)
                return r;

        if (resv) {
                r = amdgpu_sync_resv(adev, &job->sync, resv,
                                     AMDGPU_FENCE_OWNER_UNDEFINED);
                if (r) {
                        DRM_ERROR("sync failed (%d).\n", r);
                        goto error_free;
                }
        }

        num_pages = bo->tbo.num_pages;
        mm_node = bo->tbo.mem.mm_node;

        while (num_pages) {
                uint32_t byte_count = mm_node->size << PAGE_SHIFT;
                uint64_t dst_addr;

                dst_addr = amdgpu_mm_node_addr(&bo->tbo, mm_node, &bo->tbo.mem);
                while (byte_count) {
                        uint32_t cur_size_in_bytes = min(byte_count, max_bytes);

                        amdgpu_emit_fill_buffer(adev, &job->ibs[0], src_data,
                                                dst_addr, cur_size_in_bytes);

                        dst_addr += cur_size_in_bytes;
                        byte_count -= cur_size_in_bytes;
                }

                num_pages -= mm_node->size;
                ++mm_node;
        }

        amdgpu_ring_pad_ib(ring, &job->ibs[0]);
        WARN_ON(job->ibs[0].length_dw > num_dw);
        r = amdgpu_job_submit(job, ring, &adev->mman.entity,
                              AMDGPU_FENCE_OWNER_UNDEFINED, fence);
        if (r)
                goto error_free;

        return 0;

error_free:
        amdgpu_job_free(job);
        return r;
}

#if defined(CONFIG_DEBUG_FS)

extern void amdgpu_gtt_mgr_print(struct seq_file *m,
                                 struct ttm_mem_type_manager *man);

static int amdgpu_mm_dump_table(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *)m->private;
        unsigned ttm_pl = *(int *)node->info_ent->data;
        struct drm_device *dev = node->minor->dev;
        struct amdgpu_device *adev = dev->dev_private;
        struct drm_mm *mm = (struct drm_mm *)adev->mman.bdev.man[ttm_pl].priv;
        struct ttm_bo_global *glob = adev->mman.bdev.glob;
        struct drm_printer p = drm_seq_file_printer(m);

        spin_lock(&glob->lru_lock);
        drm_mm_print(mm, &p);
        spin_unlock(&glob->lru_lock);
        switch (ttm_pl) {
        case TTM_PL_VRAM:
                seq_printf(m, "man size:%llu pages, ram usage:%lluMB, vis usage:%lluMB\n",
                           adev->mman.bdev.man[ttm_pl].size,
                           (u64)atomic64_read(&adev->vram_usage) >> 20,
                           (u64)atomic64_read(&adev->vram_vis_usage) >> 20);
                break;
        case TTM_PL_TT:
                amdgpu_gtt_mgr_print(m, &adev->mman.bdev.man[TTM_PL_TT]);
                break;
        }
        return 0;
}

static int ttm_pl_vram = TTM_PL_VRAM;
static int ttm_pl_tt = TTM_PL_TT;

static const struct drm_info_list amdgpu_ttm_debugfs_list[] = {
        {"amdgpu_vram_mm", amdgpu_mm_dump_table, 0, &ttm_pl_vram},
        {"amdgpu_gtt_mm", amdgpu_mm_dump_table, 0, &ttm_pl_tt},
        {"ttm_page_pool", ttm_page_alloc_debugfs, 0, NULL},
#ifdef CONFIG_SWIOTLB
        {"ttm_dma_page_pool", ttm_dma_page_alloc_debugfs, 0, NULL}
#endif
};

static ssize_t amdgpu_ttm_vram_read(struct file *f, char __user *buf,
                                    size_t size, loff_t *pos)
{
        struct amdgpu_device *adev = file_inode(f)->i_private;
        ssize_t result = 0;
        int r;

        if (size & 0x3 || *pos & 0x3)
                return -EINVAL;

        if (*pos >= adev->mc.mc_vram_size)
                return -ENXIO;

        while (size) {
                unsigned long flags;
                uint32_t value;

                if (*pos >= adev->mc.mc_vram_size)
                        return result;

                spin_lock_irqsave(&adev->mmio_idx_lock, flags);
                WREG32(mmMM_INDEX, ((uint32_t)*pos) | 0x80000000);
                WREG32(mmMM_INDEX_HI, *pos >> 31);
                value = RREG32(mmMM_DATA);
                spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);

                r = put_user(value, (uint32_t *)buf);
                if (r)
                        return r;

                result += 4;
                buf += 4;
                *pos += 4;
                size -= 4;
        }

        return result;
}

static const struct file_operations amdgpu_ttm_vram_fops = {
        .owner = THIS_MODULE,
        .read = amdgpu_ttm_vram_read,
        .llseek = default_llseek
};

#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS

static ssize_t amdgpu_ttm_gtt_read(struct file *f, char __user *buf,
                                   size_t size, loff_t *pos)
{
        struct amdgpu_device *adev = file_inode(f)->i_private;
        ssize_t result = 0;
        int r;

        while (size) {
                loff_t p = *pos / PAGE_SIZE;
                unsigned off = *pos & ~PAGE_MASK;
                size_t cur_size = min_t(size_t, size, PAGE_SIZE - off);
                struct page *page;
                void *ptr;

                if (p >= adev->gart.num_cpu_pages)
                        return result;

                page = adev->gart.pages[p];
                if (page) {
                        ptr = kmap(page);
                        ptr += off;

                        r = copy_to_user(buf, ptr, cur_size);
                        kunmap(adev->gart.pages[p]);
                } else
                        r = clear_user(buf, cur_size);

                if (r)
                        return -EFAULT;

                result += cur_size;
                buf += cur_size;
                *pos += cur_size;
                size -= cur_size;
        }

        return result;
}

static const struct file_operations amdgpu_ttm_gtt_fops = {
        .owner = THIS_MODULE,
        .read = amdgpu_ttm_gtt_read,
        .llseek = default_llseek
};

#endif

#endif

static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
        unsigned count;

        struct drm_minor *minor = adev->ddev->primary;
        struct dentry *ent, *root = minor->debugfs_root;

        ent = debugfs_create_file("amdgpu_vram", S_IFREG | S_IRUGO, root,
                                  adev, &amdgpu_ttm_vram_fops);
        if (IS_ERR(ent))
                return PTR_ERR(ent);
        i_size_write(ent->d_inode, adev->mc.mc_vram_size);
        adev->mman.vram = ent;

#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
        ent = debugfs_create_file("amdgpu_gtt", S_IFREG | S_IRUGO, root,
                                  adev, &amdgpu_ttm_gtt_fops);
        if (IS_ERR(ent))
                return PTR_ERR(ent);
        i_size_write(ent->d_inode, adev->mc.gtt_size);
        adev->mman.gtt = ent;
#endif

        count = ARRAY_SIZE(amdgpu_ttm_debugfs_list);

#ifdef CONFIG_SWIOTLB
        if (!swiotlb_nr_tbl())
                --count;
#endif

        return amdgpu_debugfs_add_files(adev, amdgpu_ttm_debugfs_list, count);
#else

        return 0;
#endif
}

static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
        debugfs_remove(adev->mman.vram);
        adev->mman.vram = NULL;

#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
        debugfs_remove(adev->mman.gtt);
        adev->mman.gtt = NULL;
#endif

#endif
}