amdgpu_ttm.c 54 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699700701702703704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161116211631164116511
661167116811691170117111721173117411751176117711781179118011811182118311841185118611871188118911901191119211931194119511961197119811991200120112021203120412051206120712081209121012111212121312141215121612171218121912201221122212231224122512261227122812291230123112321233123412351236123712381239124012411242124312441245124612471248124912501251125212531254125512561257125812591260126112621263126412651266126712681269127012711272127312741275127612771278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836183718381839184018411842184318441845184618471848184918501851185218531854185518561857185818591860186118621863186418651866186718681869187018711872187318741875187618771878187918801881188218831884188518861887188818891890189118921893189418951896189718981899190019011902190319041905190619071908190919101911191219131914191519161917191819191920192119221923192419251926192719281929193019311932193319341935193619371938193919401941194219431944194519461947194819491950195119521953195419551956195719581959196019611962196319641965196619671968196919701971197219731974197519761977197819791980198119821983198419851986198719881989199019911992199319941995199619971998199920002001200220032004200520062007200820092010201120122013201420152016201720182019202020212022202320242025202620272028202920302031203220332034203520362037203820392040204120422043204420452046204720482049205020512052205320542
055205620572058205920602061206220632064206520662067206820692070207120722073207420752076207720782079208020812082208320842085208620872088208920902091209220932094209520962097209820992100210121022103210421052106210721082109211021112112211321142115211621172118211921202121212221232124212521262127212821292130213121322133213421352136213721382139
  1. /*
  2. * Copyright 2009 Jerome Glisse.
  3. * All Rights Reserved.
  4. *
  5. * Permission is hereby granted, free of charge, to any person obtaining a
  6. * copy of this software and associated documentation files (the
  7. * "Software"), to deal in the Software without restriction, including
  8. * without limitation the rights to use, copy, modify, merge, publish,
  9. * distribute, sub license, and/or sell copies of the Software, and to
  10. * permit persons to whom the Software is furnished to do so, subject to
  11. * the following conditions:
  12. *
  13. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  14. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  15. * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
  16. * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
  17. * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
  18. * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
  19. * USE OR OTHER DEALINGS IN THE SOFTWARE.
  20. *
  21. * The above copyright notice and this permission notice (including the
  22. * next paragraph) shall be included in all copies or substantial portions
  23. * of the Software.
  24. *
  25. */
  26. /*
  27. * Authors:
  28. * Jerome Glisse <glisse@freedesktop.org>
  29. * Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
  30. * Dave Airlie
  31. */
  32. #include <drm/ttm/ttm_bo_api.h>
  33. #include <drm/ttm/ttm_bo_driver.h>
  34. #include <drm/ttm/ttm_placement.h>
  35. #include <drm/ttm/ttm_module.h>
  36. #include <drm/ttm/ttm_page_alloc.h>
  37. #include <drm/drmP.h>
  38. #include <drm/amdgpu_drm.h>
  39. #include <linux/seq_file.h>
  40. #include <linux/slab.h>
  41. #include <linux/swiotlb.h>
  42. #include <linux/swap.h>
  43. #include <linux/pagemap.h>
  44. #include <linux/debugfs.h>
  45. #include <linux/iommu.h>
  46. #include "amdgpu.h"
  47. #include "amdgpu_object.h"
  48. #include "amdgpu_trace.h"
  49. #include "amdgpu_amdkfd.h"
  50. #include "bif/bif_4_1_d.h"
  51. #define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
  52. static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
  53. struct ttm_mem_reg *mem, unsigned num_pages,
  54. uint64_t offset, unsigned window,
  55. struct amdgpu_ring *ring,
  56. uint64_t *addr);
  57. static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev);
  58. static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev);
  59. /*
  60. * Global memory.
  61. */
  62. static int amdgpu_ttm_mem_global_init(struct drm_global_reference *ref)
  63. {
  64. return ttm_mem_global_init(ref->object);
  65. }
  66. static void amdgpu_ttm_mem_global_release(struct drm_global_reference *ref)
  67. {
  68. ttm_mem_global_release(ref->object);
  69. }
  70. static int amdgpu_ttm_global_init(struct amdgpu_device *adev)
  71. {
  72. struct drm_global_reference *global_ref;
  73. struct amdgpu_ring *ring;
  74. struct drm_sched_rq *rq;
  75. int r;
  76. adev->mman.mem_global_referenced = false;
  77. global_ref = &adev->mman.mem_global_ref;
  78. global_ref->global_type = DRM_GLOBAL_TTM_MEM;
  79. global_ref->size = sizeof(struct ttm_mem_global);
  80. global_ref->init = &amdgpu_ttm_mem_global_init;
  81. global_ref->release = &amdgpu_ttm_mem_global_release;
  82. r = drm_global_item_ref(global_ref);
  83. if (r) {
  84. DRM_ERROR("Failed setting up TTM memory accounting "
  85. "subsystem.\n");
  86. goto error_mem;
  87. }
  88. adev->mman.bo_global_ref.mem_glob =
  89. adev->mman.mem_global_ref.object;
  90. global_ref = &adev->mman.bo_global_ref.ref;
  91. global_ref->global_type = DRM_GLOBAL_TTM_BO;
  92. global_ref->size = sizeof(struct ttm_bo_global);
  93. global_ref->init = &ttm_bo_global_init;
  94. global_ref->release = &ttm_bo_global_release;
  95. r = drm_global_item_ref(global_ref);
  96. if (r) {
  97. DRM_ERROR("Failed setting up TTM BO subsystem.\n");
  98. goto error_bo;
  99. }
  100. mutex_init(&adev->mman.gtt_window_lock);
  101. ring = adev->mman.buffer_funcs_ring;
  102. rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_KERNEL];
  103. r = drm_sched_entity_init(&ring->sched, &adev->mman.entity,
  104. rq, amdgpu_sched_jobs, NULL);
  105. if (r) {
  106. DRM_ERROR("Failed setting up TTM BO move run queue.\n");
  107. goto error_entity;
  108. }
  109. adev->mman.mem_global_referenced = true;
  110. return 0;
  111. error_entity:
  112. drm_global_item_unref(&adev->mman.bo_global_ref.ref);
  113. error_bo:
  114. drm_global_item_unref(&adev->mman.mem_global_ref);
  115. error_mem:
  116. return r;
  117. }
  118. static void amdgpu_ttm_global_fini(struct amdgpu_device *adev)
  119. {
  120. if (adev->mman.mem_global_referenced) {
  121. drm_sched_entity_fini(adev->mman.entity.sched,
  122. &adev->mman.entity);
  123. mutex_destroy(&adev->mman.gtt_window_lock);
  124. drm_global_item_unref(&adev->mman.bo_global_ref.ref);
  125. drm_global_item_unref(&adev->mman.mem_global_ref);
  126. adev->mman.mem_global_referenced = false;
  127. }
  128. }
  129. static int amdgpu_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
  130. {
  131. return 0;
  132. }
  133. static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
  134. struct ttm_mem_type_manager *man)
  135. {
  136. struct amdgpu_device *adev;
  137. adev = amdgpu_ttm_adev(bdev);
  138. switch (type) {
  139. case TTM_PL_SYSTEM:
  140. /* System memory */
  141. man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
  142. man->available_caching = TTM_PL_MASK_CACHING;
  143. man->default_caching = TTM_PL_FLAG_CACHED;
  144. break;
  145. case TTM_PL_TT:
  146. man->func = &amdgpu_gtt_mgr_func;
  147. man->gpu_offset = adev->gmc.gart_start;
  148. man->available_caching = TTM_PL_MASK_CACHING;
  149. man->default_caching = TTM_PL_FLAG_CACHED;
  150. man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA;
  151. break;
  152. case TTM_PL_VRAM:
  153. /* "On-card" video ram */
  154. man->func = &amdgpu_vram_mgr_func;
  155. man->gpu_offset = adev->gmc.vram_start;
  156. man->flags = TTM_MEMTYPE_FLAG_FIXED |
  157. TTM_MEMTYPE_FLAG_MAPPABLE;
  158. man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
  159. man->default_caching = TTM_PL_FLAG_WC;
  160. break;
  161. case AMDGPU_PL_GDS:
  162. case AMDGPU_PL_GWS:
  163. case AMDGPU_PL_OA:
  164. /* On-chip GDS memory*/
  165. man->func = &ttm_bo_manager_func;
  166. man->gpu_offset = 0;
  167. man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_CMA;
  168. man->available_caching = TTM_PL_FLAG_UNCACHED;
  169. man->default_caching = TTM_PL_FLAG_UNCACHED;
  170. break;
  171. default:
  172. DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
  173. return -EINVAL;
  174. }
  175. return 0;
  176. }
  177. static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
  178. struct ttm_placement *placement)
  179. {
  180. struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
  181. struct amdgpu_bo *abo;
  182. static const struct ttm_place placements = {
  183. .fpfn = 0,
  184. .lpfn = 0,
  185. .flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM
  186. };
  187. if (!amdgpu_ttm_bo_is_amdgpu_bo(bo)) {
  188. placement->placement = &placements;
  189. placement->busy_placement = &placements;
  190. placement->num_placement = 1;
  191. placement->num_busy_placement = 1;
  192. return;
  193. }
  194. abo = ttm_to_amdgpu_bo(bo);
  195. switch (bo->mem.mem_type) {
  196. case TTM_PL_VRAM:
  197. if (!adev->mman.buffer_funcs_enabled) {
  198. amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
  199. } else if (adev->gmc.visible_vram_size < adev->gmc.real_vram_size &&
  200. !(abo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)) {
  201. unsigned fpfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;
  202. struct drm_mm_node *node = bo->mem.mm_node;
  203. unsigned long pages_left;
  204. for (pages_left = bo->mem.num_pages;
  205. pages_left;
  206. pages_left -= node->size, node++) {
  207. if (node->start < fpfn)
  208. break;
  209. }
  210. if (!pages_left)
  211. goto gtt;
  212. /* Try evicting to the CPU inaccessible part of VRAM
  213. * first, but only set GTT as busy placement, so this
  214. * BO will be evicted to GTT rather than causing other
  215. * BOs to be evicted from VRAM
  216. */
  217. amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
  218. AMDGPU_GEM_DOMAIN_GTT);
  219. abo->placements[0].fpfn = fpfn;
  220. abo->placements[0].lpfn = 0;
  221. abo->placement.busy_placement = &abo->placements[1];
  222. abo->placement.num_busy_placement = 1;
  223. } else {
  224. gtt:
  225. amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT);
  226. }
  227. break;
  228. case TTM_PL_TT:
  229. default:
  230. amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
  231. }
  232. *placement = abo->placement;
  233. }
  234. static int amdgpu_verify_access(struct ttm_buffer_object *bo, struct file *filp)
  235. {
  236. struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
  237. /*
  238. * Don't verify access for KFD BOs. They don't have a GEM
  239. * object associated with them.
  240. */
  241. if (abo->kfd_bo)
  242. return 0;
  243. if (amdgpu_ttm_tt_get_usermm(bo->ttm))
  244. return -EPERM;
  245. return drm_vma_node_verify_access(&abo->gem_base.vma_node,
  246. filp->private_data);
  247. }
  248. static void amdgpu_move_null(struct ttm_buffer_object *bo,
  249. struct ttm_mem_reg *new_mem)
  250. {
  251. struct ttm_mem_reg *old_mem = &bo->mem;
  252. BUG_ON(old_mem->mm_node != NULL);
  253. *old_mem = *new_mem;
  254. new_mem->mm_node = NULL;
  255. }
  256. static uint64_t amdgpu_mm_node_addr(struct ttm_buffer_object *bo,
  257. struct drm_mm_node *mm_node,
  258. struct ttm_mem_reg *mem)
  259. {
  260. uint64_t addr = 0;
  261. if (mem->mem_type != TTM_PL_TT || amdgpu_gtt_mgr_has_gart_addr(mem)) {
  262. addr = mm_node->start << PAGE_SHIFT;
  263. addr += bo->bdev->man[mem->mem_type].gpu_offset;
  264. }
  265. return addr;
  266. }
  267. /**
  268. * amdgpu_find_mm_node - Helper function finds the drm_mm_node
  269. * corresponding to @offset. It also modifies the offset to be
  270. * within the drm_mm_node returned
  271. */
  272. static struct drm_mm_node *amdgpu_find_mm_node(struct ttm_mem_reg *mem,
  273. unsigned long *offset)
  274. {
  275. struct drm_mm_node *mm_node = mem->mm_node;
  276. while (*offset >= (mm_node->size << PAGE_SHIFT)) {
  277. *offset -= (mm_node->size << PAGE_SHIFT);
  278. ++mm_node;
  279. }
  280. return mm_node;
  281. }
  282. /**
  283. * amdgpu_copy_ttm_mem_to_mem - Helper function for copy
  284. *
  285. * The function copies @size bytes from {src->mem + src->offset} to
  286. * {dst->mem + dst->offset}. src->bo and dst->bo could be same BO for a
  287. * move and different for a BO to BO copy.
  288. *
  289. * @f: Returns the last fence if multiple jobs are submitted.
  290. */
  291. int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
  292. struct amdgpu_copy_mem *src,
  293. struct amdgpu_copy_mem *dst,
  294. uint64_t size,
  295. struct reservation_object *resv,
  296. struct dma_fence **f)
  297. {
  298. struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
  299. struct drm_mm_node *src_mm, *dst_mm;
  300. uint64_t src_node_start, dst_node_start, src_node_size,
  301. dst_node_size, src_page_offset, dst_page_offset;
  302. struct dma_fence *fence = NULL;
  303. int r = 0;
  304. const uint64_t GTT_MAX_BYTES = (AMDGPU_GTT_MAX_TRANSFER_SIZE *
  305. AMDGPU_GPU_PAGE_SIZE);
  306. if (!adev->mman.buffer_funcs_enabled) {
  307. DRM_ERROR("Trying to move memory with ring turned off.\n");
  308. return -EINVAL;
  309. }
  310. src_mm = amdgpu_find_mm_node(src->mem, &src->offset);
  311. src_node_start = amdgpu_mm_node_addr(src->bo, src_mm, src->mem) +
  312. src->offset;
  313. src_node_size = (src_mm->size << PAGE_SHIFT) - src->offset;
  314. src_page_offset = src_node_start & (PAGE_SIZE - 1);
  315. dst_mm = amdgpu_find_mm_node(dst->mem, &dst->offset);
  316. dst_node_start = amdgpu_mm_node_addr(dst->bo, dst_mm, dst->mem) +
  317. dst->offset;
  318. dst_node_size = (dst_mm->size << PAGE_SHIFT) - dst->offset;
  319. dst_page_offset = dst_node_start & (PAGE_SIZE - 1);
  320. mutex_lock(&adev->mman.gtt_window_lock);
  321. while (size) {
  322. unsigned long cur_size;
  323. uint64_t from = src_node_start, to = dst_node_start;
  324. struct dma_fence *next;
  325. /* Copy size cannot exceed GTT_MAX_BYTES. So if src or dst
  326. * begins at an offset, then adjust the size accordingly
  327. */
  328. cur_size = min3(min(src_node_size, dst_node_size), size,
  329. GTT_MAX_BYTES);
  330. if (cur_size + src_page_offset > GTT_MAX_BYTES ||
  331. cur_size + dst_page_offset > GTT_MAX_BYTES)
  332. cur_size -= max(src_page_offset, dst_page_offset);
  333. /* Map only what needs to be accessed. Map src to window 0 and
  334. * dst to window 1
  335. */
  336. if (src->mem->mem_type == TTM_PL_TT &&
  337. !amdgpu_gtt_mgr_has_gart_addr(src->mem)) {
  338. r = amdgpu_map_buffer(src->bo, src->mem,
  339. PFN_UP(cur_size + src_page_offset),
  340. src_node_start, 0, ring,
  341. &from);
  342. if (r)
  343. goto error;
  344. /* Adjust the offset because amdgpu_map_buffer returns
  345. * start of mapped page
  346. */
  347. from += src_page_offset;
  348. }
  349. if (dst->mem->mem_type == TTM_PL_TT &&
  350. !amdgpu_gtt_mgr_has_gart_addr(dst->mem)) {
  351. r = amdgpu_map_buffer(dst->bo, dst->mem,
  352. PFN_UP(cur_size + dst_page_offset),
  353. dst_node_start, 1, ring,
  354. &to);
  355. if (r)
  356. goto error;
  357. to += dst_page_offset;
  358. }
  359. r = amdgpu_copy_buffer(ring, from, to, cur_size,
  360. resv, &next, false, true);
  361. if (r)
  362. goto error;
  363. dma_fence_put(fence);
  364. fence = next;
  365. size -= cur_size;
  366. if (!size)
  367. break;
  368. src_node_size -= cur_size;
  369. if (!src_node_size) {
  370. src_node_start = amdgpu_mm_node_addr(src->bo, ++src_mm,
  371. src->mem);
  372. src_node_size = (src_mm->size << PAGE_SHIFT);
  373. } else {
  374. src_node_start += cur_size;
  375. src_page_offset = src_node_start & (PAGE_SIZE - 1);
  376. }
  377. dst_node_size -= cur_size;
  378. if (!dst_node_size) {
  379. dst_node_start = amdgpu_mm_node_addr(dst->bo, ++dst_mm,
  380. dst->mem);
  381. dst_node_size = (dst_mm->size << PAGE_SHIFT);
  382. } else {
  383. dst_node_start += cur_size;
  384. dst_page_offset = dst_node_start & (PAGE_SIZE - 1);
  385. }
  386. }
  387. error:
  388. mutex_unlock(&adev->mman.gtt_window_lock);
  389. if (f)
  390. *f = dma_fence_get(fence);
  391. dma_fence_put(fence);
  392. return r;
  393. }
  394. static int amdgpu_move_blit(struct ttm_buffer_object *bo,
  395. bool evict, bool no_wait_gpu,
  396. struct ttm_mem_reg *new_mem,
  397. struct ttm_mem_reg *old_mem)
  398. {
  399. struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
  400. struct amdgpu_copy_mem src, dst;
  401. struct dma_fence *fence = NULL;
  402. int r;
  403. src.bo = bo;
  404. dst.bo = bo;
  405. src.mem = old_mem;
  406. dst.mem = new_mem;
  407. src.offset = 0;
  408. dst.offset = 0;
  409. r = amdgpu_ttm_copy_mem_to_mem(adev, &src, &dst,
  410. new_mem->num_pages << PAGE_SHIFT,
  411. bo->resv, &fence);
  412. if (r)
  413. goto error;
  414. r = ttm_bo_pipeline_move(bo, fence, evict, new_mem);
  415. dma_fence_put(fence);
  416. return r;
  417. error:
  418. if (fence)
  419. dma_fence_wait(fence, false);
  420. dma_fence_put(fence);
  421. return r;
  422. }
  423. static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo, bool evict,
  424. struct ttm_operation_ctx *ctx,
  425. struct ttm_mem_reg *new_mem)
  426. {
  427. struct amdgpu_device *adev;
  428. struct ttm_mem_reg *old_mem = &bo->mem;
  429. struct ttm_mem_reg tmp_mem;
  430. struct ttm_place placements;
  431. struct ttm_placement placement;
  432. int r;
  433. adev = amdgpu_ttm_adev(bo->bdev);
  434. tmp_mem = *new_mem;
  435. tmp_mem.mm_node = NULL;
  436. placement.num_placement = 1;
  437. placement.placement = &placements;
  438. placement.num_busy_placement = 1;
  439. placement.busy_placement = &placements;
  440. placements.fpfn = 0;
  441. placements.lpfn = 0;
  442. placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
  443. r = ttm_bo_mem_space(bo, &placement, &tmp_mem, ctx);
  444. if (unlikely(r)) {
  445. return r;
  446. }
  447. r = ttm_tt_set_placement_caching(bo->ttm, tmp_mem.placement);
  448. if (unlikely(r)) {
  449. goto out_cleanup;
  450. }
  451. r = ttm_tt_bind(bo->ttm, &tmp_mem, ctx);
  452. if (unlikely(r)) {
  453. goto out_cleanup;
  454. }
  455. r = amdgpu_move_blit(bo, true, ctx->no_wait_gpu, &tmp_mem, old_mem);
  456. if (unlikely(r)) {
  457. goto out_cleanup;
  458. }
  459. r = ttm_bo_move_ttm(bo, ctx, new_mem);
  460. out_cleanup:
  461. ttm_bo_mem_put(bo, &tmp_mem);
  462. return r;
  463. }
  464. static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo, bool evict,
  465. struct ttm_operation_ctx *ctx,
  466. struct ttm_mem_reg *new_mem)
  467. {
  468. struct amdgpu_device *adev;
  469. struct ttm_mem_reg *old_mem = &bo->mem;
  470. struct ttm_mem_reg tmp_mem;
  471. struct ttm_placement placement;
  472. struct ttm_place placements;
  473. int r;
  474. adev = amdgpu_ttm_adev(bo->bdev);
  475. tmp_mem = *new_mem;
  476. tmp_mem.mm_node = NULL;
  477. placement.num_placement = 1;
  478. placement.placement = &placements;
  479. placement.num_busy_placement = 1;
  480. placement.busy_placement = &placements;
  481. placements.fpfn = 0;
  482. placements.lpfn = 0;
  483. placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
  484. r = ttm_bo_mem_space(bo, &placement, &tmp_mem, ctx);
  485. if (unlikely(r)) {
  486. return r;
  487. }
  488. r = ttm_bo_move_ttm(bo, ctx, &tmp_mem);
  489. if (unlikely(r)) {
  490. goto out_cleanup;
  491. }
  492. r = amdgpu_move_blit(bo, true, ctx->no_wait_gpu, new_mem, old_mem);
  493. if (unlikely(r)) {
  494. goto out_cleanup;
  495. }
  496. out_cleanup:
  497. ttm_bo_mem_put(bo, &tmp_mem);
  498. return r;
  499. }
  500. static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
  501. struct ttm_operation_ctx *ctx,
  502. struct ttm_mem_reg *new_mem)
  503. {
  504. struct amdgpu_device *adev;
  505. struct amdgpu_bo *abo;
  506. struct ttm_mem_reg *old_mem = &bo->mem;
  507. int r;
  508. /* Can't move a pinned BO */
  509. abo = ttm_to_amdgpu_bo(bo);
  510. if (WARN_ON_ONCE(abo->pin_count > 0))
  511. return -EINVAL;
  512. adev = amdgpu_ttm_adev(bo->bdev);
  513. if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
  514. amdgpu_move_null(bo, new_mem);
  515. return 0;
  516. }
  517. if ((old_mem->mem_type == TTM_PL_TT &&
  518. new_mem->mem_type == TTM_PL_SYSTEM) ||
  519. (old_mem->mem_type == TTM_PL_SYSTEM &&
  520. new_mem->mem_type == TTM_PL_TT)) {
  521. /* bind is enough */
  522. amdgpu_move_null(bo, new_mem);
  523. return 0;
  524. }
  525. if (!adev->mman.buffer_funcs_enabled)
  526. goto memcpy;
  527. if (old_mem->mem_type == TTM_PL_VRAM &&
  528. new_mem->mem_type == TTM_PL_SYSTEM) {
  529. r = amdgpu_move_vram_ram(bo, evict, ctx, new_mem);
  530. } else if (old_mem->mem_type == TTM_PL_SYSTEM &&
  531. new_mem->mem_type == TTM_PL_VRAM) {
  532. r = amdgpu_move_ram_vram(bo, evict, ctx, new_mem);
  533. } else {
  534. r = amdgpu_move_blit(bo, evict, ctx->no_wait_gpu,
  535. new_mem, old_mem);
  536. }
  537. if (r) {
  538. memcpy:
  539. r = ttm_bo_move_memcpy(bo, ctx, new_mem);
  540. if (r) {
  541. return r;
  542. }
  543. }
  544. if (bo->type == ttm_bo_type_device &&
  545. new_mem->mem_type == TTM_PL_VRAM &&
  546. old_mem->mem_type != TTM_PL_VRAM) {
  547. /* amdgpu_bo_fault_reserve_notify will re-set this if the CPU
  548. * accesses the BO after it's moved.
  549. */
  550. abo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
  551. }
  552. /* update statistics */
  553. atomic64_add((u64)bo->num_pages << PAGE_SHIFT, &adev->num_bytes_moved);
  554. return 0;
  555. }
  556. static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
  557. {
  558. struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
  559. struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
  560. struct drm_mm_node *mm_node = mem->mm_node;
  561. mem->bus.addr = NULL;
  562. mem->bus.offset = 0;
  563. mem->bus.size = mem->num_pages << PAGE_SHIFT;
  564. mem->bus.base = 0;
  565. mem->bus.is_iomem = false;
  566. if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
  567. return -EINVAL;
  568. switch (mem->mem_type) {
  569. case TTM_PL_SYSTEM:
  570. /* system memory */
  571. return 0;
  572. case TTM_PL_TT:
  573. break;
  574. case TTM_PL_VRAM:
  575. mem->bus.offset = mem->start << PAGE_SHIFT;
  576. /* check if it's visible */
  577. if ((mem->bus.offset + mem->bus.size) > adev->gmc.visible_vram_size)
  578. return -EINVAL;
  579. /* Only physically contiguous buffers apply. In a contiguous
  580. * buffer, size of the first mm_node would match the number of
  581. * pages in ttm_mem_reg.
  582. */
  583. if (adev->mman.aper_base_kaddr &&
  584. (mm_node->size == mem->num_pages))
  585. mem->bus.addr = (u8 *)adev->mman.aper_base_kaddr +
  586. mem->bus.offset;
  587. mem->bus.base = adev->gmc.aper_base;
  588. mem->bus.is_iomem = true;
  589. break;
  590. default:
  591. return -EINVAL;
  592. }
  593. return 0;
  594. }
  595. static void amdgpu_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
  596. {
  597. }
  598. static unsigned long amdgpu_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
  599. unsigned long page_offset)
  600. {
  601. struct drm_mm_node *mm;
  602. unsigned long offset = (page_offset << PAGE_SHIFT);
  603. mm = amdgpu_find_mm_node(&bo->mem, &offset);
  604. return (bo->mem.bus.base >> PAGE_SHIFT) + mm->start +
  605. (offset >> PAGE_SHIFT);
  606. }
  607. /*
  608. * TTM backend functions.
  609. */
  610. struct amdgpu_ttm_gup_task_list {
  611. struct list_head list;
  612. struct task_struct *task;
  613. };
  614. struct amdgpu_ttm_tt {
  615. struct ttm_dma_tt ttm;
  616. u64 offset;
  617. uint64_t userptr;
  618. struct mm_struct *usermm;
  619. uint32_t userflags;
  620. spinlock_t guptasklock;
  621. struct list_head guptasks;
  622. atomic_t mmu_invalidations;
  623. uint32_t last_set_pages;
  624. };
  625. int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages)
  626. {
  627. struct amdgpu_ttm_tt *gtt = (void *)ttm;
  628. unsigned int flags = 0;
  629. unsigned pinned = 0;
  630. int r;
  631. if (!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY))
  632. flags |= FOLL_WRITE;
  633. down_read(&current->mm->mmap_sem);
  634. if (gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) {
  635. /* check that we only use anonymous memory
  636. to prevent problems with writeback */
  637. unsigned long end = gtt->userptr + ttm->num_pages * PAGE_SIZE;
  638. struct vm_area_struct *vma;
  639. vma = find_vma(gtt->usermm, gtt->userptr);
  640. if (!vma || vma->vm_file || vma->vm_end < end) {
  641. up_read(&current->mm->mmap_sem);
  642. return -EPERM;
  643. }
  644. }
  645. do {
  646. unsigned num_pages = ttm->num_pages - pinned;
  647. uint64_t userptr = gtt->userptr + pinned * PAGE_SIZE;
  648. struct page **p = pages + pinned;
  649. struct amdgpu_ttm_gup_task_list guptask;
  650. guptask.task = current;
  651. spin_lock(&gtt->guptasklock);
  652. list_add(&guptask.list, &gtt->guptasks);
  653. spin_unlock(&gtt->guptasklock);
  654. r = get_user_pages(userptr, num_pages, flags, p, NULL);
  655. spin_lock(&gtt->guptasklock);
  656. list_del(&guptask.list);
  657. spin_unlock(&gtt->guptasklock);
  658. if (r < 0)
  659. goto release_pages;
  660. pinned += r;
  661. } while (pinned < ttm->num_pages);
  662. up_read(&current->mm->mmap_sem);
  663. return 0;
  664. release_pages:
  665. release_pages(pages, pinned);
  666. up_read(&current->mm->mmap_sem);
  667. return r;
  668. }
  669. void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages)
  670. {
  671. struct amdgpu_ttm_tt *gtt = (void *)ttm;
  672. unsigned i;
  673. gtt->last_set_pages = atomic_read(&gtt->mmu_invalidations);
  674. for (i = 0; i < ttm->num_pages; ++i) {
  675. if (ttm->pages[i])
  676. put_page(ttm->pages[i]);
  677. ttm->pages[i] = pages ? pages[i] : NULL;
  678. }
  679. }
  680. void amdgpu_ttm_tt_mark_user_pages(struct ttm_tt *ttm)
  681. {
  682. struct amdgpu_ttm_tt *gtt = (void *)ttm;
  683. unsigned i;
  684. for (i = 0; i < ttm->num_pages; ++i) {
  685. struct page *page = ttm->pages[i];
  686. if (!page)
  687. continue;
  688. if (!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY))
  689. set_page_dirty(page);
  690. mark_page_accessed(page);
  691. }
  692. }
  693. /* prepare the sg table with the user pages */
  694. static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm)
  695. {
  696. struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
  697. struct amdgpu_ttm_tt *gtt = (void *)ttm;
  698. unsigned nents;
  699. int r;
  700. int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
  701. enum dma_data_direction direction = write ?
  702. DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
  703. r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0,
  704. ttm->num_pages << PAGE_SHIFT,
  705. GFP_KERNEL);
  706. if (r)
  707. goto release_sg;
  708. r = -ENOMEM;
  709. nents = dma_map_sg(adev->dev, ttm->sg->sgl, ttm->sg->nents, direction);
  710. if (nents != ttm->sg->nents)
  711. goto release_sg;
  712. drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
  713. gtt->ttm.dma_address, ttm->num_pages);
  714. return 0;
  715. release_sg:
  716. kfree(ttm->sg);
  717. return r;
  718. }
  719. static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
  720. {
  721. struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
  722. struct amdgpu_ttm_tt *gtt = (void *)ttm;
  723. int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
  724. enum dma_data_direction direction = write ?
  725. DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
  726. /* double check that we don't free the table twice */
  727. if (!ttm->sg->sgl)
  728. return;
  729. /* free the sg table and pages again */
  730. dma_unmap_sg(adev->dev, ttm->sg->sgl, ttm->sg->nents, direction);
  731. amdgpu_ttm_tt_mark_user_pages(ttm);
  732. sg_free_table(ttm->sg);
  733. }
  734. static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm,
  735. struct ttm_mem_reg *bo_mem)
  736. {
  737. struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
  738. struct amdgpu_ttm_tt *gtt = (void*)ttm;
  739. uint64_t flags;
  740. int r = 0;
  741. if (gtt->userptr) {
  742. r = amdgpu_ttm_tt_pin_userptr(ttm);
  743. if (r) {
  744. DRM_ERROR("failed to pin userptr\n");
  745. return r;
  746. }
  747. }
  748. if (!ttm->num_pages) {
  749. WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
  750. ttm->num_pages, bo_mem, ttm);
  751. }
  752. if (bo_mem->mem_type == AMDGPU_PL_GDS ||
  753. bo_mem->mem_type == AMDGPU_PL_GWS ||
  754. bo_mem->mem_type == AMDGPU_PL_OA)
  755. return -EINVAL;
  756. if (!amdgpu_gtt_mgr_has_gart_addr(bo_mem)) {
  757. gtt->offset = AMDGPU_BO_INVALID_OFFSET;
  758. return 0;
  759. }
  760. flags = amdgpu_ttm_tt_pte_flags(adev, ttm, bo_mem);
  761. gtt->offset = (u64)bo_mem->start << PAGE_SHIFT;
  762. r = amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
  763. ttm->pages, gtt->ttm.dma_address, flags);
  764. if (r)
  765. DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
  766. ttm->num_pages, gtt->offset);
  767. return r;
  768. }
  769. int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
  770. {
  771. struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
  772. struct ttm_operation_ctx ctx = { false, false };
  773. struct amdgpu_ttm_tt *gtt = (void*)bo->ttm;
  774. struct ttm_mem_reg tmp;
  775. struct ttm_placement placement;
  776. struct ttm_place placements;
  777. uint64_t flags;
  778. int r;
  779. if (bo->mem.mem_type != TTM_PL_TT ||
  780. amdgpu_gtt_mgr_has_gart_addr(&bo->mem))
  781. return 0;
  782. tmp = bo->mem;
  783. tmp.mm_node = NULL;
  784. placement.num_placement = 1;
  785. placement.placement = &placements;
  786. placement.num_busy_placement = 1;
  787. placement.busy_placement = &placements;
  788. placements.fpfn = 0;
  789. placements.lpfn = adev->gmc.gart_size >> PAGE_SHIFT;
  790. placements.flags = (bo->mem.placement & ~TTM_PL_MASK_MEM) |
  791. TTM_PL_FLAG_TT;
  792. r = ttm_bo_mem_space(bo, &placement, &tmp, &ctx);
  793. if (unlikely(r))
  794. return r;
  795. flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, &tmp);
  796. gtt->offset = (u64)tmp.start << PAGE_SHIFT;
  797. r = amdgpu_gart_bind(adev, gtt->offset, bo->ttm->num_pages,
  798. bo->ttm->pages, gtt->ttm.dma_address, flags);
  799. if (unlikely(r)) {
  800. ttm_bo_mem_put(bo, &tmp);
  801. return r;
  802. }
  803. ttm_bo_mem_put(bo, &bo->mem);
  804. bo->mem = tmp;
  805. bo->offset = (bo->mem.start << PAGE_SHIFT) +
  806. bo->bdev->man[bo->mem.mem_type].gpu_offset;
  807. return 0;
  808. }
  809. int amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo)
  810. {
  811. struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
  812. struct amdgpu_ttm_tt *gtt = (void *)tbo->ttm;
  813. uint64_t flags;
  814. int r;
  815. if (!gtt)
  816. return 0;
  817. flags = amdgpu_ttm_tt_pte_flags(adev, &gtt->ttm.ttm, &tbo->mem);
  818. r = amdgpu_gart_bind(adev, gtt->offset, gtt->ttm.ttm.num_pages,
  819. gtt->ttm.ttm.pages, gtt->ttm.dma_address, flags);
  820. if (r)
  821. DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
  822. gtt->ttm.ttm.num_pages, gtt->offset);
  823. return r;
  824. }
  825. static int amdgpu_ttm_backend_unbind(struct ttm_tt *ttm)
  826. {
  827. struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
  828. struct amdgpu_ttm_tt *gtt = (void *)ttm;
  829. int r;
  830. if (gtt->userptr)
  831. amdgpu_ttm_tt_unpin_userptr(ttm);
  832. if (gtt->offset == AMDGPU_BO_INVALID_OFFSET)
  833. return 0;
  834. /* unbind shouldn't be done for GDS/GWS/OA in ttm_bo_clean_mm */
  835. r = amdgpu_gart_unbind(adev, gtt->offset, ttm->num_pages);
  836. if (r)
  837. DRM_ERROR("failed to unbind %lu pages at 0x%08llX\n",
  838. gtt->ttm.ttm.num_pages, gtt->offset);
  839. return r;
  840. }
  841. static void amdgpu_ttm_backend_destroy(struct ttm_tt *ttm)
  842. {
  843. struct amdgpu_ttm_tt *gtt = (void *)ttm;
  844. ttm_dma_tt_fini(&gtt->ttm);
  845. kfree(gtt);
  846. }
  847. static struct ttm_backend_func amdgpu_backend_func = {
  848. .bind = &amdgpu_ttm_backend_bind,
  849. .unbind = &amdgpu_ttm_backend_unbind,
  850. .destroy = &amdgpu_ttm_backend_destroy,
  851. };
  852. static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_bo_device *bdev,
  853. unsigned long size, uint32_t page_flags)
  854. {
  855. struct amdgpu_device *adev;
  856. struct amdgpu_ttm_tt *gtt;
  857. adev = amdgpu_ttm_adev(bdev);
  858. gtt = kzalloc(sizeof(struct amdgpu_ttm_tt), GFP_KERNEL);
  859. if (gtt == NULL) {
  860. return NULL;
  861. }
  862. gtt->ttm.ttm.func = &amdgpu_backend_func;
  863. if (ttm_sg_tt_init(&gtt->ttm, bdev, size, page_flags)) {
  864. kfree(gtt);
  865. return NULL;
  866. }
  867. return &gtt->ttm.ttm;
  868. }
  869. static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm,
  870. struct ttm_operation_ctx *ctx)
  871. {
  872. struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
  873. struct amdgpu_ttm_tt *gtt = (void *)ttm;
  874. bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
  875. if (gtt && gtt->userptr) {
  876. ttm->sg = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
  877. if (!ttm->sg)
  878. return -ENOMEM;
  879. ttm->page_flags |= TTM_PAGE_FLAG_SG;
  880. ttm->state = tt_unbound;
  881. return 0;
  882. }
  883. if (slave && ttm->sg) {
  884. drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
  885. gtt->ttm.dma_address,
  886. ttm->num_pages);
  887. ttm->state = tt_unbound;
  888. return 0;
  889. }
  890. #ifdef CONFIG_SWIOTLB
  891. if (adev->need_swiotlb && swiotlb_nr_tbl()) {
  892. return ttm_dma_populate(&gtt->ttm, adev->dev, ctx);
  893. }
  894. #endif
  895. return ttm_populate_and_map_pages(adev->dev, &gtt->ttm, ctx);
  896. }
  897. static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm)
  898. {
  899. struct amdgpu_device *adev;
  900. struct amdgpu_ttm_tt *gtt = (void *)ttm;
  901. bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
  902. if (gtt && gtt->userptr) {
  903. amdgpu_ttm_tt_set_user_pages(ttm, NULL);
  904. kfree(ttm->sg);
  905. ttm->page_flags &= ~TTM_PAGE_FLAG_SG;
  906. return;
  907. }
  908. if (slave)
  909. return;
  910. adev = amdgpu_ttm_adev(ttm->bdev);
  911. #ifdef CONFIG_SWIOTLB
  912. if (adev->need_swiotlb && swiotlb_nr_tbl()) {
  913. ttm_dma_unpopulate(&gtt->ttm, adev->dev);
  914. return;
  915. }
  916. #endif
  917. ttm_unmap_and_unpopulate_pages(adev->dev, &gtt->ttm);
  918. }
  919. int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
  920. uint32_t flags)
  921. {
  922. struct amdgpu_ttm_tt *gtt = (void *)ttm;
  923. if (gtt == NULL)
  924. return -EINVAL;
  925. gtt->userptr = addr;
  926. gtt->usermm = current->mm;
  927. gtt->userflags = flags;
  928. spin_lock_init(&gtt->guptasklock);
  929. INIT_LIST_HEAD(&gtt->guptasks);
  930. atomic_set(&gtt->mmu_invalidations, 0);
  931. gtt->last_set_pages = 0;
  932. return 0;
  933. }
  934. struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm)
  935. {
  936. struct amdgpu_ttm_tt *gtt = (void *)ttm;
  937. if (gtt == NULL)
  938. return NULL;
  939. return gtt->usermm;
  940. }
  941. bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
  942. unsigned long end)
  943. {
  944. struct amdgpu_ttm_tt *gtt = (void *)ttm;
  945. struct amdgpu_ttm_gup_task_list *entry;
  946. unsigned long size;
  947. if (gtt == NULL || !gtt->userptr)
  948. return false;
  949. size = (unsigned long)gtt->ttm.ttm.num_pages * PAGE_SIZE;
  950. if (gtt->userptr > end || gtt->userptr + size <= start)
  951. return false;
  952. spin_lock(&gtt->guptasklock);
  953. list_for_each_entry(entry, &gtt->guptasks, list) {
  954. if (entry->task == current) {
  955. spin_unlock(&gtt->guptasklock);
  956. return false;
  957. }
  958. }
  959. spin_unlock(&gtt->guptasklock);
  960. atomic_inc(&gtt->mmu_invalidations);
  961. return true;
  962. }
  963. bool amdgpu_ttm_tt_userptr_invalidated(struct ttm_tt *ttm,
  964. int *last_invalidated)
  965. {
  966. struct amdgpu_ttm_tt *gtt = (void *)ttm;
  967. int prev_invalidated = *last_invalidated;
  968. *last_invalidated = atomic_read(&gtt->mmu_invalidations);
  969. return prev_invalidated != *last_invalidated;
  970. }
  971. bool amdgpu_ttm_tt_userptr_needs_pages(struct ttm_tt *ttm)
  972. {
  973. struct amdgpu_ttm_tt *gtt = (void *)ttm;
  974. if (gtt == NULL || !gtt->userptr)
  975. return false;
  976. return atomic_read(&gtt->mmu_invalidations) != gtt->last_set_pages;
  977. }
  978. bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm)
  979. {
  980. struct amdgpu_ttm_tt *gtt = (void *)ttm;
  981. if (gtt == NULL)
  982. return false;
  983. return !!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
  984. }
  985. uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
  986. struct ttm_mem_reg *mem)
  987. {
  988. uint64_t flags = 0;
  989. if (mem && mem->mem_type != TTM_PL_SYSTEM)
  990. flags |= AMDGPU_PTE_VALID;
  991. if (mem && mem->mem_type == TTM_PL_TT) {
  992. flags |= AMDGPU_PTE_SYSTEM;
  993. if (ttm->caching_state == tt_cached)
  994. flags |= AMDGPU_PTE_SNOOPED;
  995. }
  996. flags |= adev->gart.gart_pte_flags;
  997. flags |= AMDGPU_PTE_READABLE;
  998. if (!amdgpu_ttm_tt_is_readonly(ttm))
  999. flags |= AMDGPU_PTE_WRITEABLE;
  1000. return flags;
  1001. }
  1002. static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
  1003. const struct ttm_place *place)
  1004. {
  1005. unsigned long num_pages = bo->mem.num_pages;
  1006. struct drm_mm_node *node = bo->mem.mm_node;
  1007. struct reservation_object_list *flist;
  1008. struct dma_fence *f;
  1009. int i;
  1010. /* If bo is a KFD BO, check if the bo belongs to the current process.
  1011. * If true, then return false as any KFD process needs all its BOs to
  1012. * be resident to run successfully
  1013. */
  1014. flist = reservation_object_get_list(bo->resv);
  1015. if (flist) {
  1016. for (i = 0; i < flist->shared_count; ++i) {
  1017. f = rcu_dereference_protected(flist->shared[i],
  1018. reservation_object_held(bo->resv));
  1019. if (amdkfd_fence_check_mm(f, current->mm))
  1020. return false;
  1021. }
  1022. }
  1023. switch (bo->mem.mem_type) {
  1024. case TTM_PL_TT:
  1025. return true;
  1026. case TTM_PL_VRAM:
  1027. /* Check each drm MM node individually */
  1028. while (num_pages) {
  1029. if (place->fpfn < (node->start + node->size) &&
  1030. !(place->lpfn && place->lpfn <= node->start))
  1031. return true;
  1032. num_pages -= node->size;
  1033. ++node;
  1034. }
  1035. return false;
  1036. default:
  1037. break;
  1038. }
  1039. return ttm_bo_eviction_valuable(bo, place);
  1040. }
  1041. static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
  1042. unsigned long offset,
  1043. void *buf, int len, int write)
  1044. {
  1045. struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
  1046. struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
  1047. struct drm_mm_node *nodes;
  1048. uint32_t value = 0;
  1049. int ret = 0;
  1050. uint64_t pos;
  1051. unsigned long flags;
  1052. if (bo->mem.mem_type != TTM_PL_VRAM)
  1053. return -EIO;
  1054. nodes = amdgpu_find_mm_node(&abo->tbo.mem, &offset);
  1055. pos = (nodes->start << PAGE_SHIFT) + offset;
  1056. while (len && pos < adev->gmc.mc_vram_size) {
  1057. uint64_t aligned_pos = pos & ~(uint64_t)3;
  1058. uint32_t bytes = 4 - (pos & 3);
  1059. uint32_t shift = (pos & 3) * 8;
  1060. uint32_t mask = 0xffffffff << shift;
  1061. if (len < bytes) {
  1062. mask &= 0xffffffff >> (bytes - len) * 8;
  1063. bytes = len;
  1064. }
  1065. spin_lock_irqsave(&adev->mmio_idx_lock, flags);
  1066. WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)aligned_pos) | 0x80000000);
  1067. WREG32_NO_KIQ(mmMM_INDEX_HI, aligned_pos >> 31);
  1068. if (!write || mask != 0xffffffff)
  1069. value = RREG32_NO_KIQ(mmMM_DATA);
  1070. if (write) {
  1071. value &= ~mask;
  1072. value |= (*(uint32_t *)buf << shift) & mask;
  1073. WREG32_NO_KIQ(mmMM_DATA, value);
  1074. }
  1075. spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
  1076. if (!write) {
  1077. value = (value & mask) >> shift;
  1078. memcpy(buf, &value, bytes);
  1079. }
  1080. ret += bytes;
  1081. buf = (uint8_t *)buf + bytes;
  1082. pos += bytes;
  1083. len -= bytes;
  1084. if (pos >= (nodes->start + nodes->size) << PAGE_SHIFT) {
  1085. ++nodes;
  1086. pos = (nodes->start << PAGE_SHIFT);
  1087. }
  1088. }
  1089. return ret;
  1090. }
  1091. static struct ttm_bo_driver amdgpu_bo_driver = {
  1092. .ttm_tt_create = &amdgpu_ttm_tt_create,
  1093. .ttm_tt_populate = &amdgpu_ttm_tt_populate,
  1094. .ttm_tt_unpopulate = &amdgpu_ttm_tt_unpopulate,
  1095. .invalidate_caches = &amdgpu_invalidate_caches,
  1096. .init_mem_type = &amdgpu_init_mem_type,
  1097. .eviction_valuable = amdgpu_ttm_bo_eviction_valuable,
  1098. .evict_flags = &amdgpu_evict_flags,
  1099. .move = &amdgpu_bo_move,
  1100. .verify_access = &amdgpu_verify_access,
  1101. .move_notify = &amdgpu_bo_move_notify,
  1102. .fault_reserve_notify = &amdgpu_bo_fault_reserve_notify,
  1103. .io_mem_reserve = &amdgpu_ttm_io_mem_reserve,
  1104. .io_mem_free = &amdgpu_ttm_io_mem_free,
  1105. .io_mem_pfn = amdgpu_ttm_io_mem_pfn,
  1106. .access_memory = &amdgpu_ttm_access_memory
  1107. };
  1108. /*
  1109. * Firmware Reservation functions
  1110. */
  1111. /**
  1112. * amdgpu_ttm_fw_reserve_vram_fini - free fw reserved vram
  1113. *
  1114. * @adev: amdgpu_device pointer
  1115. *
  1116. * free fw reserved vram if it has been reserved.
  1117. */
  1118. static void amdgpu_ttm_fw_reserve_vram_fini(struct amdgpu_device *adev)
  1119. {
  1120. amdgpu_bo_free_kernel(&adev->fw_vram_usage.reserved_bo,
  1121. NULL, &adev->fw_vram_usage.va);
  1122. }
  1123. /**
  1124. * amdgpu_ttm_fw_reserve_vram_init - create bo vram reservation from fw
  1125. *
  1126. * @adev: amdgpu_device pointer
  1127. *
  1128. * create bo vram reservation from fw.
  1129. */
  1130. static int amdgpu_ttm_fw_reserve_vram_init(struct amdgpu_device *adev)
  1131. {
  1132. struct ttm_operation_ctx ctx = { false, false };
  1133. int r = 0;
  1134. int i;
  1135. u64 vram_size = adev->gmc.visible_vram_size;
  1136. u64 offset = adev->fw_vram_usage.start_offset;
  1137. u64 size = adev->fw_vram_usage.size;
  1138. struct amdgpu_bo *bo;
  1139. adev->fw_vram_usage.va = NULL;
  1140. adev->fw_vram_usage.reserved_bo = NULL;
  1141. if (adev->fw_vram_usage.size > 0 &&
  1142. adev->fw_vram_usage.size <= vram_size) {
  1143. r = amdgpu_bo_create(adev, adev->fw_vram_usage.size,
  1144. PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM,
  1145. AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
  1146. AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, NULL, NULL,
  1147. &adev->fw_vram_usage.reserved_bo);
  1148. if (r)
  1149. goto error_create;
  1150. r = amdgpu_bo_reserve(adev->fw_vram_usage.reserved_bo, false);
  1151. if (r)
  1152. goto error_reserve;
  1153. /* remove the original mem node and create a new one at the
  1154. * request position
  1155. */
  1156. bo = adev->fw_vram_usage.reserved_bo;
  1157. offset = ALIGN(offset, PAGE_SIZE);
  1158. for (i = 0; i < bo->placement.num_placement; ++i) {
  1159. bo->placements[i].fpfn = offset >> PAGE_SHIFT;
  1160. bo->placements[i].lpfn = (offset + size) >> PAGE_SHIFT;
  1161. }
  1162. ttm_bo_mem_put(&bo->tbo, &bo->tbo.mem);
  1163. r = ttm_bo_mem_space(&bo->tbo, &bo->placement,
  1164. &bo->tbo.mem, &ctx);
  1165. if (r)
  1166. goto error_pin;
  1167. r = amdgpu_bo_pin_restricted(adev->fw_vram_usage.reserved_bo,
  1168. AMDGPU_GEM_DOMAIN_VRAM,
  1169. adev->fw_vram_usage.start_offset,
  1170. (adev->fw_vram_usage.start_offset +
  1171. adev->fw_vram_usage.size), NULL);
  1172. if (r)
  1173. goto error_pin;
  1174. r = amdgpu_bo_kmap(adev->fw_vram_usage.reserved_bo,
  1175. &adev->fw_vram_usage.va);
  1176. if (r)
  1177. goto error_kmap;
  1178. amdgpu_bo_unreserve(adev->fw_vram_usage.reserved_bo);
  1179. }
  1180. return r;
  1181. error_kmap:
  1182. amdgpu_bo_unpin(adev->fw_vram_usage.reserved_bo);
  1183. error_pin:
  1184. amdgpu_bo_unreserve(adev->fw_vram_usage.reserved_bo);
  1185. error_reserve:
  1186. amdgpu_bo_unref(&adev->fw_vram_usage.reserved_bo);
  1187. error_create:
  1188. adev->fw_vram_usage.va = NULL;
  1189. adev->fw_vram_usage.reserved_bo = NULL;
  1190. return r;
  1191. }
  1192. int amdgpu_ttm_init(struct amdgpu_device *adev)
  1193. {
  1194. uint64_t gtt_size;
  1195. int r;
  1196. u64 vis_vram_limit;
  1197. r = amdgpu_ttm_global_init(adev);
  1198. if (r) {
  1199. return r;
  1200. }
  1201. /* No others user of address space so set it to 0 */
  1202. r = ttm_bo_device_init(&adev->mman.bdev,
  1203. adev->mman.bo_global_ref.ref.object,
  1204. &amdgpu_bo_driver,
  1205. adev->ddev->anon_inode->i_mapping,
  1206. DRM_FILE_PAGE_OFFSET,
  1207. adev->need_dma32);
  1208. if (r) {
  1209. DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
  1210. return r;
  1211. }
  1212. adev->mman.initialized = true;
  1213. /* We opt to avoid OOM on system pages allocations */
  1214. adev->mman.bdev.no_retry = true;
  1215. r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_VRAM,
  1216. adev->gmc.real_vram_size >> PAGE_SHIFT);
  1217. if (r) {
  1218. DRM_ERROR("Failed initializing VRAM heap.\n");
  1219. return r;
  1220. }
  1221. /* Reduce size of CPU-visible VRAM if requested */
  1222. vis_vram_limit = (u64)amdgpu_vis_vram_limit * 1024 * 1024;
  1223. if (amdgpu_vis_vram_limit > 0 &&
  1224. vis_vram_limit <= adev->gmc.visible_vram_size)
  1225. adev->gmc.visible_vram_size = vis_vram_limit;
  1226. /* Change the size here instead of the init above so only lpfn is affected */
  1227. amdgpu_ttm_set_buffer_funcs_status(adev, false);
  1228. #ifdef CONFIG_64BIT
  1229. adev->mman.aper_base_kaddr = ioremap_wc(adev->gmc.aper_base,
  1230. adev->gmc.visible_vram_size);
  1231. #endif
  1232. /*
  1233. *The reserved vram for firmware must be pinned to the specified
  1234. *place on the VRAM, so reserve it early.
  1235. */
  1236. r = amdgpu_ttm_fw_reserve_vram_init(adev);
  1237. if (r) {
  1238. return r;
  1239. }
  1240. r = amdgpu_bo_create_kernel(adev, adev->gmc.stolen_size, PAGE_SIZE,
  1241. AMDGPU_GEM_DOMAIN_VRAM,
  1242. &adev->stolen_vga_memory,
  1243. NULL, NULL);
  1244. if (r)
  1245. return r;
  1246. DRM_INFO("amdgpu: %uM of VRAM memory ready\n",
  1247. (unsigned) (adev->gmc.real_vram_size / (1024 * 1024)));
  1248. if (amdgpu_gtt_size == -1) {
  1249. struct sysinfo si;
  1250. si_meminfo(&si);
  1251. gtt_size = min(max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20),
  1252. adev->gmc.mc_vram_size),
  1253. ((uint64_t)si.totalram * si.mem_unit * 3/4));
  1254. }
  1255. else
  1256. gtt_size = (uint64_t)amdgpu_gtt_size << 20;
  1257. r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_TT, gtt_size >> PAGE_SHIFT);
  1258. if (r) {
  1259. DRM_ERROR("Failed initializing GTT heap.\n");
  1260. return r;
  1261. }
  1262. DRM_INFO("amdgpu: %uM of GTT memory ready.\n",
  1263. (unsigned)(gtt_size / (1024 * 1024)));
  1264. adev->gds.mem.total_size = adev->gds.mem.total_size << AMDGPU_GDS_SHIFT;
  1265. adev->gds.mem.gfx_partition_size = adev->gds.mem.gfx_partition_size << AMDGPU_GDS_SHIFT;
  1266. adev->gds.mem.cs_partition_size = adev->gds.mem.cs_partition_size << AMDGPU_GDS_SHIFT;
  1267. adev->gds.gws.total_size = adev->gds.gws.total_size << AMDGPU_GWS_SHIFT;
  1268. adev->gds.gws.gfx_partition_size = adev->gds.gws.gfx_partition_size << AMDGPU_GWS_SHIFT;
  1269. adev->gds.gws.cs_partition_size = adev->gds.gws.cs_partition_size << AMDGPU_GWS_SHIFT;
  1270. adev->gds.oa.total_size = adev->gds.oa.total_size << AMDGPU_OA_SHIFT;
  1271. adev->gds.oa.gfx_partition_size = adev->gds.oa.gfx_partition_size << AMDGPU_OA_SHIFT;
  1272. adev->gds.oa.cs_partition_size = adev->gds.oa.cs_partition_size << AMDGPU_OA_SHIFT;
  1273. /* GDS Memory */
  1274. if (adev->gds.mem.total_size) {
  1275. r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_GDS,
  1276. adev->gds.mem.total_size >> PAGE_SHIFT);
  1277. if (r) {
  1278. DRM_ERROR("Failed initializing GDS heap.\n");
  1279. return r;
  1280. }
  1281. }
  1282. /* GWS */
  1283. if (adev->gds.gws.total_size) {
  1284. r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_GWS,
  1285. adev->gds.gws.total_size >> PAGE_SHIFT);
  1286. if (r) {
  1287. DRM_ERROR("Failed initializing gws heap.\n");
  1288. return r;
  1289. }
  1290. }
  1291. /* OA */
  1292. if (adev->gds.oa.total_size) {
  1293. r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_OA,
  1294. adev->gds.oa.total_size >> PAGE_SHIFT);
  1295. if (r) {
  1296. DRM_ERROR("Failed initializing oa heap.\n");
  1297. return r;
  1298. }
  1299. }
  1300. r = amdgpu_ttm_debugfs_init(adev);
  1301. if (r) {
  1302. DRM_ERROR("Failed to init debugfs\n");
  1303. return r;
  1304. }
  1305. return 0;
  1306. }
  1307. void amdgpu_ttm_fini(struct amdgpu_device *adev)
  1308. {
  1309. if (!adev->mman.initialized)
  1310. return;
  1311. amdgpu_ttm_debugfs_fini(adev);
  1312. amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, NULL);
  1313. amdgpu_ttm_fw_reserve_vram_fini(adev);
  1314. if (adev->mman.aper_base_kaddr)
  1315. iounmap(adev->mman.aper_base_kaddr);
  1316. adev->mman.aper_base_kaddr = NULL;
  1317. ttm_bo_clean_mm(&adev->mman.bdev, TTM_PL_VRAM);
  1318. ttm_bo_clean_mm(&adev->mman.bdev, TTM_PL_TT);
  1319. if (adev->gds.mem.total_size)
  1320. ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_GDS);
  1321. if (adev->gds.gws.total_size)
  1322. ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_GWS);
  1323. if (adev->gds.oa.total_size)
  1324. ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_OA);
  1325. ttm_bo_device_release(&adev->mman.bdev);
  1326. amdgpu_ttm_global_fini(adev);
  1327. adev->mman.initialized = false;
  1328. DRM_INFO("amdgpu: ttm finalized\n");
  1329. }
  1330. /**
  1331. * amdgpu_ttm_set_buffer_funcs_status - enable/disable use of buffer functions
  1332. *
  1333. * @adev: amdgpu_device pointer
  1334. * @enable: true when we can use buffer functions.
  1335. *
  1336. * Enable/disable use of buffer functions during suspend/resume. This should
  1337. * only be called at bootup or when userspace isn't running.
  1338. */
  1339. void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
  1340. {
  1341. struct ttm_mem_type_manager *man = &adev->mman.bdev.man[TTM_PL_VRAM];
  1342. uint64_t size;
  1343. if (!adev->mman.initialized || adev->in_gpu_reset)
  1344. return;
  1345. /* this just adjusts TTM size idea, which sets lpfn to the correct value */
  1346. if (enable)
  1347. size = adev->gmc.real_vram_size;
  1348. else
  1349. size = adev->gmc.visible_vram_size;
  1350. man->size = size >> PAGE_SHIFT;
  1351. adev->mman.buffer_funcs_enabled = enable;
  1352. }
int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct drm_file *file_priv;
        struct amdgpu_device *adev;

        if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
                return -EINVAL;

        file_priv = filp->private_data;
        adev = file_priv->minor->dev->dev_private;
        if (adev == NULL)
                return -EINVAL;

        return ttm_bo_mmap(filp, vma, &adev->mman.bdev);
}
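
/**
 * amdgpu_map_buffer - map pages of a buffer into a GART transfer window
 *
 * @bo: buffer object whose pages should be mapped
 * @mem: memory placement the PTE flags are derived from
 * @num_pages: number of pages to map
 * @offset: byte offset into the buffer to start at
 * @window: which GTT transfer window to use
 * @ring: ring used to submit the mapping job
 * @addr: returned GART address of the window
 *
 * Builds a job whose IB carries the GART page table entries for the given
 * pages plus a copy command that writes those entries into the GART table at
 * the selected window, so that a subsequent copy can reach memory that has
 * no GPU-visible address of its own.
 */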
static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
                             struct ttm_mem_reg *mem, unsigned num_pages,
                             uint64_t offset, unsigned window,
                             struct amdgpu_ring *ring,
                             uint64_t *addr)
{
        struct amdgpu_ttm_tt *gtt = (void *)bo->ttm;
        struct amdgpu_device *adev = ring->adev;
        struct ttm_tt *ttm = bo->ttm;
        struct amdgpu_job *job;
        unsigned num_dw, num_bytes;
        dma_addr_t *dma_address;
        struct dma_fence *fence;
        uint64_t src_addr, dst_addr;
        uint64_t flags;
        int r;

        BUG_ON(adev->mman.buffer_funcs->copy_max_bytes <
               AMDGPU_GTT_MAX_TRANSFER_SIZE * 8);

        *addr = adev->gmc.gart_start;
        *addr += (u64)window * AMDGPU_GTT_MAX_TRANSFER_SIZE *
                AMDGPU_GPU_PAGE_SIZE;

        num_dw = adev->mman.buffer_funcs->copy_num_dw;
        while (num_dw & 0x7)
                num_dw++;

        num_bytes = num_pages * 8;

        r = amdgpu_job_alloc_with_ib(adev, num_dw * 4 + num_bytes, &job);
        if (r)
                return r;

        src_addr = num_dw * 4;
        src_addr += job->ibs[0].gpu_addr;

        dst_addr = adev->gart.table_addr;
        dst_addr += window * AMDGPU_GTT_MAX_TRANSFER_SIZE * 8;
        amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr,
                                dst_addr, num_bytes);

        amdgpu_ring_pad_ib(ring, &job->ibs[0]);
        WARN_ON(job->ibs[0].length_dw > num_dw);

        dma_address = &gtt->ttm.dma_address[offset >> PAGE_SHIFT];
        flags = amdgpu_ttm_tt_pte_flags(adev, ttm, mem);
        r = amdgpu_gart_map(adev, 0, num_pages, dma_address, flags,
                            &job->ibs[0].ptr[num_dw]);
        if (r)
                goto error_free;

        r = amdgpu_job_submit(job, ring, &adev->mman.entity,
                              AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
        if (r)
                goto error_free;

        dma_fence_put(fence);

        return r;

error_free:
        amdgpu_job_free(job);
        return r;
}
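
/**
 * amdgpu_copy_buffer - schedule a GPU copy between two GPU addresses
 *
 * @ring: ring the copy is submitted on (the buffer functions ring)
 * @src_offset: source GPU address
 * @dst_offset: destination GPU address
 * @byte_count: number of bytes to copy
 * @resv: optional reservation object to sync to before copying
 * @fence: returned fence that signals when the copy has finished
 * @direct_submit: schedule the IB directly instead of through the entity
 * @vm_needs_flush: whether the job requires a VM flush
 *
 * Splits the copy into chunks of at most copy_max_bytes and emits one copy
 * packet per chunk into a single IB.
 */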
int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
                       uint64_t dst_offset, uint32_t byte_count,
                       struct reservation_object *resv,
                       struct dma_fence **fence, bool direct_submit,
                       bool vm_needs_flush)
{
        struct amdgpu_device *adev = ring->adev;
        struct amdgpu_job *job;

        uint32_t max_bytes;
        unsigned num_loops, num_dw;
        unsigned i;
        int r;

        if (direct_submit && !ring->ready) {
                DRM_ERROR("Trying to move memory with ring turned off.\n");
                return -EINVAL;
        }

        max_bytes = adev->mman.buffer_funcs->copy_max_bytes;
        num_loops = DIV_ROUND_UP(byte_count, max_bytes);
        num_dw = num_loops * adev->mman.buffer_funcs->copy_num_dw;

        /* for IB padding */
        while (num_dw & 0x7)
                num_dw++;

        r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, &job);
        if (r)
                return r;

        job->vm_needs_flush = vm_needs_flush;
        if (resv) {
                r = amdgpu_sync_resv(adev, &job->sync, resv,
                                     AMDGPU_FENCE_OWNER_UNDEFINED,
                                     false);
                if (r) {
                        DRM_ERROR("sync failed (%d).\n", r);
                        goto error_free;
                }
        }

        for (i = 0; i < num_loops; i++) {
                uint32_t cur_size_in_bytes = min(byte_count, max_bytes);

                amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_offset,
                                        dst_offset, cur_size_in_bytes);

                src_offset += cur_size_in_bytes;
                dst_offset += cur_size_in_bytes;
                byte_count -= cur_size_in_bytes;
        }

        amdgpu_ring_pad_ib(ring, &job->ibs[0]);
        WARN_ON(job->ibs[0].length_dw > num_dw);
        if (direct_submit) {
                r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs,
                                       NULL, fence);
                job->fence = dma_fence_get(*fence);
                if (r)
                        DRM_ERROR("Error scheduling IBs (%d)\n", r);
                amdgpu_job_free(job);
        } else {
                r = amdgpu_job_submit(job, ring, &adev->mman.entity,
                                      AMDGPU_FENCE_OWNER_UNDEFINED, fence);
                if (r)
                        goto error_free;
        }

        return r;

error_free:
        amdgpu_job_free(job);
        return r;
}
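
/**
 * amdgpu_fill_buffer - fill a buffer object with a 32-bit value
 *
 * @bo: buffer object to fill
 * @src_data: value the buffer is filled with
 * @resv: optional reservation object to sync to first
 * @fence: returned fence that signals when the fill has finished
 *
 * Walks the drm_mm nodes backing the BO and emits fill packets of at most
 * fill_max_bytes each into one IB. GTT placed BOs are bound to the GART
 * first so that every node has a valid GPU address.
 */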
int amdgpu_fill_buffer(struct amdgpu_bo *bo,
                       uint32_t src_data,
                       struct reservation_object *resv,
                       struct dma_fence **fence)
{
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
        uint32_t max_bytes = adev->mman.buffer_funcs->fill_max_bytes;
        struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;

        struct drm_mm_node *mm_node;
        unsigned long num_pages;
        unsigned int num_loops, num_dw;

        struct amdgpu_job *job;
        int r;

        if (!adev->mman.buffer_funcs_enabled) {
                DRM_ERROR("Trying to clear memory with ring turned off.\n");
                return -EINVAL;
        }

        if (bo->tbo.mem.mem_type == TTM_PL_TT) {
                r = amdgpu_ttm_alloc_gart(&bo->tbo);
                if (r)
                        return r;
        }

        num_pages = bo->tbo.num_pages;
        mm_node = bo->tbo.mem.mm_node;
        num_loops = 0;
        while (num_pages) {
                uint32_t byte_count = mm_node->size << PAGE_SHIFT;

                num_loops += DIV_ROUND_UP(byte_count, max_bytes);
                num_pages -= mm_node->size;
                ++mm_node;
        }
        num_dw = num_loops * adev->mman.buffer_funcs->fill_num_dw;

        /* for IB padding */
        num_dw += 64;

        r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, &job);
        if (r)
                return r;

        if (resv) {
                r = amdgpu_sync_resv(adev, &job->sync, resv,
                                     AMDGPU_FENCE_OWNER_UNDEFINED, false);
                if (r) {
                        DRM_ERROR("sync failed (%d).\n", r);
                        goto error_free;
                }
        }

        num_pages = bo->tbo.num_pages;
        mm_node = bo->tbo.mem.mm_node;

        while (num_pages) {
                uint32_t byte_count = mm_node->size << PAGE_SHIFT;
                uint64_t dst_addr;

                dst_addr = amdgpu_mm_node_addr(&bo->tbo, mm_node, &bo->tbo.mem);
                while (byte_count) {
                        uint32_t cur_size_in_bytes = min(byte_count, max_bytes);

                        amdgpu_emit_fill_buffer(adev, &job->ibs[0], src_data,
                                                dst_addr, cur_size_in_bytes);

                        dst_addr += cur_size_in_bytes;
                        byte_count -= cur_size_in_bytes;
                }

                num_pages -= mm_node->size;
                ++mm_node;
        }

        amdgpu_ring_pad_ib(ring, &job->ibs[0]);
        WARN_ON(job->ibs[0].length_dw > num_dw);
        r = amdgpu_job_submit(job, ring, &adev->mman.entity,
                              AMDGPU_FENCE_OWNER_UNDEFINED, fence);
        if (r)
                goto error_free;

        return 0;

error_free:
        amdgpu_job_free(job);
        return r;
}

#if defined(CONFIG_DEBUG_FS)
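
/*
 * amdgpu_mm_dump_table - debugfs dump of a TTM memory manager
 *
 * Prints the drm_mm state of the manager (VRAM or GTT, selected through the
 * debugfs entry's data pointer) via the manager's debug callback.
 */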
static int amdgpu_mm_dump_table(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *)m->private;
        unsigned ttm_pl = *(int *)node->info_ent->data;
        struct drm_device *dev = node->minor->dev;
        struct amdgpu_device *adev = dev->dev_private;
        struct ttm_mem_type_manager *man = &adev->mman.bdev.man[ttm_pl];
        struct drm_printer p = drm_seq_file_printer(m);

        man->func->debug(man, &p);
        return 0;
}

static int ttm_pl_vram = TTM_PL_VRAM;
static int ttm_pl_tt = TTM_PL_TT;

static const struct drm_info_list amdgpu_ttm_debugfs_list[] = {
        {"amdgpu_vram_mm", amdgpu_mm_dump_table, 0, &ttm_pl_vram},
        {"amdgpu_gtt_mm", amdgpu_mm_dump_table, 0, &ttm_pl_tt},
        {"ttm_page_pool", ttm_page_alloc_debugfs, 0, NULL},
#ifdef CONFIG_SWIOTLB
        {"ttm_dma_page_pool", ttm_dma_page_alloc_debugfs, 0, NULL}
#endif
};
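
/*
 * amdgpu_ttm_vram_read - read callback for the amdgpu_vram debugfs file
 *
 * Reads VRAM one dword at a time through the MM_INDEX/MM_DATA registers,
 * which makes all of VRAM readable regardless of the size of the CPU
 * visible aperture.
 */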
static ssize_t amdgpu_ttm_vram_read(struct file *f, char __user *buf,
                                    size_t size, loff_t *pos)
{
        struct amdgpu_device *adev = file_inode(f)->i_private;
        ssize_t result = 0;
        int r;

        if (size & 0x3 || *pos & 0x3)
                return -EINVAL;

        if (*pos >= adev->gmc.mc_vram_size)
                return -ENXIO;

        while (size) {
                unsigned long flags;
                uint32_t value;

                if (*pos >= adev->gmc.mc_vram_size)
                        return result;

                spin_lock_irqsave(&adev->mmio_idx_lock, flags);
                WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)*pos) | 0x80000000);
                WREG32_NO_KIQ(mmMM_INDEX_HI, *pos >> 31);
                value = RREG32_NO_KIQ(mmMM_DATA);
                spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);

                r = put_user(value, (uint32_t *)buf);
                if (r)
                        return r;

                result += 4;
                buf += 4;
                *pos += 4;
                size -= 4;
        }

        return result;
}
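
/*
 * amdgpu_ttm_vram_write - write callback for the amdgpu_vram debugfs file
 *
 * Write counterpart of amdgpu_ttm_vram_read(): stores user supplied dwords
 * into VRAM through the MM_INDEX/MM_DATA registers.
 */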
static ssize_t amdgpu_ttm_vram_write(struct file *f, const char __user *buf,
                                     size_t size, loff_t *pos)
{
        struct amdgpu_device *adev = file_inode(f)->i_private;
        ssize_t result = 0;
        int r;

        if (size & 0x3 || *pos & 0x3)
                return -EINVAL;

        if (*pos >= adev->gmc.mc_vram_size)
                return -ENXIO;

        while (size) {
                unsigned long flags;
                uint32_t value;

                if (*pos >= adev->gmc.mc_vram_size)
                        return result;

                r = get_user(value, (uint32_t *)buf);
                if (r)
                        return r;

                spin_lock_irqsave(&adev->mmio_idx_lock, flags);
                WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)*pos) | 0x80000000);
                WREG32_NO_KIQ(mmMM_INDEX_HI, *pos >> 31);
                WREG32_NO_KIQ(mmMM_DATA, value);
                spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);

                result += 4;
                buf += 4;
                *pos += 4;
                size -= 4;
        }

        return result;
}

static const struct file_operations amdgpu_ttm_vram_fops = {
        .owner = THIS_MODULE,
        .read = amdgpu_ttm_vram_read,
        .write = amdgpu_ttm_vram_write,
        .llseek = default_llseek,
};

#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
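
/*
 * amdgpu_ttm_gtt_read - read callback for the amdgpu_gtt debugfs file
 *
 * Reads the pages currently bound to the GART; pages that are not populated
 * read back as zeroes via clear_user().
 */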
static ssize_t amdgpu_ttm_gtt_read(struct file *f, char __user *buf,
                                   size_t size, loff_t *pos)
{
        struct amdgpu_device *adev = file_inode(f)->i_private;
        ssize_t result = 0;
        int r;

        while (size) {
                loff_t p = *pos / PAGE_SIZE;
                unsigned off = *pos & ~PAGE_MASK;
                size_t cur_size = min_t(size_t, size, PAGE_SIZE - off);
                struct page *page;
                void *ptr;

                if (p >= adev->gart.num_cpu_pages)
                        return result;

                page = adev->gart.pages[p];
                if (page) {
                        ptr = kmap(page);
                        ptr += off;

                        r = copy_to_user(buf, ptr, cur_size);
                        kunmap(adev->gart.pages[p]);
                } else
                        r = clear_user(buf, cur_size);

                if (r)
                        return -EFAULT;

                result += cur_size;
                buf += cur_size;
                *pos += cur_size;
                size -= cur_size;
        }

        return result;
}

static const struct file_operations amdgpu_ttm_gtt_fops = {
        .owner = THIS_MODULE,
        .read = amdgpu_ttm_gtt_read,
        .llseek = default_llseek
};

#endif
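
/*
 * amdgpu_iomem_read - read callback for the amdgpu_iomem debugfs file
 *
 * Reads system memory as the device sees it: the device address in *pos is
 * translated through the IOMMU domain when one is attached, and only pages
 * that belong to this device's TTM mapping may be accessed.
 */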
static ssize_t amdgpu_iomem_read(struct file *f, char __user *buf,
                                 size_t size, loff_t *pos)
{
        struct amdgpu_device *adev = file_inode(f)->i_private;
        struct iommu_domain *dom;
        ssize_t result = 0;
        int r;

        dom = iommu_get_domain_for_dev(adev->dev);

        while (size) {
                phys_addr_t addr = *pos & PAGE_MASK;
                loff_t off = *pos & ~PAGE_MASK;
                size_t bytes = PAGE_SIZE - off;
                unsigned long pfn;
                struct page *p;
                void *ptr;

                bytes = bytes < size ? bytes : size;

                addr = dom ? iommu_iova_to_phys(dom, addr) : addr;

                pfn = addr >> PAGE_SHIFT;
                if (!pfn_valid(pfn))
                        return -EPERM;

                p = pfn_to_page(pfn);
                if (p->mapping != adev->mman.bdev.dev_mapping)
                        return -EPERM;

                ptr = kmap(p);
                r = copy_to_user(buf, ptr, bytes);
                kunmap(p);
                if (r)
                        return -EFAULT;

                size -= bytes;
                *pos += bytes;
                result += bytes;
        }

        return result;
}
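
/*
 * amdgpu_iomem_write - write callback for the amdgpu_iomem debugfs file
 *
 * Write counterpart of amdgpu_iomem_read(), with the same IOMMU translation
 * and ownership checks.
 */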
static ssize_t amdgpu_iomem_write(struct file *f, const char __user *buf,
                                  size_t size, loff_t *pos)
{
        struct amdgpu_device *adev = file_inode(f)->i_private;
        struct iommu_domain *dom;
        ssize_t result = 0;
        int r;

        dom = iommu_get_domain_for_dev(adev->dev);

        while (size) {
                phys_addr_t addr = *pos & PAGE_MASK;
                loff_t off = *pos & ~PAGE_MASK;
                size_t bytes = PAGE_SIZE - off;
                unsigned long pfn;
                struct page *p;
                void *ptr;

                bytes = bytes < size ? bytes : size;

                addr = dom ? iommu_iova_to_phys(dom, addr) : addr;

                pfn = addr >> PAGE_SHIFT;
                if (!pfn_valid(pfn))
                        return -EPERM;

                p = pfn_to_page(pfn);
                if (p->mapping != adev->mman.bdev.dev_mapping)
                        return -EPERM;

                ptr = kmap(p);
                r = copy_from_user(ptr, buf, bytes);
                kunmap(p);
                if (r)
                        return -EFAULT;

                size -= bytes;
                *pos += bytes;
                result += bytes;
        }

        return result;
}

static const struct file_operations amdgpu_ttm_iomem_fops = {
        .owner = THIS_MODULE,
        .read = amdgpu_iomem_read,
        .write = amdgpu_iomem_write,
        .llseek = default_llseek
};

static const struct {
        char *name;
        const struct file_operations *fops;
        int domain;
} ttm_debugfs_entries[] = {
        { "amdgpu_vram", &amdgpu_ttm_vram_fops, TTM_PL_VRAM },
#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
        { "amdgpu_gtt", &amdgpu_ttm_gtt_fops, TTM_PL_TT },
#endif
        { "amdgpu_iomem", &amdgpu_ttm_iomem_fops, TTM_PL_SYSTEM },
};

#endif
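
/*
 * amdgpu_ttm_debugfs_init - create the TTM related debugfs files
 *
 * Creates the amdgpu_vram, amdgpu_iomem and (optionally) amdgpu_gtt files,
 * sizes the VRAM/GTT files to the actual VRAM and GART sizes, and registers
 * the drm_info_list entries for the memory manager dumps and TTM page pools.
 */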
static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
        unsigned count;

        struct drm_minor *minor = adev->ddev->primary;
        struct dentry *ent, *root = minor->debugfs_root;

        for (count = 0; count < ARRAY_SIZE(ttm_debugfs_entries); count++) {
                ent = debugfs_create_file(
                                ttm_debugfs_entries[count].name,
                                S_IFREG | S_IRUGO, root,
                                adev,
                                ttm_debugfs_entries[count].fops);
                if (IS_ERR(ent))
                        return PTR_ERR(ent);
                if (ttm_debugfs_entries[count].domain == TTM_PL_VRAM)
                        i_size_write(ent->d_inode, adev->gmc.mc_vram_size);
                else if (ttm_debugfs_entries[count].domain == TTM_PL_TT)
                        i_size_write(ent->d_inode, adev->gmc.gart_size);
                adev->mman.debugfs_entries[count] = ent;
        }

        count = ARRAY_SIZE(amdgpu_ttm_debugfs_list);

#ifdef CONFIG_SWIOTLB
        if (!(adev->need_swiotlb && swiotlb_nr_tbl()))
                --count;
#endif

        return amdgpu_debugfs_add_files(adev, amdgpu_ttm_debugfs_list, count);
#else
        return 0;
#endif
}
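
/*
 * amdgpu_ttm_debugfs_fini - remove the debugfs files created above
 */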
static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
        unsigned i;

        for (i = 0; i < ARRAY_SIZE(ttm_debugfs_entries); i++)
                debugfs_remove(adev->mman.debugfs_entries[i]);
#endif
}