amdgpu_ttm.c

/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_page_alloc.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/swiotlb.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/debugfs.h>
#include <linux/iommu.h>
#include "amdgpu.h"
#include "amdgpu_object.h"
#include "amdgpu_trace.h"
#include "amdgpu_amdkfd.h"
#include "bif/bif_4_1_d.h"

#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
			     struct ttm_mem_reg *mem, unsigned num_pages,
			     uint64_t offset, unsigned window,
			     struct amdgpu_ring *ring,
			     uint64_t *addr);

static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev);
static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev);
/*
 * Global memory.
 */
static int amdgpu_ttm_mem_global_init(struct drm_global_reference *ref)
{
	return ttm_mem_global_init(ref->object);
}

static void amdgpu_ttm_mem_global_release(struct drm_global_reference *ref)
{
	ttm_mem_global_release(ref->object);
}

static int amdgpu_ttm_global_init(struct amdgpu_device *adev)
{
	struct drm_global_reference *global_ref;
	struct amdgpu_ring *ring;
	struct drm_sched_rq *rq;
	int r;

	adev->mman.mem_global_referenced = false;
	global_ref = &adev->mman.mem_global_ref;
	global_ref->global_type = DRM_GLOBAL_TTM_MEM;
	global_ref->size = sizeof(struct ttm_mem_global);
	global_ref->init = &amdgpu_ttm_mem_global_init;
	global_ref->release = &amdgpu_ttm_mem_global_release;
	r = drm_global_item_ref(global_ref);
	if (r) {
		DRM_ERROR("Failed setting up TTM memory accounting subsystem.\n");
		goto error_mem;
	}

	adev->mman.bo_global_ref.mem_glob =
		adev->mman.mem_global_ref.object;
	global_ref = &adev->mman.bo_global_ref.ref;
	global_ref->global_type = DRM_GLOBAL_TTM_BO;
	global_ref->size = sizeof(struct ttm_bo_global);
	global_ref->init = &ttm_bo_global_init;
	global_ref->release = &ttm_bo_global_release;
	r = drm_global_item_ref(global_ref);
	if (r) {
		DRM_ERROR("Failed setting up TTM BO subsystem.\n");
		goto error_bo;
	}

	mutex_init(&adev->mman.gtt_window_lock);

	ring = adev->mman.buffer_funcs_ring;
	rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_KERNEL];
	r = drm_sched_entity_init(&ring->sched, &adev->mman.entity,
				  rq, amdgpu_sched_jobs, NULL);
	if (r) {
		DRM_ERROR("Failed setting up TTM BO move run queue.\n");
		goto error_entity;
	}

	adev->mman.mem_global_referenced = true;

	return 0;

error_entity:
	drm_global_item_unref(&adev->mman.bo_global_ref.ref);
error_bo:
	drm_global_item_unref(&adev->mman.mem_global_ref);
error_mem:
	return r;
}

static void amdgpu_ttm_global_fini(struct amdgpu_device *adev)
{
	if (adev->mman.mem_global_referenced) {
		drm_sched_entity_fini(adev->mman.entity.sched,
				      &adev->mman.entity);
		mutex_destroy(&adev->mman.gtt_window_lock);
		drm_global_item_unref(&adev->mman.bo_global_ref.ref);
		drm_global_item_unref(&adev->mman.mem_global_ref);
		adev->mman.mem_global_referenced = false;
	}
}
static int amdgpu_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
	return 0;
}
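
/*
 * amdgpu_init_mem_type - Initialize a memory manager for one type of memory
 * request (SYSTEM, GTT, VRAM, GDS, GWS or OA), filling in the caching and
 * access flags and the manager functions TTM should use for that domain.
 */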
static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
				struct ttm_mem_type_manager *man)
{
	struct amdgpu_device *adev;

	adev = amdgpu_ttm_adev(bdev);

	switch (type) {
	case TTM_PL_SYSTEM:
		/* System memory */
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_TT:
		man->func = &amdgpu_gtt_mgr_func;
		man->gpu_offset = adev->gmc.gart_start;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA;
		break;
	case TTM_PL_VRAM:
		/* "On-card" video ram */
		man->func = &amdgpu_vram_mgr_func;
		man->gpu_offset = adev->gmc.vram_start;
		man->flags = TTM_MEMTYPE_FLAG_FIXED |
			     TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
		man->default_caching = TTM_PL_FLAG_WC;
		break;
	case AMDGPU_PL_GDS:
	case AMDGPU_PL_GWS:
	case AMDGPU_PL_OA:
		/* On-chip GDS memory */
		man->func = &ttm_bo_manager_func;
		man->gpu_offset = 0;
		man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_CMA;
		man->available_caching = TTM_PL_FLAG_UNCACHED;
		man->default_caching = TTM_PL_FLAG_UNCACHED;
		break;
	default:
		DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
		return -EINVAL;
	}
	return 0;
}
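
/*
 * amdgpu_evict_flags - Compute placement flags for a BO that is about to be
 * evicted. VRAM BOs are preferably moved into the CPU-inaccessible part of
 * VRAM with GTT as the busy fallback; everything else goes to system memory.
 */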
static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
			       struct ttm_placement *placement)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct amdgpu_bo *abo;
	static const struct ttm_place placements = {
		.fpfn = 0,
		.lpfn = 0,
		.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM
	};

	if (bo->type == ttm_bo_type_sg) {
		placement->num_placement = 0;
		placement->num_busy_placement = 0;
		return;
	}

	if (!amdgpu_ttm_bo_is_amdgpu_bo(bo)) {
		placement->placement = &placements;
		placement->busy_placement = &placements;
		placement->num_placement = 1;
		placement->num_busy_placement = 1;
		return;
	}

	abo = ttm_to_amdgpu_bo(bo);
	switch (bo->mem.mem_type) {
	case TTM_PL_VRAM:
		if (!adev->mman.buffer_funcs_enabled) {
			amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
		} else if (adev->gmc.visible_vram_size < adev->gmc.real_vram_size &&
			   !(abo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)) {
			unsigned fpfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;
			struct drm_mm_node *node = bo->mem.mm_node;
			unsigned long pages_left;

			for (pages_left = bo->mem.num_pages;
			     pages_left;
			     pages_left -= node->size, node++) {
				if (node->start < fpfn)
					break;
			}

			if (!pages_left)
				goto gtt;

			/* Try evicting to the CPU inaccessible part of VRAM
			 * first, but only set GTT as busy placement, so this
			 * BO will be evicted to GTT rather than causing other
			 * BOs to be evicted from VRAM
			 */
			amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
							 AMDGPU_GEM_DOMAIN_GTT);
			abo->placements[0].fpfn = fpfn;
			abo->placements[0].lpfn = 0;
			abo->placement.busy_placement = &abo->placements[1];
			abo->placement.num_busy_placement = 1;
		} else {
gtt:
			amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT);
		}
		break;
	case TTM_PL_TT:
	default:
		amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
	}
	*placement = abo->placement;
}
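
/*
 * amdgpu_verify_access - Check whether the file handle mapping this BO is
 * allowed to access it; userptr BOs may never be mapped through TTM.
 */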
static int amdgpu_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);

	/*
	 * Don't verify access for KFD BOs. They don't have a GEM
	 * object associated with them.
	 */
	if (abo->kfd_bo)
		return 0;

	if (amdgpu_ttm_tt_get_usermm(bo->ttm))
		return -EPERM;
	return drm_vma_node_verify_access(&abo->gem_base.vma_node,
					  filp->private_data);
}

static void amdgpu_move_null(struct ttm_buffer_object *bo,
			     struct ttm_mem_reg *new_mem)
{
	struct ttm_mem_reg *old_mem = &bo->mem;

	BUG_ON(old_mem->mm_node != NULL);
	*old_mem = *new_mem;
	new_mem->mm_node = NULL;
}
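
/*
 * amdgpu_mm_node_addr - Compute the GPU address of a drm_mm_node within a
 * ttm_mem_reg, or 0 for GTT memory that has no GART address assigned yet.
 */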
static uint64_t amdgpu_mm_node_addr(struct ttm_buffer_object *bo,
				    struct drm_mm_node *mm_node,
				    struct ttm_mem_reg *mem)
{
	uint64_t addr = 0;

	if (mem->mem_type != TTM_PL_TT || amdgpu_gtt_mgr_has_gart_addr(mem)) {
		addr = mm_node->start << PAGE_SHIFT;
		addr += bo->bdev->man[mem->mem_type].gpu_offset;
	}
	return addr;
}

/**
 * amdgpu_find_mm_node - Helper function finds the drm_mm_node
 * corresponding to @offset. It also modifies the offset to be
 * within the drm_mm_node returned.
 */
static struct drm_mm_node *amdgpu_find_mm_node(struct ttm_mem_reg *mem,
					       unsigned long *offset)
{
	struct drm_mm_node *mm_node = mem->mm_node;

	while (*offset >= (mm_node->size << PAGE_SHIFT)) {
		*offset -= (mm_node->size << PAGE_SHIFT);
		++mm_node;
	}
	return mm_node;
}
/**
 * amdgpu_ttm_copy_mem_to_mem - Helper function for copy
 *
 * The function copies @size bytes from {src->mem + src->offset} to
 * {dst->mem + dst->offset}. src->bo and dst->bo could be same BO for a
 * move and different for a BO to BO copy.
 *
 * @f: Returns the last fence if multiple jobs are submitted.
 */
int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
			       struct amdgpu_copy_mem *src,
			       struct amdgpu_copy_mem *dst,
			       uint64_t size,
			       struct reservation_object *resv,
			       struct dma_fence **f)
{
	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
	struct drm_mm_node *src_mm, *dst_mm;
	uint64_t src_node_start, dst_node_start, src_node_size,
		 dst_node_size, src_page_offset, dst_page_offset;
	struct dma_fence *fence = NULL;
	int r = 0;
	const uint64_t GTT_MAX_BYTES = (AMDGPU_GTT_MAX_TRANSFER_SIZE *
					AMDGPU_GPU_PAGE_SIZE);

	if (!adev->mman.buffer_funcs_enabled) {
		DRM_ERROR("Trying to move memory with ring turned off.\n");
		return -EINVAL;
	}

	src_mm = amdgpu_find_mm_node(src->mem, &src->offset);
	src_node_start = amdgpu_mm_node_addr(src->bo, src_mm, src->mem) +
			 src->offset;
	src_node_size = (src_mm->size << PAGE_SHIFT) - src->offset;
	src_page_offset = src_node_start & (PAGE_SIZE - 1);

	dst_mm = amdgpu_find_mm_node(dst->mem, &dst->offset);
	dst_node_start = amdgpu_mm_node_addr(dst->bo, dst_mm, dst->mem) +
			 dst->offset;
	dst_node_size = (dst_mm->size << PAGE_SHIFT) - dst->offset;
	dst_page_offset = dst_node_start & (PAGE_SIZE - 1);

	mutex_lock(&adev->mman.gtt_window_lock);

	while (size) {
		unsigned long cur_size;
		uint64_t from = src_node_start, to = dst_node_start;
		struct dma_fence *next;

		/* Copy size cannot exceed GTT_MAX_BYTES. So if src or dst
		 * begins at an offset, then adjust the size accordingly
		 */
		cur_size = min3(min(src_node_size, dst_node_size), size,
				GTT_MAX_BYTES);
		if (cur_size + src_page_offset > GTT_MAX_BYTES ||
		    cur_size + dst_page_offset > GTT_MAX_BYTES)
			cur_size -= max(src_page_offset, dst_page_offset);

		/* Map only what needs to be accessed. Map src to window 0 and
		 * dst to window 1
		 */
		if (src->mem->mem_type == TTM_PL_TT &&
		    !amdgpu_gtt_mgr_has_gart_addr(src->mem)) {
			r = amdgpu_map_buffer(src->bo, src->mem,
					PFN_UP(cur_size + src_page_offset),
					src_node_start, 0, ring,
					&from);
			if (r)
				goto error;
			/* Adjust the offset because amdgpu_map_buffer returns
			 * start of mapped page
			 */
			from += src_page_offset;
		}

		if (dst->mem->mem_type == TTM_PL_TT &&
		    !amdgpu_gtt_mgr_has_gart_addr(dst->mem)) {
			r = amdgpu_map_buffer(dst->bo, dst->mem,
					PFN_UP(cur_size + dst_page_offset),
					dst_node_start, 1, ring,
					&to);
			if (r)
				goto error;
			to += dst_page_offset;
		}

		r = amdgpu_copy_buffer(ring, from, to, cur_size,
				       resv, &next, false, true);
		if (r)
			goto error;

		dma_fence_put(fence);
		fence = next;

		size -= cur_size;
		if (!size)
			break;

		src_node_size -= cur_size;
		if (!src_node_size) {
			src_node_start = amdgpu_mm_node_addr(src->bo, ++src_mm,
							     src->mem);
			src_node_size = (src_mm->size << PAGE_SHIFT);
		} else {
			src_node_start += cur_size;
			src_page_offset = src_node_start & (PAGE_SIZE - 1);
		}
		dst_node_size -= cur_size;
		if (!dst_node_size) {
			dst_node_start = amdgpu_mm_node_addr(dst->bo, ++dst_mm,
							     dst->mem);
			dst_node_size = (dst_mm->size << PAGE_SHIFT);
		} else {
			dst_node_start += cur_size;
			dst_page_offset = dst_node_start & (PAGE_SIZE - 1);
		}
	}
error:
	mutex_unlock(&adev->mman.gtt_window_lock);
	if (f)
		*f = dma_fence_get(fence);
	dma_fence_put(fence);
	return r;
}
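
/*
 * amdgpu_move_blit - Copy an entire buffer to its new location with the copy
 * engine by handing both placements to amdgpu_ttm_copy_mem_to_mem() above,
 * then let TTM pipeline the move behind the resulting fence.
 */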
static int amdgpu_move_blit(struct ttm_buffer_object *bo,
			    bool evict, bool no_wait_gpu,
			    struct ttm_mem_reg *new_mem,
			    struct ttm_mem_reg *old_mem)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct amdgpu_copy_mem src, dst;
	struct dma_fence *fence = NULL;
	int r;

	src.bo = bo;
	dst.bo = bo;
	src.mem = old_mem;
	dst.mem = new_mem;
	src.offset = 0;
	dst.offset = 0;

	r = amdgpu_ttm_copy_mem_to_mem(adev, &src, &dst,
				       new_mem->num_pages << PAGE_SHIFT,
				       bo->resv, &fence);
	if (r)
		goto error;

	r = ttm_bo_pipeline_move(bo, fence, evict, new_mem);
	dma_fence_put(fence);
	return r;

error:
	if (fence)
		dma_fence_wait(fence, false);
	dma_fence_put(fence);
	return r;
}
static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo, bool evict,
				struct ttm_operation_ctx *ctx,
				struct ttm_mem_reg *new_mem)
{
	struct amdgpu_device *adev;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg tmp_mem;
	struct ttm_place placements;
	struct ttm_placement placement;
	int r;

	adev = amdgpu_ttm_adev(bo->bdev);
	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	placement.num_placement = 1;
	placement.placement = &placements;
	placement.num_busy_placement = 1;
	placement.busy_placement = &placements;
	placements.fpfn = 0;
	placements.lpfn = 0;
	placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
	r = ttm_bo_mem_space(bo, &placement, &tmp_mem, ctx);
	if (unlikely(r)) {
		return r;
	}

	r = ttm_tt_set_placement_caching(bo->ttm, tmp_mem.placement);
	if (unlikely(r)) {
		goto out_cleanup;
	}

	r = ttm_tt_bind(bo->ttm, &tmp_mem, ctx);
	if (unlikely(r)) {
		goto out_cleanup;
	}
	r = amdgpu_move_blit(bo, true, ctx->no_wait_gpu, &tmp_mem, old_mem);
	if (unlikely(r)) {
		goto out_cleanup;
	}
	r = ttm_bo_move_ttm(bo, ctx, new_mem);
out_cleanup:
	ttm_bo_mem_put(bo, &tmp_mem);
	return r;
}

static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo, bool evict,
				struct ttm_operation_ctx *ctx,
				struct ttm_mem_reg *new_mem)
{
	struct amdgpu_device *adev;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg tmp_mem;
	struct ttm_placement placement;
	struct ttm_place placements;
	int r;

	adev = amdgpu_ttm_adev(bo->bdev);
	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	placement.num_placement = 1;
	placement.placement = &placements;
	placement.num_busy_placement = 1;
	placement.busy_placement = &placements;
	placements.fpfn = 0;
	placements.lpfn = 0;
	placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
	r = ttm_bo_mem_space(bo, &placement, &tmp_mem, ctx);
	if (unlikely(r)) {
		return r;
	}
	r = ttm_bo_move_ttm(bo, ctx, &tmp_mem);
	if (unlikely(r)) {
		goto out_cleanup;
	}
	r = amdgpu_move_blit(bo, true, ctx->no_wait_gpu, new_mem, old_mem);
	if (unlikely(r)) {
		goto out_cleanup;
	}
out_cleanup:
	ttm_bo_mem_put(bo, &tmp_mem);
	return r;
}
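
/*
 * amdgpu_bo_move - TTM move callback. Trivial moves are handled with
 * amdgpu_move_null(), GPU copies go through the blit helpers above, and
 * ttm_bo_move_memcpy() is the CPU fallback when the copy ring is unusable.
 */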
static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
			  struct ttm_operation_ctx *ctx,
			  struct ttm_mem_reg *new_mem)
{
	struct amdgpu_device *adev;
	struct amdgpu_bo *abo;
	struct ttm_mem_reg *old_mem = &bo->mem;
	int r;

	/* Can't move a pinned BO */
	abo = ttm_to_amdgpu_bo(bo);
	if (WARN_ON_ONCE(abo->pin_count > 0))
		return -EINVAL;

	adev = amdgpu_ttm_adev(bo->bdev);

	if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
		amdgpu_move_null(bo, new_mem);
		return 0;
	}
	if ((old_mem->mem_type == TTM_PL_TT &&
	     new_mem->mem_type == TTM_PL_SYSTEM) ||
	    (old_mem->mem_type == TTM_PL_SYSTEM &&
	     new_mem->mem_type == TTM_PL_TT)) {
		/* bind is enough */
		amdgpu_move_null(bo, new_mem);
		return 0;
	}

	if (!adev->mman.buffer_funcs_enabled)
		goto memcpy;

	if (old_mem->mem_type == TTM_PL_VRAM &&
	    new_mem->mem_type == TTM_PL_SYSTEM) {
		r = amdgpu_move_vram_ram(bo, evict, ctx, new_mem);
	} else if (old_mem->mem_type == TTM_PL_SYSTEM &&
		   new_mem->mem_type == TTM_PL_VRAM) {
		r = amdgpu_move_ram_vram(bo, evict, ctx, new_mem);
	} else {
		r = amdgpu_move_blit(bo, evict, ctx->no_wait_gpu,
				     new_mem, old_mem);
	}

	if (r) {
memcpy:
		r = ttm_bo_move_memcpy(bo, ctx, new_mem);
		if (r) {
			return r;
		}
	}

	if (bo->type == ttm_bo_type_device &&
	    new_mem->mem_type == TTM_PL_VRAM &&
	    old_mem->mem_type != TTM_PL_VRAM) {
		/* amdgpu_bo_fault_reserve_notify will re-set this if the CPU
		 * accesses the BO after it's moved.
		 */
		abo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
	}

	/* update statistics */
	atomic64_add((u64)bo->num_pages << PAGE_SHIFT, &adev->num_bytes_moved);
	return 0;
}
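
/*
 * amdgpu_ttm_io_mem_reserve - Fill in the bus placement for CPU mapping.
 * VRAM must lie within the CPU-visible aperture; physically contiguous
 * buffers additionally get a kernel address via aper_base_kaddr.
 */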
static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
	struct drm_mm_node *mm_node = mem->mm_node;

	mem->bus.addr = NULL;
	mem->bus.offset = 0;
	mem->bus.size = mem->num_pages << PAGE_SHIFT;
	mem->bus.base = 0;
	mem->bus.is_iomem = false;
	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
		return -EINVAL;
	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
		/* system memory */
		return 0;
	case TTM_PL_TT:
		break;
	case TTM_PL_VRAM:
		mem->bus.offset = mem->start << PAGE_SHIFT;
		/* check if it's visible */
		if ((mem->bus.offset + mem->bus.size) > adev->gmc.visible_vram_size)
			return -EINVAL;
		/* Only physically contiguous buffers apply. In a contiguous
		 * buffer, size of the first mm_node would match the number of
		 * pages in ttm_mem_reg.
		 */
		if (adev->mman.aper_base_kaddr &&
		    (mm_node->size == mem->num_pages))
			mem->bus.addr = (u8 *)adev->mman.aper_base_kaddr +
					mem->bus.offset;

		mem->bus.base = adev->gmc.aper_base;
		mem->bus.is_iomem = true;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static void amdgpu_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
}

static unsigned long amdgpu_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
					   unsigned long page_offset)
{
	struct drm_mm_node *mm;
	unsigned long offset = (page_offset << PAGE_SHIFT);

	mm = amdgpu_find_mm_node(&bo->mem, &offset);
	return (bo->mem.bus.base >> PAGE_SHIFT) + mm->start +
		(offset >> PAGE_SHIFT);
}
/*
 * TTM backend functions.
 */
struct amdgpu_ttm_gup_task_list {
	struct list_head	list;
	struct task_struct	*task;
};

struct amdgpu_ttm_tt {
	struct ttm_dma_tt	ttm;
	u64			offset;
	uint64_t		userptr;
	struct mm_struct	*usermm;
	uint32_t		userflags;
	spinlock_t		guptasklock;
	struct list_head	guptasks;
	atomic_t		mmu_invalidations;
	uint32_t		last_set_pages;
};
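
/*
 * amdgpu_ttm_tt_get_user_pages - Pin the pages backing a userptr BO with
 * get_user_pages(), recording the calling task in gtt->guptasks so that
 * amdgpu_ttm_tt_affect_userptr() does not treat its own faults as an
 * invalidation.
 */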
int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	unsigned int flags = 0;
	unsigned pinned = 0;
	int r;

	if (!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY))
		flags |= FOLL_WRITE;

	down_read(&current->mm->mmap_sem);

	if (gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) {
		/* check that we only use anonymous memory
		   to prevent problems with writeback */
		unsigned long end = gtt->userptr + ttm->num_pages * PAGE_SIZE;
		struct vm_area_struct *vma;

		vma = find_vma(gtt->usermm, gtt->userptr);
		if (!vma || vma->vm_file || vma->vm_end < end) {
			up_read(&current->mm->mmap_sem);
			return -EPERM;
		}
	}

	do {
		unsigned num_pages = ttm->num_pages - pinned;
		uint64_t userptr = gtt->userptr + pinned * PAGE_SIZE;
		struct page **p = pages + pinned;
		struct amdgpu_ttm_gup_task_list guptask;

		guptask.task = current;
		spin_lock(&gtt->guptasklock);
		list_add(&guptask.list, &gtt->guptasks);
		spin_unlock(&gtt->guptasklock);

		r = get_user_pages(userptr, num_pages, flags, p, NULL);

		spin_lock(&gtt->guptasklock);
		list_del(&guptask.list);
		spin_unlock(&gtt->guptasklock);

		if (r < 0)
			goto release_pages;

		pinned += r;

	} while (pinned < ttm->num_pages);

	up_read(&current->mm->mmap_sem);
	return 0;

release_pages:
	release_pages(pages, pinned);
	up_read(&current->mm->mmap_sem);
	return r;
}

void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	unsigned i;

	gtt->last_set_pages = atomic_read(&gtt->mmu_invalidations);
	for (i = 0; i < ttm->num_pages; ++i) {
		if (ttm->pages[i])
			put_page(ttm->pages[i]);

		ttm->pages[i] = pages ? pages[i] : NULL;
	}
}

void amdgpu_ttm_tt_mark_user_pages(struct ttm_tt *ttm)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	unsigned i;

	for (i = 0; i < ttm->num_pages; ++i) {
		struct page *page = ttm->pages[i];

		if (!page)
			continue;

		if (!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY))
			set_page_dirty(page);

		mark_page_accessed(page);
	}
}
/* prepare the sg table with the user pages */
static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	unsigned nents;
	int r;

	int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
	enum dma_data_direction direction = write ?
		DMA_BIDIRECTIONAL : DMA_TO_DEVICE;

	r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0,
				      ttm->num_pages << PAGE_SHIFT,
				      GFP_KERNEL);
	if (r)
		goto release_sg;

	r = -ENOMEM;
	nents = dma_map_sg(adev->dev, ttm->sg->sgl, ttm->sg->nents, direction);
	if (nents != ttm->sg->nents)
		goto release_sg;

	drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
					 gtt->ttm.dma_address, ttm->num_pages);

	return 0;

release_sg:
	kfree(ttm->sg);
	return r;
}

static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
	struct amdgpu_ttm_tt *gtt = (void *)ttm;

	int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
	enum dma_data_direction direction = write ?
		DMA_BIDIRECTIONAL : DMA_TO_DEVICE;

	/* double check that we don't free the table twice */
	if (!ttm->sg->sgl)
		return;

	/* free the sg table and pages again */
	dma_unmap_sg(adev->dev, ttm->sg->sgl, ttm->sg->nents, direction);

	amdgpu_ttm_tt_mark_user_pages(ttm);

	sg_free_table(ttm->sg);
}
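
/*
 * amdgpu_ttm_backend_bind - Bind a ttm_tt to a GTT allocation: pin userptr
 * pages if needed and, once the buffer has a GART address, write the page
 * table entries with amdgpu_gart_bind().
 */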
static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm,
				   struct ttm_mem_reg *bo_mem)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	uint64_t flags;
	int r = 0;

	if (gtt->userptr) {
		r = amdgpu_ttm_tt_pin_userptr(ttm);
		if (r) {
			DRM_ERROR("failed to pin userptr\n");
			return r;
		}
	}
	if (!ttm->num_pages) {
		WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
		     ttm->num_pages, bo_mem, ttm);
	}

	if (bo_mem->mem_type == AMDGPU_PL_GDS ||
	    bo_mem->mem_type == AMDGPU_PL_GWS ||
	    bo_mem->mem_type == AMDGPU_PL_OA)
		return -EINVAL;

	if (!amdgpu_gtt_mgr_has_gart_addr(bo_mem)) {
		gtt->offset = AMDGPU_BO_INVALID_OFFSET;
		return 0;
	}

	flags = amdgpu_ttm_tt_pte_flags(adev, ttm, bo_mem);
	gtt->offset = (u64)bo_mem->start << PAGE_SHIFT;
	r = amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
			     ttm->pages, gtt->ttm.dma_address, flags);

	if (r)
		DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
			  ttm->num_pages, gtt->offset);
	return r;
}
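
/*
 * amdgpu_ttm_alloc_gart - Make sure a TTM_PL_TT buffer actually has a GART
 * address: allocate GTT space within the GART aperture, bind the pages
 * there, and update bo->mem and bo->offset with the new placement.
 */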
int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct ttm_operation_ctx ctx = { false, false };
	struct amdgpu_ttm_tt *gtt = (void *)bo->ttm;
	struct ttm_mem_reg tmp;
	struct ttm_placement placement;
	struct ttm_place placements;
	uint64_t flags;
	int r;

	if (bo->mem.mem_type != TTM_PL_TT ||
	    amdgpu_gtt_mgr_has_gart_addr(&bo->mem))
		return 0;

	tmp = bo->mem;
	tmp.mm_node = NULL;
	placement.num_placement = 1;
	placement.placement = &placements;
	placement.num_busy_placement = 1;
	placement.busy_placement = &placements;
	placements.fpfn = 0;
	placements.lpfn = adev->gmc.gart_size >> PAGE_SHIFT;
	placements.flags = (bo->mem.placement & ~TTM_PL_MASK_MEM) |
		TTM_PL_FLAG_TT;

	r = ttm_bo_mem_space(bo, &placement, &tmp, &ctx);
	if (unlikely(r))
		return r;

	flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, &tmp);
	gtt->offset = (u64)tmp.start << PAGE_SHIFT;
	r = amdgpu_gart_bind(adev, gtt->offset, bo->ttm->num_pages,
			     bo->ttm->pages, gtt->ttm.dma_address, flags);
	if (unlikely(r)) {
		ttm_bo_mem_put(bo, &tmp);
		return r;
	}

	ttm_bo_mem_put(bo, &bo->mem);
	bo->mem = tmp;
	bo->offset = (bo->mem.start << PAGE_SHIFT) +
		bo->bdev->man[bo->mem.mem_type].gpu_offset;

	return 0;
}

int amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
	struct amdgpu_ttm_tt *gtt = (void *)tbo->ttm;
	uint64_t flags;
	int r;

	if (!gtt)
		return 0;

	flags = amdgpu_ttm_tt_pte_flags(adev, &gtt->ttm.ttm, &tbo->mem);
	r = amdgpu_gart_bind(adev, gtt->offset, gtt->ttm.ttm.num_pages,
			     gtt->ttm.ttm.pages, gtt->ttm.dma_address, flags);
	if (r)
		DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
			  gtt->ttm.ttm.num_pages, gtt->offset);
	return r;
}
static int amdgpu_ttm_backend_unbind(struct ttm_tt *ttm)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	int r;

	if (gtt->userptr)
		amdgpu_ttm_tt_unpin_userptr(ttm);

	if (gtt->offset == AMDGPU_BO_INVALID_OFFSET)
		return 0;

	/* unbind shouldn't be done for GDS/GWS/OA in ttm_bo_clean_mm */
	r = amdgpu_gart_unbind(adev, gtt->offset, ttm->num_pages);
	if (r)
		DRM_ERROR("failed to unbind %lu pages at 0x%08llX\n",
			  gtt->ttm.ttm.num_pages, gtt->offset);
	return r;
}

static void amdgpu_ttm_backend_destroy(struct ttm_tt *ttm)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;

	ttm_dma_tt_fini(&gtt->ttm);
	kfree(gtt);
}

static struct ttm_backend_func amdgpu_backend_func = {
	.bind = &amdgpu_ttm_backend_bind,
	.unbind = &amdgpu_ttm_backend_unbind,
	.destroy = &amdgpu_ttm_backend_destroy,
};

static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_buffer_object *bo,
					   uint32_t page_flags)
{
	struct amdgpu_device *adev;
	struct amdgpu_ttm_tt *gtt;

	adev = amdgpu_ttm_adev(bo->bdev);

	gtt = kzalloc(sizeof(struct amdgpu_ttm_tt), GFP_KERNEL);
	if (gtt == NULL) {
		return NULL;
	}
	gtt->ttm.ttm.func = &amdgpu_backend_func;
	if (ttm_sg_tt_init(&gtt->ttm, bo, page_flags)) {
		kfree(gtt);
		return NULL;
	}
	return &gtt->ttm.ttm;
}
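
/*
 * amdgpu_ttm_tt_populate - Allocate and map backing pages for a ttm_tt.
 * Userptr BOs just allocate an sg table here, imported (SG) buffers reuse
 * the exporter's pages, and everything else comes from the TTM page pools
 * (with swiotlb-aware DMA mapping when needed).
 */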
static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm,
				  struct ttm_operation_ctx *ctx)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

	if (gtt && gtt->userptr) {
		ttm->sg = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
		if (!ttm->sg)
			return -ENOMEM;

		ttm->page_flags |= TTM_PAGE_FLAG_SG;
		ttm->state = tt_unbound;
		return 0;
	}

	if (slave && ttm->sg) {
		drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
						 gtt->ttm.dma_address,
						 ttm->num_pages);
		ttm->state = tt_unbound;
		return 0;
	}

#ifdef CONFIG_SWIOTLB
	if (adev->need_swiotlb && swiotlb_nr_tbl()) {
		return ttm_dma_populate(&gtt->ttm, adev->dev, ctx);
	}
#endif

	return ttm_populate_and_map_pages(adev->dev, &gtt->ttm, ctx);
}

static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
	struct amdgpu_device *adev;
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

	if (gtt && gtt->userptr) {
		amdgpu_ttm_tt_set_user_pages(ttm, NULL);
		kfree(ttm->sg);
		ttm->page_flags &= ~TTM_PAGE_FLAG_SG;
		return;
	}

	if (slave)
		return;

	adev = amdgpu_ttm_adev(ttm->bdev);

#ifdef CONFIG_SWIOTLB
	if (adev->need_swiotlb && swiotlb_nr_tbl()) {
		ttm_dma_unpopulate(&gtt->ttm, adev->dev);
		return;
	}
#endif

	ttm_unmap_and_unpopulate_pages(adev->dev, &gtt->ttm);
}
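
/*
 * amdgpu_ttm_tt_set_userptr - Initialize the userptr state of a ttm_tt:
 * remember the user address, owning mm and flags, and reset the gup task
 * list and invalidation counters.
 */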
int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
			      uint32_t flags)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;

	if (gtt == NULL)
		return -EINVAL;

	gtt->userptr = addr;
	gtt->usermm = current->mm;
	gtt->userflags = flags;
	spin_lock_init(&gtt->guptasklock);
	INIT_LIST_HEAD(&gtt->guptasks);
	atomic_set(&gtt->mmu_invalidations, 0);
	gtt->last_set_pages = 0;

	return 0;
}

struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;

	if (gtt == NULL)
		return NULL;

	return gtt->usermm;
}

bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
				  unsigned long end)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	struct amdgpu_ttm_gup_task_list *entry;
	unsigned long size;

	if (gtt == NULL || !gtt->userptr)
		return false;

	size = (unsigned long)gtt->ttm.ttm.num_pages * PAGE_SIZE;
	if (gtt->userptr > end || gtt->userptr + size <= start)
		return false;

	spin_lock(&gtt->guptasklock);
	list_for_each_entry(entry, &gtt->guptasks, list) {
		if (entry->task == current) {
			spin_unlock(&gtt->guptasklock);
			return false;
		}
	}
	spin_unlock(&gtt->guptasklock);

	atomic_inc(&gtt->mmu_invalidations);

	return true;
}

bool amdgpu_ttm_tt_userptr_invalidated(struct ttm_tt *ttm,
				       int *last_invalidated)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	int prev_invalidated = *last_invalidated;

	*last_invalidated = atomic_read(&gtt->mmu_invalidations);
	return prev_invalidated != *last_invalidated;
}

bool amdgpu_ttm_tt_userptr_needs_pages(struct ttm_tt *ttm)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;

	if (gtt == NULL || !gtt->userptr)
		return false;

	return atomic_read(&gtt->mmu_invalidations) != gtt->last_set_pages;
}

bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;

	if (gtt == NULL)
		return false;

	return !!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
}

uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
				 struct ttm_mem_reg *mem)
{
	uint64_t flags = 0;

	if (mem && mem->mem_type != TTM_PL_SYSTEM)
		flags |= AMDGPU_PTE_VALID;

	if (mem && mem->mem_type == TTM_PL_TT) {
		flags |= AMDGPU_PTE_SYSTEM;

		if (ttm->caching_state == tt_cached)
			flags |= AMDGPU_PTE_SNOOPED;
	}

	flags |= adev->gart.gart_pte_flags;
	flags |= AMDGPU_PTE_READABLE;

	if (!amdgpu_ttm_tt_is_readonly(ttm))
		flags |= AMDGPU_PTE_WRITEABLE;

	return flags;
}
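
/*
 * amdgpu_ttm_bo_eviction_valuable - Decide whether evicting this BO would
 * free up space in the range the caller is interested in; KFD BOs of the
 * current process are never considered worth evicting.
 */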
static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
					    const struct ttm_place *place)
{
	unsigned long num_pages = bo->mem.num_pages;
	struct drm_mm_node *node = bo->mem.mm_node;
	struct reservation_object_list *flist;
	struct dma_fence *f;
	int i;

	/* If bo is a KFD BO, check if the bo belongs to the current process.
	 * If true, then return false as any KFD process needs all its BOs to
	 * be resident to run successfully
	 */
	flist = reservation_object_get_list(bo->resv);
	if (flist) {
		for (i = 0; i < flist->shared_count; ++i) {
			f = rcu_dereference_protected(flist->shared[i],
				reservation_object_held(bo->resv));
			if (amdkfd_fence_check_mm(f, current->mm))
				return false;
		}
	}

	switch (bo->mem.mem_type) {
	case TTM_PL_TT:
		return true;

	case TTM_PL_VRAM:
		/* Check each drm MM node individually */
		while (num_pages) {
			if (place->fpfn < (node->start + node->size) &&
			    !(place->lpfn && place->lpfn <= node->start))
				return true;

			num_pages -= node->size;
			++node;
		}
		return false;

	default:
		break;
	}

	return ttm_bo_eviction_valuable(bo, place);
}
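
/*
 * amdgpu_ttm_access_memory - Read or write VRAM behind a BO through the
 * MM_INDEX/MM_DATA register window, 32 bits at a time (used for things
 * like ptrace access to mapped buffers).
 */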
static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
				    unsigned long offset,
				    void *buf, int len, int write)
{
	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
	struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
	struct drm_mm_node *nodes;
	uint32_t value = 0;
	int ret = 0;
	uint64_t pos;
	unsigned long flags;

	if (bo->mem.mem_type != TTM_PL_VRAM)
		return -EIO;

	nodes = amdgpu_find_mm_node(&abo->tbo.mem, &offset);
	pos = (nodes->start << PAGE_SHIFT) + offset;

	while (len && pos < adev->gmc.mc_vram_size) {
		uint64_t aligned_pos = pos & ~(uint64_t)3;
		uint32_t bytes = 4 - (pos & 3);
		uint32_t shift = (pos & 3) * 8;
		uint32_t mask = 0xffffffff << shift;

		if (len < bytes) {
			mask &= 0xffffffff >> (bytes - len) * 8;
			bytes = len;
		}

		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
		WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)aligned_pos) | 0x80000000);
		WREG32_NO_KIQ(mmMM_INDEX_HI, aligned_pos >> 31);
		if (!write || mask != 0xffffffff)
			value = RREG32_NO_KIQ(mmMM_DATA);
		if (write) {
			value &= ~mask;
			value |= (*(uint32_t *)buf << shift) & mask;
			WREG32_NO_KIQ(mmMM_DATA, value);
		}
		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
		if (!write) {
			value = (value & mask) >> shift;
			memcpy(buf, &value, bytes);
		}

		ret += bytes;
		buf = (uint8_t *)buf + bytes;
		pos += bytes;
		len -= bytes;
		if (pos >= (nodes->start + nodes->size) << PAGE_SHIFT) {
			++nodes;
			pos = (nodes->start << PAGE_SHIFT);
		}
	}

	return ret;
}
static struct ttm_bo_driver amdgpu_bo_driver = {
	.ttm_tt_create = &amdgpu_ttm_tt_create,
	.ttm_tt_populate = &amdgpu_ttm_tt_populate,
	.ttm_tt_unpopulate = &amdgpu_ttm_tt_unpopulate,
	.invalidate_caches = &amdgpu_invalidate_caches,
	.init_mem_type = &amdgpu_init_mem_type,
	.eviction_valuable = amdgpu_ttm_bo_eviction_valuable,
	.evict_flags = &amdgpu_evict_flags,
	.move = &amdgpu_bo_move,
	.verify_access = &amdgpu_verify_access,
	.move_notify = &amdgpu_bo_move_notify,
	.fault_reserve_notify = &amdgpu_bo_fault_reserve_notify,
	.io_mem_reserve = &amdgpu_ttm_io_mem_reserve,
	.io_mem_free = &amdgpu_ttm_io_mem_free,
	.io_mem_pfn = amdgpu_ttm_io_mem_pfn,
	.access_memory = &amdgpu_ttm_access_memory
};
/*
 * Firmware Reservation functions
 */
/**
 * amdgpu_ttm_fw_reserve_vram_fini - free fw reserved vram
 *
 * @adev: amdgpu_device pointer
 *
 * free fw reserved vram if it has been reserved.
 */
static void amdgpu_ttm_fw_reserve_vram_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->fw_vram_usage.reserved_bo,
		NULL, &adev->fw_vram_usage.va);
}

/**
 * amdgpu_ttm_fw_reserve_vram_init - create bo vram reservation from fw
 *
 * @adev: amdgpu_device pointer
 *
 * create bo vram reservation from fw.
 */
static int amdgpu_ttm_fw_reserve_vram_init(struct amdgpu_device *adev)
{
	struct ttm_operation_ctx ctx = { false, false };
	int r = 0;
	int i;
	u64 vram_size = adev->gmc.visible_vram_size;
	u64 offset = adev->fw_vram_usage.start_offset;
	u64 size = adev->fw_vram_usage.size;
	struct amdgpu_bo *bo;

	adev->fw_vram_usage.va = NULL;
	adev->fw_vram_usage.reserved_bo = NULL;

	if (adev->fw_vram_usage.size > 0 &&
	    adev->fw_vram_usage.size <= vram_size) {

		r = amdgpu_bo_create(adev, adev->fw_vram_usage.size, PAGE_SIZE,
				     AMDGPU_GEM_DOMAIN_VRAM,
				     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
				     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
				     ttm_bo_type_kernel, NULL,
				     &adev->fw_vram_usage.reserved_bo);
		if (r)
			goto error_create;

		r = amdgpu_bo_reserve(adev->fw_vram_usage.reserved_bo, false);
		if (r)
			goto error_reserve;

		/* remove the original mem node and create a new one at the
		 * request position
		 */
		bo = adev->fw_vram_usage.reserved_bo;
		offset = ALIGN(offset, PAGE_SIZE);
		for (i = 0; i < bo->placement.num_placement; ++i) {
			bo->placements[i].fpfn = offset >> PAGE_SHIFT;
			bo->placements[i].lpfn = (offset + size) >> PAGE_SHIFT;
		}

		ttm_bo_mem_put(&bo->tbo, &bo->tbo.mem);
		r = ttm_bo_mem_space(&bo->tbo, &bo->placement,
				     &bo->tbo.mem, &ctx);
		if (r)
			goto error_pin;

		r = amdgpu_bo_pin_restricted(adev->fw_vram_usage.reserved_bo,
			AMDGPU_GEM_DOMAIN_VRAM,
			adev->fw_vram_usage.start_offset,
			(adev->fw_vram_usage.start_offset +
			adev->fw_vram_usage.size), NULL);
		if (r)
			goto error_pin;
		r = amdgpu_bo_kmap(adev->fw_vram_usage.reserved_bo,
			&adev->fw_vram_usage.va);
		if (r)
			goto error_kmap;

		amdgpu_bo_unreserve(adev->fw_vram_usage.reserved_bo);
	}
	return r;

error_kmap:
	amdgpu_bo_unpin(adev->fw_vram_usage.reserved_bo);
error_pin:
	amdgpu_bo_unreserve(adev->fw_vram_usage.reserved_bo);
error_reserve:
	amdgpu_bo_unref(&adev->fw_vram_usage.reserved_bo);
error_create:
	adev->fw_vram_usage.va = NULL;
	adev->fw_vram_usage.reserved_bo = NULL;
	return r;
}
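
/*
 * amdgpu_ttm_init - Set up the memory manager for the device: the TTM
 * globals and bo_device, the VRAM, GTT, GDS, GWS and OA heaps, the firmware
 * and stolen VGA reservations, and the debugfs entries.
 */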
int amdgpu_ttm_init(struct amdgpu_device *adev)
{
	uint64_t gtt_size;
	int r;
	u64 vis_vram_limit;

	r = amdgpu_ttm_global_init(adev);
	if (r) {
		return r;
	}
	/* No other user of the address space, so set it to 0 */
	r = ttm_bo_device_init(&adev->mman.bdev,
			       adev->mman.bo_global_ref.ref.object,
			       &amdgpu_bo_driver,
			       adev->ddev->anon_inode->i_mapping,
			       DRM_FILE_PAGE_OFFSET,
			       adev->need_dma32);
	if (r) {
		DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
		return r;
	}
	adev->mman.initialized = true;

	/* We opt to avoid OOM on system pages allocations */
	adev->mman.bdev.no_retry = true;

	r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_VRAM,
			   adev->gmc.real_vram_size >> PAGE_SHIFT);
	if (r) {
		DRM_ERROR("Failed initializing VRAM heap.\n");
		return r;
	}

	/* Reduce size of CPU-visible VRAM if requested */
	vis_vram_limit = (u64)amdgpu_vis_vram_limit * 1024 * 1024;
	if (amdgpu_vis_vram_limit > 0 &&
	    vis_vram_limit <= adev->gmc.visible_vram_size)
		adev->gmc.visible_vram_size = vis_vram_limit;

	/* Change the size here instead of the init above so only lpfn is affected */
	amdgpu_ttm_set_buffer_funcs_status(adev, false);
#ifdef CONFIG_64BIT
	adev->mman.aper_base_kaddr = ioremap_wc(adev->gmc.aper_base,
						adev->gmc.visible_vram_size);
#endif

	/*
	 * The reserved vram for firmware must be pinned to the specified
	 * place on the VRAM, so reserve it early.
	 */
	r = amdgpu_ttm_fw_reserve_vram_init(adev);
	if (r) {
		return r;
	}

	r = amdgpu_bo_create_kernel(adev, adev->gmc.stolen_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM,
				    &adev->stolen_vga_memory,
				    NULL, NULL);
	if (r)
		return r;
	DRM_INFO("amdgpu: %uM of VRAM memory ready\n",
		 (unsigned) (adev->gmc.real_vram_size / (1024 * 1024)));

	if (amdgpu_gtt_size == -1) {
		struct sysinfo si;

		si_meminfo(&si);
		gtt_size = min(max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20),
			       adev->gmc.mc_vram_size),
			       ((uint64_t)si.totalram * si.mem_unit * 3/4));
	} else
		gtt_size = (uint64_t)amdgpu_gtt_size << 20;
	r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_TT, gtt_size >> PAGE_SHIFT);
	if (r) {
		DRM_ERROR("Failed initializing GTT heap.\n");
		return r;
	}
	DRM_INFO("amdgpu: %uM of GTT memory ready.\n",
		 (unsigned)(gtt_size / (1024 * 1024)));

	adev->gds.mem.total_size = adev->gds.mem.total_size << AMDGPU_GDS_SHIFT;
	adev->gds.mem.gfx_partition_size = adev->gds.mem.gfx_partition_size << AMDGPU_GDS_SHIFT;
	adev->gds.mem.cs_partition_size = adev->gds.mem.cs_partition_size << AMDGPU_GDS_SHIFT;
	adev->gds.gws.total_size = adev->gds.gws.total_size << AMDGPU_GWS_SHIFT;
	adev->gds.gws.gfx_partition_size = adev->gds.gws.gfx_partition_size << AMDGPU_GWS_SHIFT;
	adev->gds.gws.cs_partition_size = adev->gds.gws.cs_partition_size << AMDGPU_GWS_SHIFT;
	adev->gds.oa.total_size = adev->gds.oa.total_size << AMDGPU_OA_SHIFT;
	adev->gds.oa.gfx_partition_size = adev->gds.oa.gfx_partition_size << AMDGPU_OA_SHIFT;
	adev->gds.oa.cs_partition_size = adev->gds.oa.cs_partition_size << AMDGPU_OA_SHIFT;
	/* GDS Memory */
	if (adev->gds.mem.total_size) {
		r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_GDS,
				   adev->gds.mem.total_size >> PAGE_SHIFT);
		if (r) {
			DRM_ERROR("Failed initializing GDS heap.\n");
			return r;
		}
	}

	/* GWS */
	if (adev->gds.gws.total_size) {
		r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_GWS,
				   adev->gds.gws.total_size >> PAGE_SHIFT);
		if (r) {
			DRM_ERROR("Failed initializing gws heap.\n");
			return r;
		}
	}

	/* OA */
	if (adev->gds.oa.total_size) {
		r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_OA,
				   adev->gds.oa.total_size >> PAGE_SHIFT);
		if (r) {
			DRM_ERROR("Failed initializing oa heap.\n");
			return r;
		}
	}

	r = amdgpu_ttm_debugfs_init(adev);
	if (r) {
		DRM_ERROR("Failed to init debugfs\n");
		return r;
	}
	return 0;
}
void amdgpu_ttm_fini(struct amdgpu_device *adev)
{
	if (!adev->mman.initialized)
		return;

	amdgpu_ttm_debugfs_fini(adev);
	amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, NULL);
	amdgpu_ttm_fw_reserve_vram_fini(adev);
	if (adev->mman.aper_base_kaddr)
		iounmap(adev->mman.aper_base_kaddr);
	adev->mman.aper_base_kaddr = NULL;

	ttm_bo_clean_mm(&adev->mman.bdev, TTM_PL_VRAM);
	ttm_bo_clean_mm(&adev->mman.bdev, TTM_PL_TT);
	if (adev->gds.mem.total_size)
		ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_GDS);
	if (adev->gds.gws.total_size)
		ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_GWS);
	if (adev->gds.oa.total_size)
		ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_OA);
	ttm_bo_device_release(&adev->mman.bdev);
	amdgpu_ttm_global_fini(adev);
	adev->mman.initialized = false;
	DRM_INFO("amdgpu: ttm finalized\n");
}
/**
 * amdgpu_ttm_set_buffer_funcs_status - enable/disable use of buffer functions
 *
 * @adev: amdgpu_device pointer
 * @enable: true when we can use buffer functions.
 *
 * Enable/disable use of buffer functions during suspend/resume. This should
 * only be called at bootup or when userspace isn't running.
 */
void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
{
	struct ttm_mem_type_manager *man = &adev->mman.bdev.man[TTM_PL_VRAM];
	uint64_t size;

	if (!adev->mman.initialized || adev->in_gpu_reset)
		return;

	/* this just adjusts TTM size idea, which sets lpfn to the correct value */
	if (enable)
		size = adev->gmc.real_vram_size;
	else
		size = adev->gmc.visible_vram_size;
	man->size = size >> PAGE_SHIFT;
	adev->mman.buffer_funcs_enabled = enable;
}
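/**
 * amdgpu_mmap - mmap() callback for the amdgpu DRM file
 *
 * @filp: DRM file pointer
 * @vma: vm area to map
 *
 * Validates the fake offset and hands the mapping off to ttm_bo_mmap() on
 * the device's TTM bo_device.
 */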
int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *file_priv;
	struct amdgpu_device *adev;

	if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
		return -EINVAL;

	file_priv = filp->private_data;
	adev = file_priv->minor->dev->dev_private;
	if (adev == NULL)
		return -EINVAL;

	return ttm_bo_mmap(filp, vma, &adev->mman.bdev);
}
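/**
 * amdgpu_map_buffer - map part of a buffer through a GART window
 *
 * @bo: buffer object to map
 * @mem: memory region backing the buffer
 * @num_pages: number of pages to map
 * @offset: byte offset into the buffer
 * @window: index of the GART window to use
 * @ring: ring to submit the mapping job on
 * @addr: returned GPU address of the window
 *
 * Builds a job whose IB both carries the PTEs for @num_pages pages and
 * copies them into the GART table entries of @window, so buffer-function
 * moves can reach memory that is not currently GART mapped.
 */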
static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
			     struct ttm_mem_reg *mem, unsigned num_pages,
			     uint64_t offset, unsigned window,
			     struct amdgpu_ring *ring,
			     uint64_t *addr)
{
	struct amdgpu_ttm_tt *gtt = (void *)bo->ttm;
	struct amdgpu_device *adev = ring->adev;
	struct ttm_tt *ttm = bo->ttm;
	struct amdgpu_job *job;
	unsigned num_dw, num_bytes;
	dma_addr_t *dma_address;
	struct dma_fence *fence;
	uint64_t src_addr, dst_addr;
	uint64_t flags;
	int r;

	BUG_ON(adev->mman.buffer_funcs->copy_max_bytes <
	       AMDGPU_GTT_MAX_TRANSFER_SIZE * 8);

	*addr = adev->gmc.gart_start;
	*addr += (u64)window * AMDGPU_GTT_MAX_TRANSFER_SIZE *
		AMDGPU_GPU_PAGE_SIZE;

	num_dw = adev->mman.buffer_funcs->copy_num_dw;
	while (num_dw & 0x7)
		num_dw++;

	num_bytes = num_pages * 8;

	r = amdgpu_job_alloc_with_ib(adev, num_dw * 4 + num_bytes, &job);
	if (r)
		return r;

	src_addr = num_dw * 4;
	src_addr += job->ibs[0].gpu_addr;

	dst_addr = adev->gart.table_addr;
	dst_addr += window * AMDGPU_GTT_MAX_TRANSFER_SIZE * 8;
	amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr,
				dst_addr, num_bytes);

	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
	WARN_ON(job->ibs[0].length_dw > num_dw);

	dma_address = &gtt->ttm.dma_address[offset >> PAGE_SHIFT];
	flags = amdgpu_ttm_tt_pte_flags(adev, ttm, mem);
	r = amdgpu_gart_map(adev, 0, num_pages, dma_address, flags,
			    &job->ibs[0].ptr[num_dw]);
	if (r)
		goto error_free;

	r = amdgpu_job_submit(job, ring, &adev->mman.entity,
			      AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
	if (r)
		goto error_free;

	dma_fence_put(fence);

	return r;

error_free:
	amdgpu_job_free(job);
	return r;
}
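/**
 * amdgpu_copy_buffer - schedule a GPU copy between two buffer ranges
 *
 * @ring: ring the copy is submitted on
 * @src_offset: source GPU address
 * @dst_offset: destination GPU address
 * @byte_count: number of bytes to copy
 * @resv: reservation object to sync with before the copy
 * @fence: returned fence for the copy
 * @direct_submit: submit the IB directly instead of via the scheduler
 * @vm_needs_flush: flush the VM before the copy
 *
 * Splits the copy into copy_max_bytes sized chunks and submits them as a
 * single job on @ring, either directly or through the GPU scheduler.
 */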
int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
		       uint64_t dst_offset, uint32_t byte_count,
		       struct reservation_object *resv,
		       struct dma_fence **fence, bool direct_submit,
		       bool vm_needs_flush)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_job *job;

	uint32_t max_bytes;
	unsigned num_loops, num_dw;
	unsigned i;
	int r;

	if (direct_submit && !ring->ready) {
		DRM_ERROR("Trying to move memory with ring turned off.\n");
		return -EINVAL;
	}

	max_bytes = adev->mman.buffer_funcs->copy_max_bytes;
	num_loops = DIV_ROUND_UP(byte_count, max_bytes);
	num_dw = num_loops * adev->mman.buffer_funcs->copy_num_dw;

	/* for IB padding */
	while (num_dw & 0x7)
		num_dw++;

	r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, &job);
	if (r)
		return r;

	job->vm_needs_flush = vm_needs_flush;
	if (resv) {
		r = amdgpu_sync_resv(adev, &job->sync, resv,
				     AMDGPU_FENCE_OWNER_UNDEFINED,
				     false);
		if (r) {
			DRM_ERROR("sync failed (%d).\n", r);
			goto error_free;
		}
	}

	for (i = 0; i < num_loops; i++) {
		uint32_t cur_size_in_bytes = min(byte_count, max_bytes);

		amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_offset,
					dst_offset, cur_size_in_bytes);

		src_offset += cur_size_in_bytes;
		dst_offset += cur_size_in_bytes;
		byte_count -= cur_size_in_bytes;
	}

	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
	WARN_ON(job->ibs[0].length_dw > num_dw);
	if (direct_submit) {
		r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs,
				       NULL, fence);
		job->fence = dma_fence_get(*fence);
		if (r)
			DRM_ERROR("Error scheduling IBs (%d)\n", r);
		amdgpu_job_free(job);
	} else {
		r = amdgpu_job_submit(job, ring, &adev->mman.entity,
				      AMDGPU_FENCE_OWNER_UNDEFINED, fence);
		if (r)
			goto error_free;
	}

	return r;

error_free:
	amdgpu_job_free(job);
	return r;
}
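/**
 * amdgpu_fill_buffer - schedule a GPU fill of a buffer object
 *
 * @bo: buffer object to fill
 * @src_data: 32 bit value written into every dword of the buffer
 * @resv: reservation object to sync with before the fill
 * @fence: returned fence for the fill
 *
 * Walks the drm_mm nodes backing @bo and emits fill commands for each of
 * them, split into fill_max_bytes sized chunks, as a single job on the
 * buffer-functions ring.
 */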
int amdgpu_fill_buffer(struct amdgpu_bo *bo,
		       uint32_t src_data,
		       struct reservation_object *resv,
		       struct dma_fence **fence)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	uint32_t max_bytes = adev->mman.buffer_funcs->fill_max_bytes;
	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;

	struct drm_mm_node *mm_node;
	unsigned long num_pages;
	unsigned int num_loops, num_dw;

	struct amdgpu_job *job;
	int r;

	if (!adev->mman.buffer_funcs_enabled) {
		DRM_ERROR("Trying to clear memory with ring turned off.\n");
		return -EINVAL;
	}

	if (bo->tbo.mem.mem_type == TTM_PL_TT) {
		r = amdgpu_ttm_alloc_gart(&bo->tbo);
		if (r)
			return r;
	}

	num_pages = bo->tbo.num_pages;
	mm_node = bo->tbo.mem.mm_node;
	num_loops = 0;
	while (num_pages) {
		uint32_t byte_count = mm_node->size << PAGE_SHIFT;

		num_loops += DIV_ROUND_UP(byte_count, max_bytes);
		num_pages -= mm_node->size;
		++mm_node;
	}
	num_dw = num_loops * adev->mman.buffer_funcs->fill_num_dw;

	/* for IB padding */
	num_dw += 64;

	r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, &job);
	if (r)
		return r;

	if (resv) {
		r = amdgpu_sync_resv(adev, &job->sync, resv,
				     AMDGPU_FENCE_OWNER_UNDEFINED, false);
		if (r) {
			DRM_ERROR("sync failed (%d).\n", r);
			goto error_free;
		}
	}

	num_pages = bo->tbo.num_pages;
	mm_node = bo->tbo.mem.mm_node;

	while (num_pages) {
		uint32_t byte_count = mm_node->size << PAGE_SHIFT;
		uint64_t dst_addr;

		dst_addr = amdgpu_mm_node_addr(&bo->tbo, mm_node, &bo->tbo.mem);
		while (byte_count) {
			uint32_t cur_size_in_bytes = min(byte_count, max_bytes);

			amdgpu_emit_fill_buffer(adev, &job->ibs[0], src_data,
						dst_addr, cur_size_in_bytes);

			dst_addr += cur_size_in_bytes;
			byte_count -= cur_size_in_bytes;
		}

		num_pages -= mm_node->size;
		++mm_node;
	}

	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
	WARN_ON(job->ibs[0].length_dw > num_dw);
	r = amdgpu_job_submit(job, ring, &adev->mman.entity,
			      AMDGPU_FENCE_OWNER_UNDEFINED, fence);
	if (r)
		goto error_free;

	return 0;

error_free:
	amdgpu_job_free(job);
	return r;
}
#if defined(CONFIG_DEBUG_FS)
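/**
 * amdgpu_mm_dump_table - dump a TTM memory manager through debugfs
 *
 * Prints the state of the memory manager selected by the debugfs entry's
 * data (TTM_PL_VRAM or TTM_PL_TT) into the seq_file.
 */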
static int amdgpu_mm_dump_table(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	unsigned ttm_pl = *(int *)node->info_ent->data;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct ttm_mem_type_manager *man = &adev->mman.bdev.man[ttm_pl];
	struct drm_printer p = drm_seq_file_printer(m);

	man->func->debug(man, &p);
	return 0;
}

static int ttm_pl_vram = TTM_PL_VRAM;
static int ttm_pl_tt = TTM_PL_TT;

static const struct drm_info_list amdgpu_ttm_debugfs_list[] = {
	{"amdgpu_vram_mm", amdgpu_mm_dump_table, 0, &ttm_pl_vram},
	{"amdgpu_gtt_mm", amdgpu_mm_dump_table, 0, &ttm_pl_tt},
	{"ttm_page_pool", ttm_page_alloc_debugfs, 0, NULL},
#ifdef CONFIG_SWIOTLB
	{"ttm_dma_page_pool", ttm_dma_page_alloc_debugfs, 0, NULL}
#endif
};
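/**
 * amdgpu_ttm_vram_read - debugfs read of VRAM through the MMIO aperture
 *
 * Accesses VRAM one dword at a time via the MM_INDEX/MM_DATA registers,
 * so it also works for VRAM that is not CPU visible.
 */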
static ssize_t amdgpu_ttm_vram_read(struct file *f, char __user *buf,
				    size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	if (*pos >= adev->gmc.mc_vram_size)
		return -ENXIO;

	while (size) {
		unsigned long flags;
		uint32_t value;

		if (*pos >= adev->gmc.mc_vram_size)
			return result;

		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
		WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)*pos) | 0x80000000);
		WREG32_NO_KIQ(mmMM_INDEX_HI, *pos >> 31);
		value = RREG32_NO_KIQ(mmMM_DATA);
		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);

		r = put_user(value, (uint32_t *)buf);
		if (r)
			return r;

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	return result;
}
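/**
 * amdgpu_ttm_vram_write - debugfs write of VRAM through the MMIO aperture
 *
 * Mirror of amdgpu_ttm_vram_read(): writes user data into VRAM one dword
 * at a time via the MM_INDEX/MM_DATA registers.
 */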
static ssize_t amdgpu_ttm_vram_write(struct file *f, const char __user *buf,
				     size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	if (*pos >= adev->gmc.mc_vram_size)
		return -ENXIO;

	while (size) {
		unsigned long flags;
		uint32_t value;

		if (*pos >= adev->gmc.mc_vram_size)
			return result;

		r = get_user(value, (uint32_t *)buf);
		if (r)
			return r;

		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
		WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)*pos) | 0x80000000);
		WREG32_NO_KIQ(mmMM_INDEX_HI, *pos >> 31);
		WREG32_NO_KIQ(mmMM_DATA, value);
		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	return result;
}

static const struct file_operations amdgpu_ttm_vram_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_ttm_vram_read,
	.write = amdgpu_ttm_vram_write,
	.llseek = default_llseek,
};
#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
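/**
 * amdgpu_ttm_gtt_read - debugfs read of the pages currently bound to the GART
 *
 * Copies data out of the CPU pages backing the GART; unbound GART entries
 * read back as zero.
 */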
static ssize_t amdgpu_ttm_gtt_read(struct file *f, char __user *buf,
				   size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	while (size) {
		loff_t p = *pos / PAGE_SIZE;
		unsigned off = *pos & ~PAGE_MASK;
		size_t cur_size = min_t(size_t, size, PAGE_SIZE - off);
		struct page *page;
		void *ptr;

		if (p >= adev->gart.num_cpu_pages)
			return result;

		page = adev->gart.pages[p];
		if (page) {
			ptr = kmap(page);
			ptr += off;

			r = copy_to_user(buf, ptr, cur_size);
			kunmap(adev->gart.pages[p]);
		} else
			r = clear_user(buf, cur_size);

		if (r)
			return -EFAULT;

		result += cur_size;
		buf += cur_size;
		*pos += cur_size;
		size -= cur_size;
	}

	return result;
}

static const struct file_operations amdgpu_ttm_gtt_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_ttm_gtt_read,
	.llseek = default_llseek
};

#endif
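/**
 * amdgpu_iomem_read - debugfs read of pages by their DMA (IOVA) address
 *
 * Translates the file offset through the device's IOMMU domain (if any)
 * to a physical page and copies its contents to userspace. Only pages
 * that belong to this device's TTM mapping may be read.
 */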
static ssize_t amdgpu_iomem_read(struct file *f, char __user *buf,
				 size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	struct iommu_domain *dom;
	ssize_t result = 0;
	int r;

	dom = iommu_get_domain_for_dev(adev->dev);

	while (size) {
		phys_addr_t addr = *pos & PAGE_MASK;
		loff_t off = *pos & ~PAGE_MASK;
		size_t bytes = PAGE_SIZE - off;
		unsigned long pfn;
		struct page *p;
		void *ptr;

		bytes = bytes < size ? bytes : size;

		addr = dom ? iommu_iova_to_phys(dom, addr) : addr;

		pfn = addr >> PAGE_SHIFT;
		if (!pfn_valid(pfn))
			return -EPERM;

		p = pfn_to_page(pfn);
		if (p->mapping != adev->mman.bdev.dev_mapping)
			return -EPERM;

		ptr = kmap(p);
		r = copy_to_user(buf, ptr + off, bytes);
		kunmap(p);
		if (r)
			return -EFAULT;

		size -= bytes;
		*pos += bytes;
		result += bytes;
	}

	return result;
}
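/**
 * amdgpu_iomem_write - debugfs write of pages by their DMA (IOVA) address
 *
 * Counterpart of amdgpu_iomem_read(): copies user data into the physical
 * page behind the given IOVA, with the same ownership checks.
 */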
static ssize_t amdgpu_iomem_write(struct file *f, const char __user *buf,
				  size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	struct iommu_domain *dom;
	ssize_t result = 0;
	int r;

	dom = iommu_get_domain_for_dev(adev->dev);

	while (size) {
		phys_addr_t addr = *pos & PAGE_MASK;
		loff_t off = *pos & ~PAGE_MASK;
		size_t bytes = PAGE_SIZE - off;
		unsigned long pfn;
		struct page *p;
		void *ptr;

		bytes = bytes < size ? bytes : size;

		addr = dom ? iommu_iova_to_phys(dom, addr) : addr;

		pfn = addr >> PAGE_SHIFT;
		if (!pfn_valid(pfn))
			return -EPERM;

		p = pfn_to_page(pfn);
		if (p->mapping != adev->mman.bdev.dev_mapping)
			return -EPERM;

		ptr = kmap(p);
		r = copy_from_user(ptr + off, buf, bytes);
		kunmap(p);
		if (r)
			return -EFAULT;

		size -= bytes;
		*pos += bytes;
		result += bytes;
	}

	return result;
}

static const struct file_operations amdgpu_ttm_iomem_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_iomem_read,
	.write = amdgpu_iomem_write,
	.llseek = default_llseek
};

static const struct {
	char *name;
	const struct file_operations *fops;
	int domain;
} ttm_debugfs_entries[] = {
	{ "amdgpu_vram", &amdgpu_ttm_vram_fops, TTM_PL_VRAM },
#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
	{ "amdgpu_gtt", &amdgpu_ttm_gtt_fops, TTM_PL_TT },
#endif
	{ "amdgpu_iomem", &amdgpu_ttm_iomem_fops, TTM_PL_SYSTEM },
};

#endif
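/**
 * amdgpu_ttm_debugfs_init - register the amdgpu TTM debugfs entries
 *
 * Creates the raw-access files from ttm_debugfs_entries[] (sized to match
 * VRAM and GART where applicable) and registers the memory-manager dump
 * entries from amdgpu_ttm_debugfs_list[].
 */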
static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	unsigned count;

	struct drm_minor *minor = adev->ddev->primary;
	struct dentry *ent, *root = minor->debugfs_root;

	for (count = 0; count < ARRAY_SIZE(ttm_debugfs_entries); count++) {
		ent = debugfs_create_file(
				ttm_debugfs_entries[count].name,
				S_IFREG | S_IRUGO, root,
				adev,
				ttm_debugfs_entries[count].fops);
		if (IS_ERR(ent))
			return PTR_ERR(ent);
		if (ttm_debugfs_entries[count].domain == TTM_PL_VRAM)
			i_size_write(ent->d_inode, adev->gmc.mc_vram_size);
		else if (ttm_debugfs_entries[count].domain == TTM_PL_TT)
			i_size_write(ent->d_inode, adev->gmc.gart_size);
		adev->mman.debugfs_entries[count] = ent;
	}

	count = ARRAY_SIZE(amdgpu_ttm_debugfs_list);

#ifdef CONFIG_SWIOTLB
	if (!(adev->need_swiotlb && swiotlb_nr_tbl()))
		--count;
#endif

	return amdgpu_debugfs_add_files(adev, amdgpu_ttm_debugfs_list, count);
#else
	return 0;
#endif
}
static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	unsigned i;

	for (i = 0; i < ARRAY_SIZE(ttm_debugfs_entries); i++)
		debugfs_remove(adev->mman.debugfs_entries[i]);
#endif
}