amdgpu_object.c

/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */
#include <linux/list.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include <drm/drm_cache.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"

static u64 amdgpu_get_vis_part_size(struct amdgpu_device *adev,
				    struct ttm_mem_reg *mem)
{
	if (mem->start << PAGE_SHIFT >= adev->mc.visible_vram_size)
		return 0;

	return ((mem->start << PAGE_SHIFT) + mem->size) >
		adev->mc.visible_vram_size ?
		adev->mc.visible_vram_size - (mem->start << PAGE_SHIFT) :
		mem->size;
}

static void amdgpu_update_memory_usage(struct amdgpu_device *adev,
				       struct ttm_mem_reg *old_mem,
				       struct ttm_mem_reg *new_mem)
{
	u64 vis_size;

	if (!adev)
		return;

	if (new_mem) {
		switch (new_mem->mem_type) {
		case TTM_PL_TT:
			break;
		case TTM_PL_VRAM:
			atomic64_add(new_mem->size, &adev->vram_usage);
			vis_size = amdgpu_get_vis_part_size(adev, new_mem);
			atomic64_add(vis_size, &adev->vram_vis_usage);
			break;
		}
	}

	if (old_mem) {
		switch (old_mem->mem_type) {
		case TTM_PL_TT:
			break;
		case TTM_PL_VRAM:
			atomic64_sub(old_mem->size, &adev->vram_usage);
			vis_size = amdgpu_get_vis_part_size(adev, old_mem);
			atomic64_sub(vis_size, &adev->vram_vis_usage);
			break;
		}
	}
}

static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
	struct amdgpu_bo *bo;

	bo = container_of(tbo, struct amdgpu_bo, tbo);

	amdgpu_bo_kunmap(bo);
	amdgpu_update_memory_usage(adev, &bo->tbo.mem, NULL);

	drm_gem_object_release(&bo->gem_base);
	amdgpu_bo_unref(&bo->parent);
	if (!list_empty(&bo->shadow_list)) {
		mutex_lock(&adev->shadow_list_lock);
		list_del_init(&bo->shadow_list);
		mutex_unlock(&adev->shadow_list_lock);
	}
	kfree(bo->metadata);
	kfree(bo);
}

bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo)
{
	if (bo->destroy == &amdgpu_ttm_bo_destroy)
		return true;
	return false;
}

static void amdgpu_ttm_placement_init(struct amdgpu_device *adev,
				      struct ttm_placement *placement,
				      struct ttm_place *places,
				      u32 domain, u64 flags)
{
	u32 c = 0;

	if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
		unsigned visible_pfn = adev->mc.visible_vram_size >> PAGE_SHIFT;

		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
			TTM_PL_FLAG_VRAM;

		if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
			places[c].lpfn = visible_pfn;
		else
			places[c].flags |= TTM_PL_FLAG_TOPDOWN;

		if (flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)
			places[c].flags |= TTM_PL_FLAG_CONTIGUOUS;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_GTT) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].flags = TTM_PL_FLAG_TT;
		if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
			places[c].flags |= TTM_PL_FLAG_WC |
				TTM_PL_FLAG_UNCACHED;
		else
			places[c].flags |= TTM_PL_FLAG_CACHED;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_CPU) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].flags = TTM_PL_FLAG_SYSTEM;
		if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
			places[c].flags |= TTM_PL_FLAG_WC |
				TTM_PL_FLAG_UNCACHED;
		else
			places[c].flags |= TTM_PL_FLAG_CACHED;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_GDS) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_GDS;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_GWS) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_GWS;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_OA) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_OA;
		c++;
	}

	if (!c) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
		c++;
	}

	placement->num_placement = c;
	placement->placement = places;

	placement->num_busy_placement = c;
	placement->busy_placement = places;
}

void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);

	amdgpu_ttm_placement_init(adev, &abo->placement, abo->placements,
				  domain, abo->flags);
}

static void amdgpu_fill_placement_to_bo(struct amdgpu_bo *bo,
					struct ttm_placement *placement)
{
	BUG_ON(placement->num_placement > (AMDGPU_GEM_DOMAIN_MAX + 1));

	memcpy(bo->placements, placement->placement,
	       placement->num_placement * sizeof(struct ttm_place));
	bo->placement.num_placement = placement->num_placement;
	bo->placement.num_busy_placement = placement->num_busy_placement;
	bo->placement.placement = bo->placements;
	bo->placement.busy_placement = bo->placements;
}

/**
 * amdgpu_bo_create_reserved - create reserved BO for kernel use
 *
 * @adev: amdgpu device object
 * @size: size for the new BO
 * @align: alignment for the new BO
 * @domain: where to place it
 * @bo_ptr: resulting BO
 * @gpu_addr: GPU addr of the pinned BO
 * @cpu_addr: optional CPU address mapping
 *
 * Allocates and pins a BO for kernel internal use, and returns it still
 * reserved.
 *
 * Returns 0 on success, negative error code otherwise.
 */
int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
			      unsigned long size, int align,
			      u32 domain, struct amdgpu_bo **bo_ptr,
			      u64 *gpu_addr, void **cpu_addr)
{
	bool free = false;
	int r;

	if (!*bo_ptr) {
		r = amdgpu_bo_create(adev, size, align, true, domain,
				     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
				     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
				     NULL, NULL, 0, bo_ptr);
		if (r) {
			dev_err(adev->dev, "(%d) failed to allocate kernel bo\n",
				r);
			return r;
		}
		free = true;
	}

	r = amdgpu_bo_reserve(*bo_ptr, false);
	if (r) {
		dev_err(adev->dev, "(%d) failed to reserve kernel bo\n", r);
		goto error_free;
	}

	r = amdgpu_bo_pin(*bo_ptr, domain, gpu_addr);
	if (r) {
		dev_err(adev->dev, "(%d) kernel bo pin failed\n", r);
		goto error_unreserve;
	}

	if (cpu_addr) {
		r = amdgpu_bo_kmap(*bo_ptr, cpu_addr);
		if (r) {
			dev_err(adev->dev, "(%d) kernel bo map failed\n", r);
			goto error_unreserve;
		}
	}

	return 0;

error_unreserve:
	amdgpu_bo_unreserve(*bo_ptr);

error_free:
	if (free)
		amdgpu_bo_unref(bo_ptr);

	return r;
}
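
/*
 * Hypothetical usage sketch (editor's addition, not part of the driver): how a
 * caller might use amdgpu_bo_create_reserved() above for a small, pinned,
 * CPU-visible scratch buffer. The 4096-byte size and the scratch_bo/
 * scratch_gpu/scratch_cpu names are illustrative assumptions only.
 *
 *	struct amdgpu_bo *scratch_bo = NULL;
 *	u64 scratch_gpu;
 *	void *scratch_cpu;
 *	int r;
 *
 *	r = amdgpu_bo_create_reserved(adev, 4096, PAGE_SIZE,
 *				      AMDGPU_GEM_DOMAIN_VRAM, &scratch_bo,
 *				      &scratch_gpu, &scratch_cpu);
 *	if (r)
 *		return r;
 *	// fill scratch_cpu while the BO is still reserved, then:
 *	amdgpu_bo_unreserve(scratch_bo);
 */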

/**
 * amdgpu_bo_create_kernel - create BO for kernel use
 *
 * @adev: amdgpu device object
 * @size: size for the new BO
 * @align: alignment for the new BO
 * @domain: where to place it
 * @bo_ptr: resulting BO
 * @gpu_addr: GPU addr of the pinned BO
 * @cpu_addr: optional CPU address mapping
 *
 * Allocates and pins a BO for kernel internal use.
 *
 * Returns 0 on success, negative error code otherwise.
 */
int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
			    unsigned long size, int align,
			    u32 domain, struct amdgpu_bo **bo_ptr,
			    u64 *gpu_addr, void **cpu_addr)
{
	int r;

	r = amdgpu_bo_create_reserved(adev, size, align, domain, bo_ptr,
				      gpu_addr, cpu_addr);
	if (r)
		return r;

	amdgpu_bo_unreserve(*bo_ptr);

	return 0;
}

/**
 * amdgpu_bo_free_kernel - free BO for kernel use
 *
 * @bo: amdgpu BO to free
 *
 * Unmaps and unpins a BO for kernel internal use.
 */
void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
			   void **cpu_addr)
{
	if (*bo == NULL)
		return;

	if (likely(amdgpu_bo_reserve(*bo, true) == 0)) {
		if (cpu_addr)
			amdgpu_bo_kunmap(*bo);

		amdgpu_bo_unpin(*bo);
		amdgpu_bo_unreserve(*bo);
	}
	amdgpu_bo_unref(bo);

	if (gpu_addr)
		*gpu_addr = 0;

	if (cpu_addr)
		*cpu_addr = NULL;
}
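
/*
 * Hypothetical lifecycle sketch (editor's addition, illustrative only):
 * amdgpu_bo_create_kernel() hands back an unreserved, pinned BO, and
 * amdgpu_bo_free_kernel() tears it down again. The wb_bo/wb_gpu/wb_cpu names
 * below are assumptions for the example, not existing driver state.
 *
 *	struct amdgpu_bo *wb_bo = NULL;
 *	u64 wb_gpu;
 *	void *wb_cpu;
 *
 *	if (amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
 *				    AMDGPU_GEM_DOMAIN_GTT, &wb_bo,
 *				    &wb_gpu, &wb_cpu))
 *		return -ENOMEM;
 *
 *	// ... use wb_cpu / wb_gpu for the lifetime of the buffer ...
 *
 *	amdgpu_bo_free_kernel(&wb_bo, &wb_gpu, &wb_cpu);
 */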

int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
				unsigned long size, int byte_align,
				bool kernel, u32 domain, u64 flags,
				struct sg_table *sg,
				struct ttm_placement *placement,
				struct reservation_object *resv,
				uint64_t init_value,
				struct amdgpu_bo **bo_ptr)
{
	struct amdgpu_bo *bo;
	enum ttm_bo_type type;
	unsigned long page_align;
	u64 initial_bytes_moved, bytes_moved;
	size_t acc_size;
	int r;

	page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
	size = ALIGN(size, PAGE_SIZE);

	if (kernel) {
		type = ttm_bo_type_kernel;
	} else if (sg) {
		type = ttm_bo_type_sg;
	} else {
		type = ttm_bo_type_device;
	}
	*bo_ptr = NULL;

	acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
				       sizeof(struct amdgpu_bo));

	bo = kzalloc(sizeof(struct amdgpu_bo), GFP_KERNEL);
	if (bo == NULL)
		return -ENOMEM;
	r = drm_gem_object_init(adev->ddev, &bo->gem_base, size);
	if (unlikely(r)) {
		kfree(bo);
		return r;
	}
	INIT_LIST_HEAD(&bo->shadow_list);
	INIT_LIST_HEAD(&bo->va);
	bo->preferred_domains = domain & (AMDGPU_GEM_DOMAIN_VRAM |
					  AMDGPU_GEM_DOMAIN_GTT |
					  AMDGPU_GEM_DOMAIN_CPU |
					  AMDGPU_GEM_DOMAIN_GDS |
					  AMDGPU_GEM_DOMAIN_GWS |
					  AMDGPU_GEM_DOMAIN_OA);
	bo->allowed_domains = bo->preferred_domains;
	if (!kernel && bo->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
		bo->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;

	bo->flags = flags;

#ifdef CONFIG_X86_32
	/* XXX: Write-combined CPU mappings of GTT seem broken on 32-bit
	 * See https://bugs.freedesktop.org/show_bug.cgi?id=84627
	 */
	bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC;
#elif defined(CONFIG_X86) && !defined(CONFIG_X86_PAT)
	/* Don't try to enable write-combining when it can't work, or things
	 * may be slow
	 * See https://bugs.freedesktop.org/show_bug.cgi?id=88758
	 */

#ifndef CONFIG_COMPILE_TEST
#warning Please enable CONFIG_MTRR and CONFIG_X86_PAT for better performance \
	 thanks to write-combining
#endif

	if (bo->flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
		DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for "
			      "better performance thanks to write-combining\n");
	bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC;
#else
	/* For architectures that don't support WC memory,
	 * mask out the WC flag from the BO
	 */
	if (!drm_arch_can_wc_memory())
		bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC;
#endif

	amdgpu_fill_placement_to_bo(bo, placement);
	/* Kernel allocations are uninterruptible */

	initial_bytes_moved = atomic64_read(&adev->num_bytes_moved);
	r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, type,
				 &bo->placement, page_align, !kernel, NULL,
				 acc_size, sg, resv, &amdgpu_ttm_bo_destroy);
	bytes_moved = atomic64_read(&adev->num_bytes_moved) -
		      initial_bytes_moved;
	if (adev->mc.visible_vram_size < adev->mc.real_vram_size &&
	    bo->tbo.mem.mem_type == TTM_PL_VRAM &&
	    bo->tbo.mem.start < adev->mc.visible_vram_size >> PAGE_SHIFT)
		amdgpu_cs_report_moved_bytes(adev, bytes_moved, bytes_moved);
	else
		amdgpu_cs_report_moved_bytes(adev, bytes_moved, 0);

	if (unlikely(r != 0))
		return r;

	if (kernel)
		bo->tbo.priority = 1;

	if (flags & AMDGPU_GEM_CREATE_VRAM_CLEARED &&
	    bo->tbo.mem.placement & TTM_PL_FLAG_VRAM) {
		struct dma_fence *fence;

		r = amdgpu_fill_buffer(bo, init_value, bo->tbo.resv, &fence);
		if (unlikely(r))
			goto fail_unreserve;

		amdgpu_bo_fence(bo, fence, false);
		dma_fence_put(bo->tbo.moving);
		bo->tbo.moving = dma_fence_get(fence);
		dma_fence_put(fence);
	}
	if (!resv)
		amdgpu_bo_unreserve(bo);
	*bo_ptr = bo;

	trace_amdgpu_bo_create(bo);

	/* Treat CPU_ACCESS_REQUIRED only as a hint if given by UMD */
	if (type == ttm_bo_type_device)
		bo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;

	return 0;

fail_unreserve:
	if (!resv)
		ww_mutex_unlock(&bo->tbo.resv->lock);
	amdgpu_bo_unref(&bo);
	return r;
}

static int amdgpu_bo_create_shadow(struct amdgpu_device *adev,
				   unsigned long size, int byte_align,
				   struct amdgpu_bo *bo)
{
	struct ttm_placement placement = {0};
	struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1];
	int r;

	if (bo->shadow)
		return 0;

	bo->flags |= AMDGPU_GEM_CREATE_SHADOW;
	memset(&placements, 0,
	       (AMDGPU_GEM_DOMAIN_MAX + 1) * sizeof(struct ttm_place));

	amdgpu_ttm_placement_init(adev, &placement,
				  placements, AMDGPU_GEM_DOMAIN_GTT,
				  AMDGPU_GEM_CREATE_CPU_GTT_USWC);

	r = amdgpu_bo_create_restricted(adev, size, byte_align, true,
					AMDGPU_GEM_DOMAIN_GTT,
					AMDGPU_GEM_CREATE_CPU_GTT_USWC,
					NULL, &placement,
					bo->tbo.resv,
					0,
					&bo->shadow);
	if (!r) {
		bo->shadow->parent = amdgpu_bo_ref(bo);
		mutex_lock(&adev->shadow_list_lock);
		list_add_tail(&bo->shadow_list, &adev->shadow_list);
		mutex_unlock(&adev->shadow_list_lock);
	}

	return r;
}

/* init_value will only take effect when flags contains
 * AMDGPU_GEM_CREATE_VRAM_CLEARED.
 */
int amdgpu_bo_create(struct amdgpu_device *adev,
		     unsigned long size, int byte_align,
		     bool kernel, u32 domain, u64 flags,
		     struct sg_table *sg,
		     struct reservation_object *resv,
		     uint64_t init_value,
		     struct amdgpu_bo **bo_ptr)
{
	struct ttm_placement placement = {0};
	struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1];
	int r;

	memset(&placements, 0,
	       (AMDGPU_GEM_DOMAIN_MAX + 1) * sizeof(struct ttm_place));

	amdgpu_ttm_placement_init(adev, &placement,
				  placements, domain, flags);

	r = amdgpu_bo_create_restricted(adev, size, byte_align, kernel,
					domain, flags, sg, &placement,
					resv, init_value, bo_ptr);
	if (r)
		return r;

	if (amdgpu_need_backup(adev) && (flags & AMDGPU_GEM_CREATE_SHADOW)) {
		if (!resv) {
			r = ww_mutex_lock(&(*bo_ptr)->tbo.resv->lock, NULL);
			WARN_ON(r != 0);
		}

		r = amdgpu_bo_create_shadow(adev, size, byte_align, (*bo_ptr));

		if (!resv)
			ww_mutex_unlock(&(*bo_ptr)->tbo.resv->lock);

		if (r)
			amdgpu_bo_unref(bo_ptr);
	}

	return r;
}
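
/*
 * Hypothetical sketch of the init_value behaviour documented above (editor's
 * addition, illustrative only): a caller that wants a zero-filled, GPU-only
 * VRAM BO could pass AMDGPU_GEM_CREATE_VRAM_CLEARED with init_value == 0.
 * Without that flag the init_value argument is ignored. The cleared_bo name
 * is an assumption for the example.
 *
 *	struct amdgpu_bo *cleared_bo = NULL;
 *	int r;
 *
 *	r = amdgpu_bo_create(adev, size, PAGE_SIZE, false,
 *			     AMDGPU_GEM_DOMAIN_VRAM,
 *			     AMDGPU_GEM_CREATE_VRAM_CLEARED |
 *			     AMDGPU_GEM_CREATE_NO_CPU_ACCESS,
 *			     NULL, NULL, 0, &cleared_bo);
 */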

int amdgpu_bo_backup_to_shadow(struct amdgpu_device *adev,
			       struct amdgpu_ring *ring,
			       struct amdgpu_bo *bo,
			       struct reservation_object *resv,
			       struct dma_fence **fence,
			       bool direct)
{
	struct amdgpu_bo *shadow = bo->shadow;
	uint64_t bo_addr, shadow_addr;
	int r;

	if (!shadow)
		return -EINVAL;

	bo_addr = amdgpu_bo_gpu_offset(bo);
	shadow_addr = amdgpu_bo_gpu_offset(bo->shadow);

	r = reservation_object_reserve_shared(bo->tbo.resv);
	if (r)
		goto err;

	r = amdgpu_copy_buffer(ring, bo_addr, shadow_addr,
			       amdgpu_bo_size(bo), resv, fence,
			       direct, false);
	if (!r)
		amdgpu_bo_fence(bo, *fence, true);

err:
	return r;
}

int amdgpu_bo_validate(struct amdgpu_bo *bo)
{
	uint32_t domain;
	int r;

	if (bo->pin_count)
		return 0;

	domain = bo->preferred_domains;

retry:
	amdgpu_ttm_placement_from_domain(bo, domain);
	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
	if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
		domain = bo->allowed_domains;
		goto retry;
	}

	return r;
}

int amdgpu_bo_restore_from_shadow(struct amdgpu_device *adev,
				  struct amdgpu_ring *ring,
				  struct amdgpu_bo *bo,
				  struct reservation_object *resv,
				  struct dma_fence **fence,
				  bool direct)
{
	struct amdgpu_bo *shadow = bo->shadow;
	uint64_t bo_addr, shadow_addr;
	int r;

	if (!shadow)
		return -EINVAL;

	bo_addr = amdgpu_bo_gpu_offset(bo);
	shadow_addr = amdgpu_bo_gpu_offset(bo->shadow);

	r = reservation_object_reserve_shared(bo->tbo.resv);
	if (r)
		goto err;

	r = amdgpu_copy_buffer(ring, shadow_addr, bo_addr,
			       amdgpu_bo_size(bo), resv, fence,
			       direct, false);
	if (!r)
		amdgpu_bo_fence(bo, *fence, true);

err:
	return r;
}

int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
{
	void *kptr;
	long r;

	if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
		return -EPERM;

	kptr = amdgpu_bo_kptr(bo);
	if (kptr) {
		if (ptr)
			*ptr = kptr;
		return 0;
	}

	r = reservation_object_wait_timeout_rcu(bo->tbo.resv, false, false,
						MAX_SCHEDULE_TIMEOUT);
	if (r < 0)
		return r;

	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
	if (r)
		return r;

	if (ptr)
		*ptr = amdgpu_bo_kptr(bo);

	return 0;
}

void *amdgpu_bo_kptr(struct amdgpu_bo *bo)
{
	bool is_iomem;

	return ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
}

void amdgpu_bo_kunmap(struct amdgpu_bo *bo)
{
	if (bo->kmap.bo)
		ttm_bo_kunmap(&bo->kmap);
}

struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo)
{
	if (bo == NULL)
		return NULL;

	ttm_bo_reference(&bo->tbo);
	return bo;
}

void amdgpu_bo_unref(struct amdgpu_bo **bo)
{
	struct ttm_buffer_object *tbo;

	if ((*bo) == NULL)
		return;

	tbo = &((*bo)->tbo);
	ttm_bo_unref(&tbo);
	if (tbo == NULL)
		*bo = NULL;
}

int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
			     u64 min_offset, u64 max_offset,
			     u64 *gpu_addr)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	int r, i;
	unsigned fpfn, lpfn;

	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm))
		return -EPERM;

	if (WARN_ON_ONCE(min_offset > max_offset))
		return -EINVAL;

	/* A shared bo cannot be migrated to VRAM */
	if (bo->prime_shared_count && (domain == AMDGPU_GEM_DOMAIN_VRAM))
		return -EINVAL;

	if (bo->pin_count) {
		uint32_t mem_type = bo->tbo.mem.mem_type;

		if (domain != amdgpu_mem_type_to_domain(mem_type))
			return -EINVAL;

		bo->pin_count++;
		if (gpu_addr)
			*gpu_addr = amdgpu_bo_gpu_offset(bo);

		if (max_offset != 0) {
			u64 domain_start = bo->tbo.bdev->man[mem_type].gpu_offset;
			WARN_ON_ONCE(max_offset <
				     (amdgpu_bo_gpu_offset(bo) - domain_start));
		}

		return 0;
	}

	bo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
	amdgpu_ttm_placement_from_domain(bo, domain);
	for (i = 0; i < bo->placement.num_placement; i++) {
		/* force to pin into visible video ram */
		if ((bo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
		    !(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS) &&
		    (!max_offset || max_offset >
		     adev->mc.visible_vram_size)) {
			if (WARN_ON_ONCE(min_offset >
					 adev->mc.visible_vram_size))
				return -EINVAL;
			fpfn = min_offset >> PAGE_SHIFT;
			lpfn = adev->mc.visible_vram_size >> PAGE_SHIFT;
		} else {
			fpfn = min_offset >> PAGE_SHIFT;
			lpfn = max_offset >> PAGE_SHIFT;
		}
		if (fpfn > bo->placements[i].fpfn)
			bo->placements[i].fpfn = fpfn;
		if (!bo->placements[i].lpfn ||
		    (lpfn && lpfn < bo->placements[i].lpfn))
			bo->placements[i].lpfn = lpfn;
		bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
	}

	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
	if (unlikely(r)) {
		dev_err(adev->dev, "%p pin failed\n", bo);
		goto error;
	}

	bo->pin_count = 1;
	if (gpu_addr != NULL) {
		r = amdgpu_ttm_bind(&bo->tbo, &bo->tbo.mem);
		if (unlikely(r)) {
			dev_err(adev->dev, "%p bind failed\n", bo);
			goto error;
		}
		*gpu_addr = amdgpu_bo_gpu_offset(bo);
	}
	if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
		adev->vram_pin_size += amdgpu_bo_size(bo);
		if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
			adev->invisible_pin_size += amdgpu_bo_size(bo);
	} else if (domain == AMDGPU_GEM_DOMAIN_GTT) {
		adev->gart_pin_size += amdgpu_bo_size(bo);
	}

error:
	return r;
}

int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain, u64 *gpu_addr)
{
	return amdgpu_bo_pin_restricted(bo, domain, 0, 0, gpu_addr);
}

int amdgpu_bo_unpin(struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	int r, i;

	if (!bo->pin_count) {
		dev_warn(adev->dev, "%p unpin not necessary\n", bo);
		return 0;
	}
	bo->pin_count--;
	if (bo->pin_count)
		return 0;
	for (i = 0; i < bo->placement.num_placement; i++) {
		bo->placements[i].lpfn = 0;
		bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
	}
	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
	if (unlikely(r)) {
		dev_err(adev->dev, "%p validate failed for unpin\n", bo);
		goto error;
	}

	if (bo->tbo.mem.mem_type == TTM_PL_VRAM) {
		adev->vram_pin_size -= amdgpu_bo_size(bo);
		if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
			adev->invisible_pin_size -= amdgpu_bo_size(bo);
	} else if (bo->tbo.mem.mem_type == TTM_PL_TT) {
		adev->gart_pin_size -= amdgpu_bo_size(bo);
	}

error:
	return r;
}
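
/*
 * Hypothetical pin/unpin sketch (editor's addition, illustrative only): the
 * usual pattern is to reserve the BO, pin it to get a stable GPU address, and
 * balance the pin with an unpin under a later reservation when the address is
 * no longer needed. fb_bo and fb_gpu_addr are assumed names for the example.
 *
 *	r = amdgpu_bo_reserve(fb_bo, false);
 *	if (unlikely(r))
 *		return r;
 *	r = amdgpu_bo_pin(fb_bo, AMDGPU_GEM_DOMAIN_VRAM, &fb_gpu_addr);
 *	amdgpu_bo_unreserve(fb_bo);
 *
 *	// ... later, when done with fb_gpu_addr ...
 *	if (amdgpu_bo_reserve(fb_bo, true) == 0) {
 *		amdgpu_bo_unpin(fb_bo);
 *		amdgpu_bo_unreserve(fb_bo);
 *	}
 */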

int amdgpu_bo_evict_vram(struct amdgpu_device *adev)
{
	/* late 2.6.33 fix IGP hibernate - we need pm ops to do this correctly */
	if (0 && (adev->flags & AMD_IS_APU)) {
		/* Useless to evict on IGP chips */
		return 0;
	}
	return ttm_bo_evict_mm(&adev->mman.bdev, TTM_PL_VRAM);
}

static const char *amdgpu_vram_names[] = {
	"UNKNOWN",
	"GDDR1",
	"DDR2",
	"GDDR3",
	"GDDR4",
	"GDDR5",
	"HBM",
	"DDR3"
};

int amdgpu_bo_init(struct amdgpu_device *adev)
{
	/* reserve PAT memory space to WC for VRAM */
	arch_io_reserve_memtype_wc(adev->mc.aper_base,
				   adev->mc.aper_size);

	/* Add an MTRR for the VRAM */
	adev->mc.vram_mtrr = arch_phys_wc_add(adev->mc.aper_base,
					      adev->mc.aper_size);
	DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
		 adev->mc.mc_vram_size >> 20,
		 (unsigned long long)adev->mc.aper_size >> 20);
	DRM_INFO("RAM width %dbits %s\n",
		 adev->mc.vram_width, amdgpu_vram_names[adev->mc.vram_type]);
	return amdgpu_ttm_init(adev);
}

void amdgpu_bo_fini(struct amdgpu_device *adev)
{
	amdgpu_ttm_fini(adev);
	arch_phys_wc_del(adev->mc.vram_mtrr);
	arch_io_free_memtype_wc(adev->mc.aper_base, adev->mc.aper_size);
}

int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo,
			 struct vm_area_struct *vma)
{
	return ttm_fbdev_mmap(vma, &bo->tbo);
}

int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);

	if (adev->family <= AMDGPU_FAMILY_CZ &&
	    AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT) > 6)
		return -EINVAL;

	bo->tiling_flags = tiling_flags;
	return 0;
}

void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags)
{
	lockdep_assert_held(&bo->tbo.resv->lock.base);

	if (tiling_flags)
		*tiling_flags = bo->tiling_flags;
}

int amdgpu_bo_set_metadata(struct amdgpu_bo *bo, void *metadata,
			   uint32_t metadata_size, uint64_t flags)
{
	void *buffer;

	if (!metadata_size) {
		if (bo->metadata_size) {
			kfree(bo->metadata);
			bo->metadata = NULL;
			bo->metadata_size = 0;
		}
		return 0;
	}

	if (metadata == NULL)
		return -EINVAL;

	buffer = kmemdup(metadata, metadata_size, GFP_KERNEL);
	if (buffer == NULL)
		return -ENOMEM;

	kfree(bo->metadata);
	bo->metadata_flags = flags;
	bo->metadata = buffer;
	bo->metadata_size = metadata_size;

	return 0;
}

int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
			   size_t buffer_size, uint32_t *metadata_size,
			   uint64_t *flags)
{
	if (!buffer && !metadata_size)
		return -EINVAL;

	if (buffer) {
		if (buffer_size < bo->metadata_size)
			return -EINVAL;

		if (bo->metadata_size)
			memcpy(buffer, bo->metadata, bo->metadata_size);
	}

	if (metadata_size)
		*metadata_size = bo->metadata_size;
	if (flags)
		*flags = bo->metadata_flags;

	return 0;
}

void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
			   bool evict,
			   struct ttm_mem_reg *new_mem)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct amdgpu_bo *abo;
	struct ttm_mem_reg *old_mem = &bo->mem;

	if (!amdgpu_ttm_bo_is_amdgpu_bo(bo))
		return;

	abo = container_of(bo, struct amdgpu_bo, tbo);
	amdgpu_vm_bo_invalidate(adev, abo);

	amdgpu_bo_kunmap(abo);

	/* remember the eviction */
	if (evict)
		atomic64_inc(&adev->num_evictions);

	/* update statistics */
	if (!new_mem)
		return;

	/* move_notify is called before move happens */
	amdgpu_update_memory_usage(adev, &bo->mem, new_mem);

	trace_amdgpu_ttm_bo_move(abo, new_mem->mem_type, old_mem->mem_type);
}

int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct amdgpu_bo *abo;
	unsigned long offset, size;
	int r;

	if (!amdgpu_ttm_bo_is_amdgpu_bo(bo))
		return 0;

	abo = container_of(bo, struct amdgpu_bo, tbo);

	/* Remember that this BO was accessed by the CPU */
	abo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;

	if (bo->mem.mem_type != TTM_PL_VRAM)
		return 0;

	size = bo->mem.num_pages << PAGE_SHIFT;
	offset = bo->mem.start << PAGE_SHIFT;
	if ((offset + size) <= adev->mc.visible_vram_size)
		return 0;

	/* Can't move a pinned BO to visible VRAM */
	if (abo->pin_count > 0)
		return -EINVAL;

	/* hurrah the memory is not visible ! */
	atomic64_inc(&adev->num_vram_cpu_page_faults);
	amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
					 AMDGPU_GEM_DOMAIN_GTT);

	/* Avoid costly evictions; only set GTT as a busy placement */
	abo->placement.num_busy_placement = 1;
	abo->placement.busy_placement = &abo->placements[1];

	r = ttm_bo_validate(bo, &abo->placement, false, false);
	if (unlikely(r != 0))
		return r;

	offset = bo->mem.start << PAGE_SHIFT;
	/* this should never happen */
	if (bo->mem.mem_type == TTM_PL_VRAM &&
	    (offset + size) > adev->mc.visible_vram_size)
		return -EINVAL;

	return 0;
}

/**
 * amdgpu_bo_fence - add fence to buffer object
 *
 * @bo: buffer object in question
 * @fence: fence to add
 * @shared: true if fence should be added shared
 *
 */
void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
		     bool shared)
{
	struct reservation_object *resv = bo->tbo.resv;

	if (shared)
		reservation_object_add_shared_fence(resv, fence);
	else
		reservation_object_add_excl_fence(resv, fence);
}
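
/*
 * Hypothetical sketch of amdgpu_bo_fence() usage (editor's addition,
 * illustrative only): after submitting work that writes the BO, attach the
 * resulting fence exclusively so later users wait for the write; read-only
 * work would pass shared=true instead. "fence" is assumed to come from a
 * prior submission in this sketch.
 *
 *	amdgpu_bo_fence(bo, fence, false);	// exclusive: BO contents change
 *	dma_fence_put(fence);
 */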

/**
 * amdgpu_bo_gpu_offset - return GPU offset of bo
 * @bo: amdgpu object for which we query the offset
 *
 * Returns current GPU offset of the object.
 *
 * Note: object should either be pinned or reserved when calling this
 * function, it might be useful to add check for this for debugging.
 */
u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
{
	WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_SYSTEM);
	WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_TT &&
		     !amdgpu_ttm_is_bound(bo->tbo.ttm));
	WARN_ON_ONCE(!ww_mutex_is_locked(&bo->tbo.resv->lock) &&
		     !bo->pin_count);
	WARN_ON_ONCE(bo->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET);
	WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_VRAM &&
		     !(bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS));

	return bo->tbo.offset;
}
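
/*
 * Hypothetical sketch for the note above (editor's addition, illustrative
 * only): querying the GPU offset of a BO that is not pinned should be done
 * while the reservation is held, e.g.:
 *
 *	r = amdgpu_bo_reserve(bo, false);
 *	if (unlikely(r))
 *		return r;
 *	addr = amdgpu_bo_gpu_offset(bo);
 *	amdgpu_bo_unreserve(bo);
 */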