/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/atomic.h>
#include <linux/reservation.h>

#define TTM_ASSERT_LOCKED(param)
#define TTM_DEBUG(fmt, arg...)
#define TTM_BO_HASH_ORDER 13

static int ttm_bo_swapout(struct ttm_mem_shrink *shrink);
static void ttm_bo_global_kobj_release(struct kobject *kobj);

static struct attribute ttm_bo_count = {
	.name = "bo_count",
	.mode = S_IRUGO
};

static inline int ttm_mem_type_from_place(const struct ttm_place *place,
					  uint32_t *mem_type)
{
	int pos;

	pos = ffs(place->flags & TTM_PL_MASK_MEM);
	if (unlikely(!pos))
		return -EINVAL;

	*mem_type = pos - 1;
	return 0;
}

static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];

	pr_err("    has_type: %d\n", man->has_type);
	pr_err("    use_type: %d\n", man->use_type);
	pr_err("    flags: 0x%08X\n", man->flags);
	pr_err("    gpu_offset: 0x%08llX\n", man->gpu_offset);
	pr_err("    size: %llu\n", man->size);
	pr_err("    available_caching: 0x%08X\n", man->available_caching);
	pr_err("    default_caching: 0x%08X\n", man->default_caching);
	if (mem_type != TTM_PL_SYSTEM)
		(*man->func->debug)(man, TTM_PFX);
}

static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
				   struct ttm_placement *placement)
{
	int i, ret, mem_type;

	pr_err("No space for %p (%lu pages, %luK, %luM)\n",
	       bo, bo->mem.num_pages, bo->mem.size >> 10,
	       bo->mem.size >> 20);

	for (i = 0; i < placement->num_placement; i++) {
		ret = ttm_mem_type_from_place(&placement->placement[i],
					      &mem_type);
		if (ret)
			return;
		pr_err("  placement[%d]=0x%08X (%d)\n",
		       i, placement->placement[i].flags, mem_type);
		ttm_mem_type_debug(bo->bdev, mem_type);
	}
}

static ssize_t ttm_bo_global_show(struct kobject *kobj,
				  struct attribute *attr,
				  char *buffer)
{
	struct ttm_bo_global *glob =
		container_of(kobj, struct ttm_bo_global, kobj);

	return snprintf(buffer, PAGE_SIZE, "%lu\n",
			(unsigned long) atomic_read(&glob->bo_count));
}

static struct attribute *ttm_bo_global_attrs[] = {
	&ttm_bo_count,
	NULL
};

static const struct sysfs_ops ttm_bo_global_ops = {
	.show = &ttm_bo_global_show
};

static struct kobj_type ttm_bo_glob_kobj_type = {
	.release = &ttm_bo_global_kobj_release,
	.sysfs_ops = &ttm_bo_global_ops,
	.default_attrs = ttm_bo_global_attrs
};

static inline uint32_t ttm_bo_type_flags(unsigned type)
{
	return 1 << (type);
}

static void ttm_bo_release_list(struct kref *list_kref)
{
	struct ttm_buffer_object *bo =
		container_of(list_kref, struct ttm_buffer_object, list_kref);
	struct ttm_bo_device *bdev = bo->bdev;
	size_t acc_size = bo->acc_size;

	BUG_ON(kref_read(&bo->list_kref));
	BUG_ON(kref_read(&bo->kref));
	BUG_ON(atomic_read(&bo->cpu_writers));
	BUG_ON(bo->mem.mm_node != NULL);
	BUG_ON(!list_empty(&bo->lru));
	BUG_ON(!list_empty(&bo->ddestroy));
	ttm_tt_destroy(bo->ttm);
	atomic_dec(&bo->glob->bo_count);
	dma_fence_put(bo->moving);
	if (bo->resv == &bo->ttm_resv)
		reservation_object_fini(&bo->ttm_resv);
	mutex_destroy(&bo->wu_mutex);
	if (bo->destroy)
		bo->destroy(bo);
	else
		kfree(bo);
	ttm_mem_global_free(bdev->glob->mem_glob, acc_size);
}

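/*
 * Note: each list membership (the per-memory-type LRU list and the global
 * swap LRU) holds its own reference on bo->list_kref; the reference is
 * taken in ttm_bo_add_to_lru() and dropped again in ttm_bo_del_from_lru().
 */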
void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man;

	lockdep_assert_held(&bo->resv->lock.base);

	if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
		BUG_ON(!list_empty(&bo->lru));

		man = &bdev->man[bo->mem.mem_type];
		list_add_tail(&bo->lru, &man->lru[bo->priority]);
		kref_get(&bo->list_kref);

		if (bo->ttm && !(bo->ttm->page_flags & TTM_PAGE_FLAG_SG)) {
			list_add_tail(&bo->swap,
				      &bo->glob->swap_lru[bo->priority]);
			kref_get(&bo->list_kref);
		}
	}
}
EXPORT_SYMBOL(ttm_bo_add_to_lru);

static void ttm_bo_ref_bug(struct kref *list_kref)
{
	BUG();
}

void ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
{
	if (!list_empty(&bo->swap)) {
		list_del_init(&bo->swap);
		kref_put(&bo->list_kref, ttm_bo_ref_bug);
	}
	if (!list_empty(&bo->lru)) {
		list_del_init(&bo->lru);
		kref_put(&bo->list_kref, ttm_bo_ref_bug);
	}

	/*
	 * TODO: Add a driver hook to delete from
	 * driver-specific LRU's here.
	 */
}

void ttm_bo_del_sub_from_lru(struct ttm_buffer_object *bo)
{
	spin_lock(&bo->glob->lru_lock);
	ttm_bo_del_from_lru(bo);
	spin_unlock(&bo->glob->lru_lock);
}
EXPORT_SYMBOL(ttm_bo_del_sub_from_lru);

void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo)
{
	lockdep_assert_held(&bo->resv->lock.base);

	ttm_bo_del_from_lru(bo);
	ttm_bo_add_to_lru(bo);
}
EXPORT_SYMBOL(ttm_bo_move_to_lru_tail);

/*
 * Called with bo->mutex held.
 */
static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_global *glob = bo->glob;
	int ret = 0;
	uint32_t page_flags = 0;

	TTM_ASSERT_LOCKED(&bo->mutex);
	bo->ttm = NULL;

	if (bdev->need_dma32)
		page_flags |= TTM_PAGE_FLAG_DMA32;

	switch (bo->type) {
	case ttm_bo_type_device:
		if (zero_alloc)
			page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
		/* fall through: device BOs are created like kernel BOs */
	case ttm_bo_type_kernel:
		bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
						      page_flags, glob->dummy_read_page);
		if (unlikely(bo->ttm == NULL))
			ret = -ENOMEM;
		break;
	case ttm_bo_type_sg:
		bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
						      page_flags | TTM_PAGE_FLAG_SG,
						      glob->dummy_read_page);
		if (unlikely(bo->ttm == NULL)) {
			ret = -ENOMEM;
			break;
		}
		bo->ttm->sg = bo->sg;
		break;
	default:
		pr_err("Illegal buffer object type\n");
		ret = -EINVAL;
		break;
	}

	return ret;
}

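/*
 * ttm_bo_handle_move_mem() moves the buffer's backing storage to the memory
 * region described by @mem: it unmaps CPU mappings when the caching or PCI
 * aperture state changes, (re)binds a TTM for non-fixed memory types, and
 * then delegates the actual copy to ttm_bo_move_ttm(), the driver's move()
 * hook, or ttm_bo_move_memcpy() as a last resort.
 */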
static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
				  struct ttm_mem_reg *mem,
				  bool evict, bool interruptible,
				  bool no_wait_gpu)
{
	struct ttm_bo_device *bdev = bo->bdev;
	bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
	bool new_is_pci = ttm_mem_reg_is_pci(bdev, mem);
	struct ttm_mem_type_manager *old_man = &bdev->man[bo->mem.mem_type];
	struct ttm_mem_type_manager *new_man = &bdev->man[mem->mem_type];
	int ret = 0;

	if (old_is_pci || new_is_pci ||
	    ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0)) {
		ret = ttm_mem_io_lock(old_man, true);
		if (unlikely(ret != 0))
			goto out_err;
		ttm_bo_unmap_virtual_locked(bo);
		ttm_mem_io_unlock(old_man);
	}

	/*
	 * Create and bind a ttm if required.
	 */
	if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
		if (bo->ttm == NULL) {
			bool zero = !(old_man->flags & TTM_MEMTYPE_FLAG_FIXED);
			ret = ttm_bo_add_ttm(bo, zero);
			if (ret)
				goto out_err;
		}

		ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement);
		if (ret)
			goto out_err;

		if (mem->mem_type != TTM_PL_SYSTEM) {
			ret = ttm_tt_bind(bo->ttm, mem);
			if (ret)
				goto out_err;
		}

		if (bo->mem.mem_type == TTM_PL_SYSTEM) {
			if (bdev->driver->move_notify)
				bdev->driver->move_notify(bo, evict, mem);
			bo->mem = *mem;
			mem->mm_node = NULL;
			goto moved;
		}
	}

	if (bdev->driver->move_notify)
		bdev->driver->move_notify(bo, evict, mem);

	if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
	    !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
		ret = ttm_bo_move_ttm(bo, interruptible, no_wait_gpu, mem);
	else if (bdev->driver->move)
		ret = bdev->driver->move(bo, evict, interruptible,
					 no_wait_gpu, mem);
	else
		ret = ttm_bo_move_memcpy(bo, interruptible, no_wait_gpu, mem);

	if (ret) {
		if (bdev->driver->move_notify) {
			struct ttm_mem_reg tmp_mem = *mem;
			*mem = bo->mem;
			bo->mem = tmp_mem;
			bdev->driver->move_notify(bo, false, mem);
			bo->mem = *mem;
			*mem = tmp_mem;
		}
		goto out_err;
	}

moved:
	if (bo->evicted) {
		if (bdev->driver->invalidate_caches) {
			ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
			if (ret)
				pr_err("Cannot flush read caches\n");
		}
		bo->evicted = false;
	}

	if (bo->mem.mm_node) {
		bo->offset = (bo->mem.start << PAGE_SHIFT) +
		    bdev->man[bo->mem.mem_type].gpu_offset;
		bo->cur_placement = bo->mem.placement;
	} else
		bo->offset = 0;

	return 0;

out_err:
	new_man = &bdev->man[bo->mem.mem_type];
	if (new_man->flags & TTM_MEMTYPE_FLAG_FIXED) {
		ttm_tt_destroy(bo->ttm);
		bo->ttm = NULL;
	}

	return ret;
}

/**
 * Called with bo::reserved held.
 * Will release GPU memory type usage on destruction.
 * This is the place to put in driver specific hooks to release
 * driver private resources.
 * Will release the bo::reserved lock.
 */
static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
{
	if (bo->bdev->driver->move_notify)
		bo->bdev->driver->move_notify(bo, false, NULL);

	ttm_tt_destroy(bo->ttm);
	bo->ttm = NULL;
	ttm_bo_mem_put(bo, &bo->mem);

	ww_mutex_unlock(&bo->resv->lock);
}

static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
{
	struct reservation_object_list *fobj;
	struct dma_fence *fence;
	int i;

	fobj = reservation_object_get_list(bo->resv);
	fence = reservation_object_get_excl(bo->resv);
	if (fence && !fence->ops->signaled)
		dma_fence_enable_sw_signaling(fence);

	for (i = 0; fobj && i < fobj->shared_count; ++i) {
		fence = rcu_dereference_protected(fobj->shared[i],
					reservation_object_held(bo->resv));

		if (!fence->ops->signaled)
			dma_fence_enable_sw_signaling(fence);
	}
}

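/*
 * ttm_bo_cleanup_refs_or_queue() runs on final unref: if the buffer is
 * already idle it is destroyed immediately; otherwise software signaling is
 * enabled on all its fences and the buffer is queued on bdev->ddestroy for
 * the delayed-destroy worker to reap once the GPU is done with it.
 */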
static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_global *glob = bo->glob;
	int ret;

	spin_lock(&glob->lru_lock);
	ret = __ttm_bo_reserve(bo, false, true, NULL);

	if (!ret) {
		if (!ttm_bo_wait(bo, false, true)) {
			ttm_bo_del_from_lru(bo);
			spin_unlock(&glob->lru_lock);
			ttm_bo_cleanup_memtype_use(bo);

			return;
		} else
			ttm_bo_flush_all_fences(bo);

		/*
		 * Make NO_EVICT bos immediately available to
		 * shrinkers, now that they are queued for
		 * destruction.
		 */
		if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT) {
			bo->mem.placement &= ~TTM_PL_FLAG_NO_EVICT;
			ttm_bo_add_to_lru(bo);
		}

		__ttm_bo_unreserve(bo);
	}

	kref_get(&bo->list_kref);
	list_add_tail(&bo->ddestroy, &bdev->ddestroy);
	spin_unlock(&glob->lru_lock);

	schedule_delayed_work(&bdev->wq,
			      ((HZ / 100) < 1) ? 1 : HZ / 100);
}

/**
 * ttm_bo_cleanup_refs_and_unlock - If bo idle, remove from delayed- and
 * lru lists, and unref. If not idle, do nothing.
 *
 * Must be called with lru_lock and reservation held, this function
 * will drop both before returning.
 *
 * @interruptible:	Any sleeps should occur interruptibly.
 * @no_wait_gpu:	Never wait for gpu. Return -EBUSY instead.
 */
static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
					  bool interruptible,
					  bool no_wait_gpu)
{
	struct ttm_bo_global *glob = bo->glob;
	int ret;

	ret = ttm_bo_wait(bo, false, true);

	if (ret && !no_wait_gpu) {
		long lret;
		ww_mutex_unlock(&bo->resv->lock);
		spin_unlock(&glob->lru_lock);

		lret = reservation_object_wait_timeout_rcu(bo->resv,
							   true,
							   interruptible,
							   30 * HZ);

		if (lret < 0)
			return lret;
		else if (lret == 0)
			return -EBUSY;

		spin_lock(&glob->lru_lock);
		ret = __ttm_bo_reserve(bo, false, true, NULL);

		/*
		 * We raced, and lost, someone else holds the reservation now,
		 * and is probably busy in ttm_bo_cleanup_memtype_use.
		 *
		 * Even if it's not the case, because we finished waiting any
		 * delayed destruction would succeed, so just return success
		 * here.
		 */
		if (ret) {
			spin_unlock(&glob->lru_lock);
			return 0;
		}

		/*
		 * remove sync_obj with ttm_bo_wait, the wait should be
		 * finished, and no new wait object should have been added.
		 */
		ret = ttm_bo_wait(bo, false, true);
		WARN_ON(ret);
	}

	if (ret || unlikely(list_empty(&bo->ddestroy))) {
		__ttm_bo_unreserve(bo);
		spin_unlock(&glob->lru_lock);
		return ret;
	}

	ttm_bo_del_from_lru(bo);
	list_del_init(&bo->ddestroy);
	kref_put(&bo->list_kref, ttm_bo_ref_bug);

	spin_unlock(&glob->lru_lock);
	ttm_bo_cleanup_memtype_use(bo);

	return 0;
}

/**
 * Traverse the delayed list, and call ttm_bo_cleanup_refs on all
 * encountered buffers.
 */
static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
{
	struct ttm_bo_global *glob = bdev->glob;
	struct ttm_buffer_object *entry = NULL;
	int ret = 0;

	spin_lock(&glob->lru_lock);
	if (list_empty(&bdev->ddestroy))
		goto out_unlock;

	entry = list_first_entry(&bdev->ddestroy,
		struct ttm_buffer_object, ddestroy);
	kref_get(&entry->list_kref);

	for (;;) {
		struct ttm_buffer_object *nentry = NULL;

		if (entry->ddestroy.next != &bdev->ddestroy) {
			nentry = list_first_entry(&entry->ddestroy,
				struct ttm_buffer_object, ddestroy);
			kref_get(&nentry->list_kref);
		}

		ret = __ttm_bo_reserve(entry, false, true, NULL);
		if (remove_all && ret) {
			spin_unlock(&glob->lru_lock);
			ret = __ttm_bo_reserve(entry, false, false, NULL);
			spin_lock(&glob->lru_lock);
		}

		if (!ret)
			ret = ttm_bo_cleanup_refs_and_unlock(entry, false,
							     !remove_all);
		else
			spin_unlock(&glob->lru_lock);

		kref_put(&entry->list_kref, ttm_bo_release_list);
		entry = nentry;

		if (ret || !entry)
			goto out;

		spin_lock(&glob->lru_lock);
		if (list_empty(&entry->ddestroy))
			break;
	}

out_unlock:
	spin_unlock(&glob->lru_lock);
out:
	if (entry)
		kref_put(&entry->list_kref, ttm_bo_release_list);
	return ret;
}

static void ttm_bo_delayed_workqueue(struct work_struct *work)
{
	struct ttm_bo_device *bdev =
	    container_of(work, struct ttm_bo_device, wq.work);

	if (ttm_bo_delayed_delete(bdev, false)) {
		schedule_delayed_work(&bdev->wq,
				      ((HZ / 100) < 1) ? 1 : HZ / 100);
	}
}

static void ttm_bo_release(struct kref *kref)
{
	struct ttm_buffer_object *bo =
	    container_of(kref, struct ttm_buffer_object, kref);
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];

	drm_vma_offset_remove(&bdev->vma_manager, &bo->vma_node);
	ttm_mem_io_lock(man, false);
	ttm_mem_io_free_vm(bo);
	ttm_mem_io_unlock(man);
	ttm_bo_cleanup_refs_or_queue(bo);
	kref_put(&bo->list_kref, ttm_bo_release_list);
}

void ttm_bo_unref(struct ttm_buffer_object **p_bo)
{
	struct ttm_buffer_object *bo = *p_bo;

	*p_bo = NULL;
	kref_put(&bo->kref, ttm_bo_release);
}
EXPORT_SYMBOL(ttm_bo_unref);

int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev)
{
	return cancel_delayed_work_sync(&bdev->wq);
}
EXPORT_SYMBOL(ttm_bo_lock_delayed_workqueue);

void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched)
{
	if (resched)
		schedule_delayed_work(&bdev->wq,
				      ((HZ / 100) < 1) ? 1 : HZ / 100);
}
EXPORT_SYMBOL(ttm_bo_unlock_delayed_workqueue);

static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
			bool no_wait_gpu)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_reg evict_mem;
	struct ttm_placement placement;
	int ret = 0;

	lockdep_assert_held(&bo->resv->lock.base);

	evict_mem = bo->mem;
	evict_mem.mm_node = NULL;
	evict_mem.bus.io_reserved_vm = false;
	evict_mem.bus.io_reserved_count = 0;

	placement.num_placement = 0;
	placement.num_busy_placement = 0;
	bdev->driver->evict_flags(bo, &placement);
	ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible,
			       no_wait_gpu);
	if (ret) {
		if (ret != -ERESTARTSYS) {
			pr_err("Failed to find memory space for buffer 0x%p eviction\n",
			       bo);
			ttm_bo_mem_space_debug(bo, &placement);
		}
		goto out;
	}

	ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
				     no_wait_gpu);
	if (unlikely(ret)) {
		if (ret != -ERESTARTSYS)
			pr_err("Buffer eviction failed\n");
		ttm_bo_mem_put(bo, &evict_mem);
		goto out;
	}
	bo->evicted = true;
out:
	return ret;
}

bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
			      const struct ttm_place *place)
{
	/* Don't evict this BO if it's outside of the
	 * requested placement range. Note that fpfn/lpfn and mem.start are
	 * in pages, so compare against mem.num_pages rather than the
	 * byte-sized mem.size.
	 */
	if (place->fpfn >= (bo->mem.start + bo->mem.num_pages) ||
	    (place->lpfn && place->lpfn <= bo->mem.start))
		return false;

	return true;
}
EXPORT_SYMBOL(ttm_bo_eviction_valuable);

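/*
 * ttm_mem_evict_first() walks the LRU lists of @mem_type in priority order,
 * reserves the first buffer the driver considers worth evicting, and either
 * finishes its pending delayed destruction or evicts it via ttm_bo_evict().
 */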
static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
			       uint32_t mem_type,
			       const struct ttm_place *place,
			       bool interruptible,
			       bool no_wait_gpu)
{
	struct ttm_bo_global *glob = bdev->glob;
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
	struct ttm_buffer_object *bo;
	int ret = -EBUSY;
	unsigned i;

	spin_lock(&glob->lru_lock);
	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
		list_for_each_entry(bo, &man->lru[i], lru) {
			ret = __ttm_bo_reserve(bo, false, true, NULL);
			if (ret)
				continue;

			if (place && !bdev->driver->eviction_valuable(bo,
								      place)) {
				__ttm_bo_unreserve(bo);
				ret = -EBUSY;
				continue;
			}

			break;
		}

		if (!ret)
			break;
	}

	if (ret) {
		spin_unlock(&glob->lru_lock);
		return ret;
	}

	kref_get(&bo->list_kref);

	if (!list_empty(&bo->ddestroy)) {
		ret = ttm_bo_cleanup_refs_and_unlock(bo, interruptible,
						     no_wait_gpu);
		kref_put(&bo->list_kref, ttm_bo_release_list);
		return ret;
	}

	ttm_bo_del_from_lru(bo);
	spin_unlock(&glob->lru_lock);

	BUG_ON(ret != 0);

	ret = ttm_bo_evict(bo, interruptible, no_wait_gpu);
	ttm_bo_unreserve(bo);

	kref_put(&bo->list_kref, ttm_bo_release_list);
	return ret;
}

void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bo->bdev->man[mem->mem_type];

	if (mem->mm_node)
		(*man->func->put_node)(man, mem);
}
EXPORT_SYMBOL(ttm_bo_mem_put);

/**
 * Add the last move fence to the BO and reserve a new shared slot.
 */
static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
				 struct ttm_mem_type_manager *man,
				 struct ttm_mem_reg *mem)
{
	struct dma_fence *fence;
	int ret;

	spin_lock(&man->move_lock);
	fence = dma_fence_get(man->move);
	spin_unlock(&man->move_lock);

	if (fence) {
		reservation_object_add_shared_fence(bo->resv, fence);

		ret = reservation_object_reserve_shared(bo->resv);
		if (unlikely(ret))
			return ret;

		dma_fence_put(bo->moving);
		bo->moving = fence;
	}

	return 0;
}

/**
 * Repeatedly evict memory from the LRU for @mem_type until we create enough
 * space, or we've evicted everything and there isn't enough space.
 */
static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
				  uint32_t mem_type,
				  const struct ttm_place *place,
				  struct ttm_mem_reg *mem,
				  bool interruptible,
				  bool no_wait_gpu)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
	int ret;

	do {
		ret = (*man->func->get_node)(man, bo, place, mem);
		if (unlikely(ret != 0))
			return ret;
		if (mem->mm_node)
			break;
		ret = ttm_mem_evict_first(bdev, mem_type, place,
					  interruptible, no_wait_gpu);
		if (unlikely(ret != 0))
			return ret;
	} while (1);
	mem->mem_type = mem_type;
	return ttm_bo_add_move_fence(bo, man, mem);
}

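/*
 * ttm_bo_select_caching() picks the caching bits for a new placement: it
 * prefers keeping the buffer's current caching, then the manager's default
 * caching, and finally falls back through cached, write-combined and
 * uncached, in that order.
 */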
static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
				      uint32_t cur_placement,
				      uint32_t proposed_placement)
{
	uint32_t caching = proposed_placement & TTM_PL_MASK_CACHING;
	uint32_t result = proposed_placement & ~TTM_PL_MASK_CACHING;

	/**
	 * Keep current caching if possible.
	 */
	if ((cur_placement & caching) != 0)
		result |= (cur_placement & caching);
	else if ((man->default_caching & caching) != 0)
		result |= man->default_caching;
	else if ((TTM_PL_FLAG_CACHED & caching) != 0)
		result |= TTM_PL_FLAG_CACHED;
	else if ((TTM_PL_FLAG_WC & caching) != 0)
		result |= TTM_PL_FLAG_WC;
	else if ((TTM_PL_FLAG_UNCACHED & caching) != 0)
		result |= TTM_PL_FLAG_UNCACHED;

	return result;
}

static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
				 uint32_t mem_type,
				 const struct ttm_place *place,
				 uint32_t *masked_placement)
{
	uint32_t cur_flags = ttm_bo_type_flags(mem_type);

	if ((cur_flags & place->flags & TTM_PL_MASK_MEM) == 0)
		return false;

	if ((place->flags & man->available_caching) == 0)
		return false;

	cur_flags |= (place->flags & man->available_caching);

	*masked_placement = cur_flags;
	return true;
}

/**
 * Creates space for memory region @mem according to its type.
 *
 * This function first searches for free space in compatible memory types in
 * the priority order defined by the driver. If free space isn't found, then
 * ttm_bo_mem_force_space is attempted in priority order to evict and find
 * space.
 */
int ttm_bo_mem_space(struct ttm_buffer_object *bo,
			struct ttm_placement *placement,
			struct ttm_mem_reg *mem,
			bool interruptible,
			bool no_wait_gpu)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man;
	uint32_t mem_type = TTM_PL_SYSTEM;
	uint32_t cur_flags = 0;
	bool type_found = false;
	bool type_ok = false;
	bool has_erestartsys = false;
	int i, ret;

	ret = reservation_object_reserve_shared(bo->resv);
	if (unlikely(ret))
		return ret;

	mem->mm_node = NULL;
	for (i = 0; i < placement->num_placement; ++i) {
		const struct ttm_place *place = &placement->placement[i];

		ret = ttm_mem_type_from_place(place, &mem_type);
		if (ret)
			return ret;
		man = &bdev->man[mem_type];
		if (!man->has_type || !man->use_type)
			continue;

		type_ok = ttm_bo_mt_compatible(man, mem_type, place,
						&cur_flags);

		if (!type_ok)
			continue;

		type_found = true;
		cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
						  cur_flags);
		/*
		 * Use the access and other non-mapping-related flag bits from
		 * the memory placement flags to the current flags
		 */
		ttm_flag_masked(&cur_flags, place->flags,
				~TTM_PL_MASK_MEMTYPE);

		if (mem_type == TTM_PL_SYSTEM)
			break;

		ret = (*man->func->get_node)(man, bo, place, mem);
		if (unlikely(ret))
			return ret;

		if (mem->mm_node) {
			ret = ttm_bo_add_move_fence(bo, man, mem);
			if (unlikely(ret)) {
				(*man->func->put_node)(man, mem);
				return ret;
			}
			break;
		}
	}

	if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || mem->mm_node) {
		mem->mem_type = mem_type;
		mem->placement = cur_flags;
		return 0;
	}

	for (i = 0; i < placement->num_busy_placement; ++i) {
		const struct ttm_place *place = &placement->busy_placement[i];

		ret = ttm_mem_type_from_place(place, &mem_type);
		if (ret)
			return ret;
		man = &bdev->man[mem_type];
		if (!man->has_type || !man->use_type)
			continue;
		if (!ttm_bo_mt_compatible(man, mem_type, place, &cur_flags))
			continue;

		type_found = true;
		cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
						  cur_flags);
		/*
		 * Use the access and other non-mapping-related flag bits from
		 * the memory placement flags to the current flags
		 */
		ttm_flag_masked(&cur_flags, place->flags,
				~TTM_PL_MASK_MEMTYPE);

		if (mem_type == TTM_PL_SYSTEM) {
			mem->mem_type = mem_type;
			mem->placement = cur_flags;
			mem->mm_node = NULL;
			return 0;
		}

		ret = ttm_bo_mem_force_space(bo, mem_type, place, mem,
					     interruptible, no_wait_gpu);
		if (ret == 0 && mem->mm_node) {
			mem->placement = cur_flags;
			return 0;
		}
		if (ret == -ERESTARTSYS)
			has_erestartsys = true;
	}

	if (!type_found) {
		/* pr_fmt() already adds the "[TTM] " prefix; don't add TTM_PFX again */
		pr_err("No compatible memory type found\n");
		return -EINVAL;
	}

	return (has_erestartsys) ? -ERESTARTSYS : -ENOMEM;
}
EXPORT_SYMBOL(ttm_bo_mem_space);

static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
			      struct ttm_placement *placement,
			      bool interruptible,
			      bool no_wait_gpu)
{
	int ret = 0;
	struct ttm_mem_reg mem;

	lockdep_assert_held(&bo->resv->lock.base);

	mem.num_pages = bo->num_pages;
	mem.size = mem.num_pages << PAGE_SHIFT;
	mem.page_alignment = bo->mem.page_alignment;
	mem.bus.io_reserved_vm = false;
	mem.bus.io_reserved_count = 0;
	/*
	 * Determine where to move the buffer.
	 */
	ret = ttm_bo_mem_space(bo, placement, &mem,
			       interruptible, no_wait_gpu);
	if (ret)
		goto out_unlock;
	ret = ttm_bo_handle_move_mem(bo, &mem, false,
				     interruptible, no_wait_gpu);
out_unlock:
	if (ret && mem.mm_node)
		ttm_bo_mem_put(bo, &mem);
	return ret;
}

static bool ttm_bo_places_compat(const struct ttm_place *places,
				 unsigned num_placement,
				 struct ttm_mem_reg *mem,
				 uint32_t *new_flags)
{
	unsigned i;

	for (i = 0; i < num_placement; i++) {
		const struct ttm_place *heap = &places[i];

		if (mem->mm_node && (mem->start < heap->fpfn ||
		     (heap->lpfn != 0 && (mem->start + mem->num_pages) > heap->lpfn)))
			continue;

		*new_flags = heap->flags;
		if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) &&
		    (*new_flags & mem->placement & TTM_PL_MASK_MEM) &&
		    (!(*new_flags & TTM_PL_FLAG_CONTIGUOUS) ||
		     (mem->placement & TTM_PL_FLAG_CONTIGUOUS)))
			return true;
	}
	return false;
}

bool ttm_bo_mem_compat(struct ttm_placement *placement,
		       struct ttm_mem_reg *mem,
		       uint32_t *new_flags)
{
	if (ttm_bo_places_compat(placement->placement, placement->num_placement,
				 mem, new_flags))
		return true;

	if ((placement->busy_placement != placement->placement ||
	     placement->num_busy_placement > placement->num_placement) &&
	    ttm_bo_places_compat(placement->busy_placement,
				 placement->num_busy_placement,
				 mem, new_flags))
		return true;

	return false;
}
EXPORT_SYMBOL(ttm_bo_mem_compat);

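/*
 * ttm_bo_validate() is the main entry point for (re)placing a buffer: if the
 * current memory region already satisfies @placement, only the placement
 * flags are refreshed; otherwise the buffer is moved via ttm_bo_move_buffer().
 */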
int ttm_bo_validate(struct ttm_buffer_object *bo,
		    struct ttm_placement *placement,
		    bool interruptible,
		    bool no_wait_gpu)
{
	int ret;
	uint32_t new_flags;

	lockdep_assert_held(&bo->resv->lock.base);
	/*
	 * Check whether we need to move buffer.
	 */
	if (!ttm_bo_mem_compat(placement, &bo->mem, &new_flags)) {
		ret = ttm_bo_move_buffer(bo, placement, interruptible,
					 no_wait_gpu);
		if (ret)
			return ret;
	} else {
		/*
		 * Use the access and other non-mapping-related flag bits from
		 * the compatible memory placement flags to the active flags
		 */
		ttm_flag_masked(&bo->mem.placement, new_flags,
				~TTM_PL_MASK_MEMTYPE);
	}
	/*
	 * We might need to add a TTM.
	 */
	if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
		ret = ttm_bo_add_ttm(bo, true);
		if (ret)
			return ret;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_bo_validate);

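/*
 * ttm_bo_init_reserved() initializes an embedded buffer object and, on
 * success, returns with the reservation held: either the bo's own
 * reservation taken via trylock here, or the caller-supplied @resv which
 * the caller must already hold. On error the buffer is destroyed through
 * ttm_bo_unref() and must not be touched again.
 */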
int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
			 struct ttm_buffer_object *bo,
			 unsigned long size,
			 enum ttm_bo_type type,
			 struct ttm_placement *placement,
			 uint32_t page_alignment,
			 bool interruptible,
			 struct file *persistent_swap_storage,
			 size_t acc_size,
			 struct sg_table *sg,
			 struct reservation_object *resv,
			 void (*destroy) (struct ttm_buffer_object *))
{
	int ret = 0;
	unsigned long num_pages;
	struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
	bool locked;

	ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
	if (ret) {
		pr_err("Out of kernel memory\n");
		if (destroy)
			(*destroy)(bo);
		else
			kfree(bo);
		return -ENOMEM;
	}

	num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (num_pages == 0) {
		pr_err("Illegal buffer object size\n");
		if (destroy)
			(*destroy)(bo);
		else
			kfree(bo);
		ttm_mem_global_free(mem_glob, acc_size);
		return -EINVAL;
	}
	bo->destroy = destroy;

	kref_init(&bo->kref);
	kref_init(&bo->list_kref);
	atomic_set(&bo->cpu_writers, 0);
	INIT_LIST_HEAD(&bo->lru);
	INIT_LIST_HEAD(&bo->ddestroy);
	INIT_LIST_HEAD(&bo->swap);
	INIT_LIST_HEAD(&bo->io_reserve_lru);
	mutex_init(&bo->wu_mutex);
	bo->bdev = bdev;
	bo->glob = bdev->glob;
	bo->type = type;
	bo->num_pages = num_pages;
	bo->mem.size = num_pages << PAGE_SHIFT;
	bo->mem.mem_type = TTM_PL_SYSTEM;
	bo->mem.num_pages = bo->num_pages;
	bo->mem.mm_node = NULL;
	bo->mem.page_alignment = page_alignment;
	bo->mem.bus.io_reserved_vm = false;
	bo->mem.bus.io_reserved_count = 0;
	bo->moving = NULL;
	bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
	bo->persistent_swap_storage = persistent_swap_storage;
	bo->acc_size = acc_size;
	bo->sg = sg;
	if (resv) {
		bo->resv = resv;
		lockdep_assert_held(&bo->resv->lock.base);
	} else {
		bo->resv = &bo->ttm_resv;
		reservation_object_init(&bo->ttm_resv);
	}
	atomic_inc(&bo->glob->bo_count);
	drm_vma_node_reset(&bo->vma_node);
	bo->priority = 0;

	/*
	 * For ttm_bo_type_device buffers, allocate
	 * address space from the device.
	 */
	if (bo->type == ttm_bo_type_device ||
	    bo->type == ttm_bo_type_sg)
		ret = drm_vma_offset_add(&bdev->vma_manager, &bo->vma_node,
					 bo->mem.num_pages);

	/* passed reservation objects should already be locked,
	 * since otherwise lockdep will be angered in radeon.
	 */
	if (!resv) {
		locked = ww_mutex_trylock(&bo->resv->lock);
		WARN_ON(!locked);
	}

	if (likely(!ret))
		ret = ttm_bo_validate(bo, placement, interruptible, false);

	if (unlikely(ret)) {
		if (!resv)
			ttm_bo_unreserve(bo);

		ttm_bo_unref(&bo);
		return ret;
	}

	if (resv && !(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
		spin_lock(&bo->glob->lru_lock);
		ttm_bo_add_to_lru(bo);
		spin_unlock(&bo->glob->lru_lock);
	}

	return ret;
}
EXPORT_SYMBOL(ttm_bo_init_reserved);

int ttm_bo_init(struct ttm_bo_device *bdev,
		struct ttm_buffer_object *bo,
		unsigned long size,
		enum ttm_bo_type type,
		struct ttm_placement *placement,
		uint32_t page_alignment,
		bool interruptible,
		struct file *persistent_swap_storage,
		size_t acc_size,
		struct sg_table *sg,
		struct reservation_object *resv,
		void (*destroy) (struct ttm_buffer_object *))
{
	int ret;

	ret = ttm_bo_init_reserved(bdev, bo, size, type, placement,
				   page_alignment, interruptible,
				   persistent_swap_storage, acc_size,
				   sg, resv, destroy);
	if (ret)
		return ret;

	if (!resv)
		ttm_bo_unreserve(bo);

	return 0;
}
EXPORT_SYMBOL(ttm_bo_init);

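/*
 * Typical driver usage (an illustrative sketch; "struct my_bo", "mybo" and
 * "my_bo_destroy" are hypothetical driver names, not part of this file):
 *
 *	acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct my_bo));
 *	ret = ttm_bo_init(bdev, &mybo->base, size, ttm_bo_type_device,
 *			  &placement, 0, false, NULL, acc_size,
 *			  NULL, NULL, my_bo_destroy);
 *
 * The acc_size helpers below compute how much to charge the kernel-memory
 * accounting for a buffer object of the given size, including the embedded
 * struct and the page (and, for the DMA variant, dma_addr_t) arrays.
 */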
size_t ttm_bo_acc_size(struct ttm_bo_device *bdev,
		       unsigned long bo_size,
		       unsigned struct_size)
{
	unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
	size_t size = 0;

	size += ttm_round_pot(struct_size);
	size += ttm_round_pot(npages * sizeof(void *));
	size += ttm_round_pot(sizeof(struct ttm_tt));
	return size;
}
EXPORT_SYMBOL(ttm_bo_acc_size);

size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
			   unsigned long bo_size,
			   unsigned struct_size)
{
	unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
	size_t size = 0;

	size += ttm_round_pot(struct_size);
	size += ttm_round_pot(npages * (2*sizeof(void *) + sizeof(dma_addr_t)));
	size += ttm_round_pot(sizeof(struct ttm_dma_tt));
	return size;
}
EXPORT_SYMBOL(ttm_bo_dma_acc_size);

int ttm_bo_create(struct ttm_bo_device *bdev,
		  unsigned long size,
		  enum ttm_bo_type type,
		  struct ttm_placement *placement,
		  uint32_t page_alignment,
		  bool interruptible,
		  struct file *persistent_swap_storage,
		  struct ttm_buffer_object **p_bo)
{
	struct ttm_buffer_object *bo;
	size_t acc_size;
	int ret;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (unlikely(bo == NULL))
		return -ENOMEM;

	acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct ttm_buffer_object));
	ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment,
			  interruptible, persistent_swap_storage, acc_size,
			  NULL, NULL, NULL);
	if (likely(ret == 0))
		*p_bo = bo;

	return ret;
}
EXPORT_SYMBOL(ttm_bo_create);

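/*
 * ttm_bo_force_list_clean() evicts every buffer on the LRU lists of
 * @mem_type and then waits for the memory type's last pipelined move fence,
 * so callers know the memory type is fully idle afterwards.
 */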
static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
				   unsigned mem_type)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
	struct ttm_bo_global *glob = bdev->glob;
	struct dma_fence *fence;
	int ret;
	unsigned i;

	/*
	 * Can't use standard list traversal since we're unlocking.
	 */

	spin_lock(&glob->lru_lock);
	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
		while (!list_empty(&man->lru[i])) {
			spin_unlock(&glob->lru_lock);
			ret = ttm_mem_evict_first(bdev, mem_type, NULL, false, false);
			if (ret)
				return ret;
			spin_lock(&glob->lru_lock);
		}
	}
	spin_unlock(&glob->lru_lock);

	spin_lock(&man->move_lock);
	fence = dma_fence_get(man->move);
	spin_unlock(&man->move_lock);

	if (fence) {
		ret = dma_fence_wait(fence, false);
		dma_fence_put(fence);
		if (ret)
			return ret;
	}

	return 0;
}

int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
{
	struct ttm_mem_type_manager *man;
	int ret = -EINVAL;

	if (mem_type >= TTM_NUM_MEM_TYPES) {
		pr_err("Illegal memory type %d\n", mem_type);
		return ret;
	}
	man = &bdev->man[mem_type];

	if (!man->has_type) {
		pr_err("Trying to take down uninitialized memory manager type %u\n",
		       mem_type);
		return ret;
	}
	dma_fence_put(man->move);

	man->use_type = false;
	man->has_type = false;

	ret = 0;
	if (mem_type > 0) {
		ret = ttm_bo_force_list_clean(bdev, mem_type);
		if (ret) {
			pr_err("Cleanup eviction failed\n");
			return ret;
		}

		ret = (*man->func->takedown)(man);
	}

	return ret;
}
EXPORT_SYMBOL(ttm_bo_clean_mm);

int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];

	if (mem_type == 0 || mem_type >= TTM_NUM_MEM_TYPES) {
		pr_err("Illegal memory manager memory type %u\n", mem_type);
		return -EINVAL;
	}

	if (!man->has_type) {
		pr_err("Memory type %u has not been initialized\n", mem_type);
		return 0;
	}

	return ttm_bo_force_list_clean(bdev, mem_type);
}
EXPORT_SYMBOL(ttm_bo_evict_mm);

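/*
 * ttm_bo_init_mm() brings up the memory type manager for @type: it lets the
 * driver fill in the manager via init_mem_type(), initializes the
 * type-specific allocator for everything except TTM_PL_SYSTEM, and sets up
 * the per-priority LRU lists.
 */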
int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
			unsigned long p_size)
{
	int ret;
	struct ttm_mem_type_manager *man;
	unsigned i;

	BUG_ON(type >= TTM_NUM_MEM_TYPES);
	man = &bdev->man[type];
	BUG_ON(man->has_type);
	man->io_reserve_fastpath = true;
	man->use_io_reserve_lru = false;
	mutex_init(&man->io_reserve_mutex);
	spin_lock_init(&man->move_lock);
	INIT_LIST_HEAD(&man->io_reserve_lru);

	ret = bdev->driver->init_mem_type(bdev, type, man);
	if (ret)
		return ret;
	man->bdev = bdev;

	if (type != TTM_PL_SYSTEM) {
		ret = (*man->func->init)(man, p_size);
		if (ret)
			return ret;
	}
	man->has_type = true;
	man->use_type = true;
	man->size = p_size;

	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
		INIT_LIST_HEAD(&man->lru[i]);
	man->move = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_init_mm);

static void ttm_bo_global_kobj_release(struct kobject *kobj)
{
	struct ttm_bo_global *glob =
		container_of(kobj, struct ttm_bo_global, kobj);

	ttm_mem_unregister_shrink(glob->mem_glob, &glob->shrink);
	__free_page(glob->dummy_read_page);
	kfree(glob);
}

void ttm_bo_global_release(struct drm_global_reference *ref)
{
	struct ttm_bo_global *glob = ref->object;

	kobject_del(&glob->kobj);
	kobject_put(&glob->kobj);
}
EXPORT_SYMBOL(ttm_bo_global_release);

int ttm_bo_global_init(struct drm_global_reference *ref)
{
	struct ttm_bo_global_ref *bo_ref =
		container_of(ref, struct ttm_bo_global_ref, ref);
	struct ttm_bo_global *glob = ref->object;
	int ret;
	unsigned i;

	mutex_init(&glob->device_list_mutex);
	spin_lock_init(&glob->lru_lock);
	glob->mem_glob = bo_ref->mem_glob;
	glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);

	if (unlikely(glob->dummy_read_page == NULL)) {
		ret = -ENOMEM;
		goto out_no_drp;
	}

	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
		INIT_LIST_HEAD(&glob->swap_lru[i]);
	INIT_LIST_HEAD(&glob->device_list);

	ttm_mem_init_shrink(&glob->shrink, ttm_bo_swapout);
	ret = ttm_mem_register_shrink(glob->mem_glob, &glob->shrink);
	if (unlikely(ret != 0)) {
		pr_err("Could not register buffer object swapout\n");
		goto out_no_shrink;
	}

	atomic_set(&glob->bo_count, 0);

	ret = kobject_init_and_add(
		&glob->kobj, &ttm_bo_glob_kobj_type, ttm_get_kobj(), "buffer_objects");
	if (unlikely(ret != 0))
		kobject_put(&glob->kobj);
	return ret;
out_no_shrink:
	__free_page(glob->dummy_read_page);
out_no_drp:
	kfree(glob);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_global_init);

int ttm_bo_device_release(struct ttm_bo_device *bdev)
{
	int ret = 0;
	unsigned i = TTM_NUM_MEM_TYPES;
	struct ttm_mem_type_manager *man;
	struct ttm_bo_global *glob = bdev->glob;

	while (i--) {
		man = &bdev->man[i];
		if (man->has_type) {
			man->use_type = false;
			if ((i != TTM_PL_SYSTEM) && ttm_bo_clean_mm(bdev, i)) {
				ret = -EBUSY;
				pr_err("DRM memory manager type %d is not clean\n",
				       i);
			}
			man->has_type = false;
		}
	}

	mutex_lock(&glob->device_list_mutex);
	list_del(&bdev->device_list);
	mutex_unlock(&glob->device_list_mutex);

	cancel_delayed_work_sync(&bdev->wq);

	while (ttm_bo_delayed_delete(bdev, true))
		;

	spin_lock(&glob->lru_lock);
	if (list_empty(&bdev->ddestroy))
		TTM_DEBUG("Delayed destroy list was clean\n");

	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
		/* check the per-priority swap lists, not man[0].lru[0] */
		if (list_empty(&glob->swap_lru[i]))
			TTM_DEBUG("Swap list %d was clean\n", i);
	spin_unlock(&glob->lru_lock);

	drm_vma_offset_manager_destroy(&bdev->vma_manager);

	return ret;
}
EXPORT_SYMBOL(ttm_bo_device_release);

int ttm_bo_device_init(struct ttm_bo_device *bdev,
		       struct ttm_bo_global *glob,
		       struct ttm_bo_driver *driver,
		       struct address_space *mapping,
		       uint64_t file_page_offset,
		       bool need_dma32)
{
	int ret = -EINVAL;

	bdev->driver = driver;

	memset(bdev->man, 0, sizeof(bdev->man));

	/*
	 * Initialize the system memory buffer type.
	 * Other types need to be driver / IOCTL initialized.
	 */
	ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0);
	if (unlikely(ret != 0))
		goto out_no_sys;

	drm_vma_offset_manager_init(&bdev->vma_manager, file_page_offset,
				    0x10000000);
	INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
	INIT_LIST_HEAD(&bdev->ddestroy);
	bdev->dev_mapping = mapping;
	bdev->glob = glob;
	bdev->need_dma32 = need_dma32;
	mutex_lock(&glob->device_list_mutex);
	list_add_tail(&bdev->device_list, &glob->device_list);
	mutex_unlock(&glob->device_list_mutex);

	return 0;
out_no_sys:
	return ret;
}
EXPORT_SYMBOL(ttm_bo_device_init);

/*
 * buffer object vm functions.
 */

bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

	if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
		if (mem->mem_type == TTM_PL_SYSTEM)
			return false;

		if (man->flags & TTM_MEMTYPE_FLAG_CMA)
			return false;

		if (mem->placement & TTM_PL_FLAG_CACHED)
			return false;
	}
	return true;
}

void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;

	drm_vma_node_unmap(&bo->vma_node, bdev->dev_mapping);
	ttm_mem_io_free_vm(bo);
}

void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];

	ttm_mem_io_lock(man, false);
	ttm_bo_unmap_virtual_locked(bo);
	ttm_mem_io_unlock(man);
}
EXPORT_SYMBOL(ttm_bo_unmap_virtual);

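/*
 * ttm_bo_wait() waits for all fences on the buffer's reservation object;
 * on success the buffer is known to be idle, so the fences are cleared by
 * installing a NULL exclusive fence. With @no_wait it only polls and
 * returns -EBUSY if the buffer is still busy.
 */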
int ttm_bo_wait(struct ttm_buffer_object *bo,
		bool interruptible, bool no_wait)
{
	long timeout = 15 * HZ;

	if (no_wait) {
		if (reservation_object_test_signaled_rcu(bo->resv, true))
			return 0;
		else
			return -EBUSY;
	}

	timeout = reservation_object_wait_timeout_rcu(bo->resv, true,
						      interruptible, timeout);
	if (timeout < 0)
		return timeout;

	if (timeout == 0)
		return -EBUSY;

	reservation_object_add_excl_fence(bo->resv, NULL);
	return 0;
}
EXPORT_SYMBOL(ttm_bo_wait);

int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
{
	int ret = 0;

	/*
	 * Using ttm_bo_reserve makes sure the lru lists are updated.
	 */

	ret = ttm_bo_reserve(bo, true, no_wait, NULL);
	if (unlikely(ret != 0))
		return ret;
	ret = ttm_bo_wait(bo, true, no_wait);
	if (likely(ret == 0))
		atomic_inc(&bo->cpu_writers);
	ttm_bo_unreserve(bo);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_synccpu_write_grab);

void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
{
	atomic_dec(&bo->cpu_writers);
}
EXPORT_SYMBOL(ttm_bo_synccpu_write_release);

/**
 * A buffer object shrink method that tries to swap out the first
 * buffer object on the bo_global::swap_lru list.
 */
static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
{
	struct ttm_bo_global *glob =
	    container_of(shrink, struct ttm_bo_global, shrink);
	struct ttm_buffer_object *bo;
	int ret = -EBUSY;
	unsigned i;

	spin_lock(&glob->lru_lock);
	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
		list_for_each_entry(bo, &glob->swap_lru[i], swap) {
			ret = __ttm_bo_reserve(bo, false, true, NULL);
			if (!ret)
				break;
		}
		if (!ret)
			break;
	}

	if (ret) {
		spin_unlock(&glob->lru_lock);
		return ret;
	}

	kref_get(&bo->list_kref);

	if (!list_empty(&bo->ddestroy)) {
		ret = ttm_bo_cleanup_refs_and_unlock(bo, false, false);
		kref_put(&bo->list_kref, ttm_bo_release_list);
		return ret;
	}

	ttm_bo_del_from_lru(bo);
	spin_unlock(&glob->lru_lock);

	/**
	 * Move to system cached
	 */

	if (bo->mem.mem_type != TTM_PL_SYSTEM ||
	    bo->ttm->caching_state != tt_cached) {
		struct ttm_mem_reg evict_mem;

		evict_mem = bo->mem;
		evict_mem.mm_node = NULL;
		evict_mem.placement = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
		evict_mem.mem_type = TTM_PL_SYSTEM;

		ret = ttm_bo_handle_move_mem(bo, &evict_mem, true,
					     false, false);
		if (unlikely(ret != 0))
			goto out;
	}

	/**
	 * Make sure BO is idle.
	 */

	ret = ttm_bo_wait(bo, false, false);
	if (unlikely(ret != 0))
		goto out;

	ttm_bo_unmap_virtual(bo);

	/**
	 * Swap out. Buffer will be swapped in again as soon as
	 * anyone tries to access a ttm page.
	 */

	if (bo->bdev->driver->swap_notify)
		bo->bdev->driver->swap_notify(bo);

	ret = ttm_tt_swapout(bo->ttm, bo->persistent_swap_storage);
out:

	/**
	 * Unreserve without putting on LRU to avoid swapping out an
	 * already swapped buffer.
	 */

	__ttm_bo_unreserve(bo);
	kref_put(&bo->list_kref, ttm_bo_release_list);
	return ret;
}

void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
{
	while (ttm_bo_swapout(&bdev->glob->shrink) == 0)
		;
}
EXPORT_SYMBOL(ttm_bo_swapout_all);

/**
 * ttm_bo_wait_unreserved - interruptible wait for a buffer object to become
 * unreserved
 *
 * @bo: Pointer to buffer
 */
int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo)
{
	int ret;

	/*
	 * In the absence of a wait_unlocked API,
	 * use the bo::wu_mutex to avoid triggering livelocks due to
	 * concurrent use of this function. Note that this use of
	 * bo::wu_mutex can go away if we change locking order to
	 * mmap_sem -> bo::reserve.
	 */
	ret = mutex_lock_interruptible(&bo->wu_mutex);
	if (unlikely(ret != 0))
		return -ERESTARTSYS;
	if (!ww_mutex_is_locked(&bo->resv->lock))
		goto out_unlock;
	ret = __ttm_bo_reserve(bo, true, false, NULL);
	if (unlikely(ret != 0))
		goto out_unlock;
	__ttm_bo_unreserve(bo);

out_unlock:
	mutex_unlock(&bo->wu_mutex);
	return ret;
}