  1. /* SPDX-License-Identifier: GPL-2.0 OR MIT */
  2. /**************************************************************************
  3. *
  4. * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
  5. * All Rights Reserved.
  6. *
  7. * Permission is hereby granted, free of charge, to any person obtaining a
  8. * copy of this software and associated documentation files (the
  9. * "Software"), to deal in the Software without restriction, including
  10. * without limitation the rights to use, copy, modify, merge, publish,
  11. * distribute, sub license, and/or sell copies of the Software, and to
  12. * permit persons to whom the Software is furnished to do so, subject to
  13. * the following conditions:
  14. *
  15. * The above copyright notice and this permission notice (including the
  16. * next paragraph) shall be included in all copies or substantial portions
  17. * of the Software.
  18. *
  19. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  20. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  21. * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
  22. * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
  23. * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
  24. * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
  25. * USE OR OTHER DEALINGS IN THE SOFTWARE.
  26. *
  27. **************************************************************************/
  28. /*
  29. * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
  30. */
  31. #define pr_fmt(fmt) "[TTM] " fmt
  32. #include <drm/ttm/ttm_module.h>
  33. #include <drm/ttm/ttm_bo_driver.h>
  34. #include <drm/ttm/ttm_placement.h>
  35. #include <linux/jiffies.h>
  36. #include <linux/slab.h>
  37. #include <linux/sched.h>
  38. #include <linux/mm.h>
  39. #include <linux/file.h>
  40. #include <linux/module.h>
  41. #include <linux/atomic.h>
  42. #include <linux/reservation.h>
  43. static void ttm_bo_global_kobj_release(struct kobject *kobj);
  44. static struct attribute ttm_bo_count = {
  45. .name = "bo_count",
  46. .mode = S_IRUGO
  47. };
  48. /* default destructor */
  49. static void ttm_bo_default_destroy(struct ttm_buffer_object *bo)
  50. {
  51. kfree(bo);
  52. }
  53. static inline int ttm_mem_type_from_place(const struct ttm_place *place,
  54. uint32_t *mem_type)
  55. {
  56. int pos;
  57. pos = ffs(place->flags & TTM_PL_MASK_MEM);
  58. if (unlikely(!pos))
  59. return -EINVAL;
  60. *mem_type = pos - 1;
  61. return 0;
  62. }
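/*
 * Illustrative example: a place whose flags contain TTM_PL_FLAG_VRAM
 * (i.e. bit TTM_PL_VRAM set within TTM_PL_MASK_MEM) gives
 * ffs(TTM_PL_FLAG_VRAM) - 1 == TTM_PL_VRAM, so *mem_type becomes
 * TTM_PL_VRAM. A place with no memory type bit set yields -EINVAL.
 */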
  63. static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type)
  64. {
  65. struct ttm_mem_type_manager *man = &bdev->man[mem_type];
  66. struct drm_printer p = drm_debug_printer(TTM_PFX);
  67. pr_err(" has_type: %d\n", man->has_type);
  68. pr_err(" use_type: %d\n", man->use_type);
  69. pr_err(" flags: 0x%08X\n", man->flags);
  70. pr_err(" gpu_offset: 0x%08llX\n", man->gpu_offset);
  71. pr_err(" size: %llu\n", man->size);
  72. pr_err(" available_caching: 0x%08X\n", man->available_caching);
  73. pr_err(" default_caching: 0x%08X\n", man->default_caching);
  74. if (mem_type != TTM_PL_SYSTEM)
  75. (*man->func->debug)(man, &p);
  76. }
  77. static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
  78. struct ttm_placement *placement)
  79. {
  80. int i, ret, mem_type;
  81. pr_err("No space for %p (%lu pages, %luK, %luM)\n",
  82. bo, bo->mem.num_pages, bo->mem.size >> 10,
  83. bo->mem.size >> 20);
  84. for (i = 0; i < placement->num_placement; i++) {
  85. ret = ttm_mem_type_from_place(&placement->placement[i],
  86. &mem_type);
  87. if (ret)
  88. return;
  89. pr_err(" placement[%d]=0x%08X (%d)\n",
  90. i, placement->placement[i].flags, mem_type);
  91. ttm_mem_type_debug(bo->bdev, mem_type);
  92. }
  93. }
  94. static ssize_t ttm_bo_global_show(struct kobject *kobj,
  95. struct attribute *attr,
  96. char *buffer)
  97. {
  98. struct ttm_bo_global *glob =
  99. container_of(kobj, struct ttm_bo_global, kobj);
  100. return snprintf(buffer, PAGE_SIZE, "%d\n",
  101. atomic_read(&glob->bo_count));
  102. }
  103. static struct attribute *ttm_bo_global_attrs[] = {
  104. &ttm_bo_count,
  105. NULL
  106. };
  107. static const struct sysfs_ops ttm_bo_global_ops = {
  108. .show = &ttm_bo_global_show
  109. };
  110. static struct kobj_type ttm_bo_glob_kobj_type = {
  111. .release = &ttm_bo_global_kobj_release,
  112. .sysfs_ops = &ttm_bo_global_ops,
  113. .default_attrs = ttm_bo_global_attrs
  114. };
  115. static inline uint32_t ttm_bo_type_flags(unsigned type)
  116. {
  117. return 1 << (type);
  118. }
  119. static void ttm_bo_release_list(struct kref *list_kref)
  120. {
  121. struct ttm_buffer_object *bo =
  122. container_of(list_kref, struct ttm_buffer_object, list_kref);
  123. struct ttm_bo_device *bdev = bo->bdev;
  124. size_t acc_size = bo->acc_size;
  125. BUG_ON(kref_read(&bo->list_kref));
  126. BUG_ON(kref_read(&bo->kref));
  127. BUG_ON(atomic_read(&bo->cpu_writers));
  128. BUG_ON(bo->mem.mm_node != NULL);
  129. BUG_ON(!list_empty(&bo->lru));
  130. BUG_ON(!list_empty(&bo->ddestroy));
  131. ttm_tt_destroy(bo->ttm);
  132. atomic_dec(&bo->bdev->glob->bo_count);
  133. dma_fence_put(bo->moving);
  134. reservation_object_fini(&bo->ttm_resv);
  135. mutex_destroy(&bo->wu_mutex);
  136. bo->destroy(bo);
  137. ttm_mem_global_free(bdev->glob->mem_glob, acc_size);
  138. }
  139. void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
  140. {
  141. struct ttm_bo_device *bdev = bo->bdev;
  142. struct ttm_mem_type_manager *man;
  143. reservation_object_assert_held(bo->resv);
  144. if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
  145. BUG_ON(!list_empty(&bo->lru));
  146. man = &bdev->man[bo->mem.mem_type];
  147. list_add_tail(&bo->lru, &man->lru[bo->priority]);
  148. kref_get(&bo->list_kref);
  149. if (bo->ttm && !(bo->ttm->page_flags &
  150. (TTM_PAGE_FLAG_SG | TTM_PAGE_FLAG_SWAPPED))) {
  151. list_add_tail(&bo->swap,
  152. &bdev->glob->swap_lru[bo->priority]);
  153. kref_get(&bo->list_kref);
  154. }
  155. }
  156. }
  157. EXPORT_SYMBOL(ttm_bo_add_to_lru);
  158. static void ttm_bo_ref_bug(struct kref *list_kref)
  159. {
  160. BUG();
  161. }
  162. void ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
  163. {
  164. if (!list_empty(&bo->swap)) {
  165. list_del_init(&bo->swap);
  166. kref_put(&bo->list_kref, ttm_bo_ref_bug);
  167. }
  168. if (!list_empty(&bo->lru)) {
  169. list_del_init(&bo->lru);
  170. kref_put(&bo->list_kref, ttm_bo_ref_bug);
  171. }
  172. /*
  173. * TODO: Add a driver hook to delete from
  174. * driver-specific LRU's here.
  175. */
  176. }
  177. void ttm_bo_del_sub_from_lru(struct ttm_buffer_object *bo)
  178. {
  179. struct ttm_bo_global *glob = bo->bdev->glob;
  180. spin_lock(&glob->lru_lock);
  181. ttm_bo_del_from_lru(bo);
  182. spin_unlock(&glob->lru_lock);
  183. }
  184. EXPORT_SYMBOL(ttm_bo_del_sub_from_lru);
  185. void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo)
  186. {
  187. reservation_object_assert_held(bo->resv);
  188. ttm_bo_del_from_lru(bo);
  189. ttm_bo_add_to_lru(bo);
  190. }
  191. EXPORT_SYMBOL(ttm_bo_move_to_lru_tail);
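/*
 * Note on the LRU bookkeeping above: every list the bo sits on (the
 * per-memory-type LRU and, for swappable ttms, the global swap LRU) holds
 * one bo::list_kref reference, taken in ttm_bo_add_to_lru() and dropped in
 * ttm_bo_del_from_lru(). A caller that only wants to bump a reserved bo to
 * the LRU tail does so under the global lru_lock, e.g. (illustrative):
 *
 *	spin_lock(&bo->bdev->glob->lru_lock);
 *	ttm_bo_move_to_lru_tail(bo);
 *	spin_unlock(&bo->bdev->glob->lru_lock);
 */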
  192. static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
  193. struct ttm_mem_reg *mem, bool evict,
  194. struct ttm_operation_ctx *ctx)
  195. {
  196. struct ttm_bo_device *bdev = bo->bdev;
  197. bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
  198. bool new_is_pci = ttm_mem_reg_is_pci(bdev, mem);
  199. struct ttm_mem_type_manager *old_man = &bdev->man[bo->mem.mem_type];
  200. struct ttm_mem_type_manager *new_man = &bdev->man[mem->mem_type];
  201. int ret = 0;
  202. if (old_is_pci || new_is_pci ||
  203. ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0)) {
  204. ret = ttm_mem_io_lock(old_man, true);
  205. if (unlikely(ret != 0))
  206. goto out_err;
  207. ttm_bo_unmap_virtual_locked(bo);
  208. ttm_mem_io_unlock(old_man);
  209. }
  210. /*
  211. * Create and bind a ttm if required.
  212. */
  213. if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
  214. if (bo->ttm == NULL) {
  215. bool zero = !(old_man->flags & TTM_MEMTYPE_FLAG_FIXED);
  216. ret = ttm_tt_create(bo, zero);
  217. if (ret)
  218. goto out_err;
  219. }
  220. ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement);
  221. if (ret)
  222. goto out_err;
  223. if (mem->mem_type != TTM_PL_SYSTEM) {
  224. ret = ttm_tt_bind(bo->ttm, mem, ctx);
  225. if (ret)
  226. goto out_err;
  227. }
  228. if (bo->mem.mem_type == TTM_PL_SYSTEM) {
  229. if (bdev->driver->move_notify)
  230. bdev->driver->move_notify(bo, evict, mem);
  231. bo->mem = *mem;
  232. mem->mm_node = NULL;
  233. goto moved;
  234. }
  235. }
  236. if (bdev->driver->move_notify)
  237. bdev->driver->move_notify(bo, evict, mem);
  238. if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
  239. !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
  240. ret = ttm_bo_move_ttm(bo, ctx, mem);
  241. else if (bdev->driver->move)
  242. ret = bdev->driver->move(bo, evict, ctx, mem);
  243. else
  244. ret = ttm_bo_move_memcpy(bo, ctx, mem);
  245. if (ret) {
  246. if (bdev->driver->move_notify) {
  247. struct ttm_mem_reg tmp_mem = *mem;
  248. *mem = bo->mem;
  249. bo->mem = tmp_mem;
  250. bdev->driver->move_notify(bo, false, mem);
  251. bo->mem = *mem;
  252. *mem = tmp_mem;
  253. }
  254. goto out_err;
  255. }
  256. moved:
  257. if (bo->evicted) {
  258. if (bdev->driver->invalidate_caches) {
  259. ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
  260. if (ret)
  261. pr_err("Can not flush read caches\n");
  262. }
  263. bo->evicted = false;
  264. }
  265. if (bo->mem.mm_node)
  266. bo->offset = (bo->mem.start << PAGE_SHIFT) +
  267. bdev->man[bo->mem.mem_type].gpu_offset;
  268. else
  269. bo->offset = 0;
  270. ctx->bytes_moved += bo->num_pages << PAGE_SHIFT;
  271. return 0;
  272. out_err:
  273. new_man = &bdev->man[bo->mem.mem_type];
  274. if (new_man->flags & TTM_MEMTYPE_FLAG_FIXED) {
  275. ttm_tt_destroy(bo->ttm);
  276. bo->ttm = NULL;
  277. }
  278. return ret;
  279. }
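/*
 * Rough summary of the paths taken by ttm_bo_handle_move_mem() above:
 * a move from TTM_PL_SYSTEM into another non-FIXED type only needs the
 * ttm_tt to be created and bound, so it skips the copy entirely; a move
 * between two non-FIXED types uses ttm_bo_move_ttm(); otherwise the
 * driver's move() hook is used when present, with ttm_bo_move_memcpy()
 * as the fallback.
 */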
  280. /**
  281. * Call with bo::resv reserved.
  282. * Releases the buffer object's GPU memory type usage.
  283. * This is the place to put in driver specific hooks to release
  284. * driver private resources.
  285. * The caller is expected to release bo::resv afterwards.
  286. */
  287. static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
  288. {
  289. if (bo->bdev->driver->move_notify)
  290. bo->bdev->driver->move_notify(bo, false, NULL);
  291. ttm_tt_destroy(bo->ttm);
  292. bo->ttm = NULL;
  293. ttm_bo_mem_put(bo, &bo->mem);
  294. }
  295. static int ttm_bo_individualize_resv(struct ttm_buffer_object *bo)
  296. {
  297. int r;
  298. if (bo->resv == &bo->ttm_resv)
  299. return 0;
  300. BUG_ON(!reservation_object_trylock(&bo->ttm_resv));
  301. r = reservation_object_copy_fences(&bo->ttm_resv, bo->resv);
  302. if (r)
  303. reservation_object_unlock(&bo->ttm_resv);
  304. return r;
  305. }
  306. static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
  307. {
  308. struct reservation_object_list *fobj;
  309. struct dma_fence *fence;
  310. int i;
  311. fobj = reservation_object_get_list(&bo->ttm_resv);
  312. fence = reservation_object_get_excl(&bo->ttm_resv);
  313. if (fence && !fence->ops->signaled)
  314. dma_fence_enable_sw_signaling(fence);
  315. for (i = 0; fobj && i < fobj->shared_count; ++i) {
  316. fence = rcu_dereference_protected(fobj->shared[i],
  317. reservation_object_held(bo->resv));
  318. if (!fence->ops->signaled)
  319. dma_fence_enable_sw_signaling(fence);
  320. }
  321. }
  322. static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
  323. {
  324. struct ttm_bo_device *bdev = bo->bdev;
  325. struct ttm_bo_global *glob = bdev->glob;
  326. int ret;
  327. ret = ttm_bo_individualize_resv(bo);
  328. if (ret) {
  329. /* Last resort, if we fail to allocate memory for the
  330. * fences, block for the BO to become idle
  331. */
  332. reservation_object_wait_timeout_rcu(bo->resv, true, false,
  333. 30 * HZ);
  334. spin_lock(&glob->lru_lock);
  335. goto error;
  336. }
  337. spin_lock(&glob->lru_lock);
  338. ret = reservation_object_trylock(bo->resv) ? 0 : -EBUSY;
  339. if (!ret) {
  340. if (reservation_object_test_signaled_rcu(&bo->ttm_resv, true)) {
  341. ttm_bo_del_from_lru(bo);
  342. spin_unlock(&glob->lru_lock);
  343. if (bo->resv != &bo->ttm_resv)
  344. reservation_object_unlock(&bo->ttm_resv);
  345. ttm_bo_cleanup_memtype_use(bo);
  346. reservation_object_unlock(bo->resv);
  347. return;
  348. }
  349. ttm_bo_flush_all_fences(bo);
  350. /*
  351. * Make NO_EVICT bos immediately available to
  352. * shrinkers, now that they are queued for
  353. * destruction.
  354. */
  355. if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT) {
  356. bo->mem.placement &= ~TTM_PL_FLAG_NO_EVICT;
  357. ttm_bo_add_to_lru(bo);
  358. }
  359. reservation_object_unlock(bo->resv);
  360. }
  361. if (bo->resv != &bo->ttm_resv)
  362. reservation_object_unlock(&bo->ttm_resv);
  363. error:
  364. kref_get(&bo->list_kref);
  365. list_add_tail(&bo->ddestroy, &bdev->ddestroy);
  366. spin_unlock(&glob->lru_lock);
  367. schedule_delayed_work(&bdev->wq,
  368. ((HZ / 100) < 1) ? 1 : HZ / 100);
  369. }
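/*
 * In short: when the (individualized) fences have already signaled, the
 * bo is torn down immediately above; otherwise it is put on
 * bdev::ddestroy and the delayed-destroy work retries roughly every
 * HZ / 100 jiffies until ttm_bo_cleanup_refs() finds it idle.
 */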
  370. /**
  371. * ttm_bo_cleanup_refs
  372. * If the bo is idle, remove it from the delayed and lru lists and unref it.
  373. * If it is not idle, do nothing.
  374. *
  375. * Must be called with lru_lock and reservation held; this function
  376. * will drop the lru lock and optionally the reservation lock before returning.
  377. *
  378. * @interruptible: Any sleeps should occur interruptibly.
  379. * @no_wait_gpu: Never wait for the gpu. Return -EBUSY instead.
  380. * @unlock_resv: Unlock the reservation lock as well.
  381. */
  382. static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
  383. bool interruptible, bool no_wait_gpu,
  384. bool unlock_resv)
  385. {
  386. struct ttm_bo_global *glob = bo->bdev->glob;
  387. struct reservation_object *resv;
  388. int ret;
  389. if (unlikely(list_empty(&bo->ddestroy)))
  390. resv = bo->resv;
  391. else
  392. resv = &bo->ttm_resv;
  393. if (reservation_object_test_signaled_rcu(resv, true))
  394. ret = 0;
  395. else
  396. ret = -EBUSY;
  397. if (ret && !no_wait_gpu) {
  398. long lret;
  399. if (unlock_resv)
  400. reservation_object_unlock(bo->resv);
  401. spin_unlock(&glob->lru_lock);
  402. lret = reservation_object_wait_timeout_rcu(resv, true,
  403. interruptible,
  404. 30 * HZ);
  405. if (lret < 0)
  406. return lret;
  407. else if (lret == 0)
  408. return -EBUSY;
  409. spin_lock(&glob->lru_lock);
  410. if (unlock_resv && !reservation_object_trylock(bo->resv)) {
  411. /*
  412. * We raced and lost: someone else holds the reservation now,
  413. * and is probably busy in ttm_bo_cleanup_memtype_use.
  414. *
  415. * Even if that's not the case, any delayed destruction would
  416. * now succeed because we finished waiting, so just return
  417. * success here.
  418. */
  419. spin_unlock(&glob->lru_lock);
  420. return 0;
  421. }
  422. ret = 0;
  423. }
  424. if (ret || unlikely(list_empty(&bo->ddestroy))) {
  425. if (unlock_resv)
  426. reservation_object_unlock(bo->resv);
  427. spin_unlock(&glob->lru_lock);
  428. return ret;
  429. }
  430. ttm_bo_del_from_lru(bo);
  431. list_del_init(&bo->ddestroy);
  432. kref_put(&bo->list_kref, ttm_bo_ref_bug);
  433. spin_unlock(&glob->lru_lock);
  434. ttm_bo_cleanup_memtype_use(bo);
  435. if (unlock_resv)
  436. reservation_object_unlock(bo->resv);
  437. return 0;
  438. }
  439. /**
  440. * Traverse the delayed list, and call ttm_bo_cleanup_refs on all
  441. * encountered buffers.
  442. */
  443. static bool ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
  444. {
  445. struct ttm_bo_global *glob = bdev->glob;
  446. struct list_head removed;
  447. bool empty;
  448. INIT_LIST_HEAD(&removed);
  449. spin_lock(&glob->lru_lock);
  450. while (!list_empty(&bdev->ddestroy)) {
  451. struct ttm_buffer_object *bo;
  452. bo = list_first_entry(&bdev->ddestroy, struct ttm_buffer_object,
  453. ddestroy);
  454. kref_get(&bo->list_kref);
  455. list_move_tail(&bo->ddestroy, &removed);
  456. if (remove_all || bo->resv != &bo->ttm_resv) {
  457. spin_unlock(&glob->lru_lock);
  458. reservation_object_lock(bo->resv, NULL);
  459. spin_lock(&glob->lru_lock);
  460. ttm_bo_cleanup_refs(bo, false, !remove_all, true);
  461. } else if (reservation_object_trylock(bo->resv)) {
  462. ttm_bo_cleanup_refs(bo, false, !remove_all, true);
  463. } else {
  464. spin_unlock(&glob->lru_lock);
  465. }
  466. kref_put(&bo->list_kref, ttm_bo_release_list);
  467. spin_lock(&glob->lru_lock);
  468. }
  469. list_splice_tail(&removed, &bdev->ddestroy);
  470. empty = list_empty(&bdev->ddestroy);
  471. spin_unlock(&glob->lru_lock);
  472. return empty;
  473. }
  474. static void ttm_bo_delayed_workqueue(struct work_struct *work)
  475. {
  476. struct ttm_bo_device *bdev =
  477. container_of(work, struct ttm_bo_device, wq.work);
  478. if (!ttm_bo_delayed_delete(bdev, false))
  479. schedule_delayed_work(&bdev->wq,
  480. ((HZ / 100) < 1) ? 1 : HZ / 100);
  481. }
  482. static void ttm_bo_release(struct kref *kref)
  483. {
  484. struct ttm_buffer_object *bo =
  485. container_of(kref, struct ttm_buffer_object, kref);
  486. struct ttm_bo_device *bdev = bo->bdev;
  487. struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];
  488. drm_vma_offset_remove(&bdev->vma_manager, &bo->vma_node);
  489. ttm_mem_io_lock(man, false);
  490. ttm_mem_io_free_vm(bo);
  491. ttm_mem_io_unlock(man);
  492. ttm_bo_cleanup_refs_or_queue(bo);
  493. kref_put(&bo->list_kref, ttm_bo_release_list);
  494. }
  495. void ttm_bo_unref(struct ttm_buffer_object **p_bo)
  496. {
  497. struct ttm_buffer_object *bo = *p_bo;
  498. *p_bo = NULL;
  499. kref_put(&bo->kref, ttm_bo_release);
  500. }
  501. EXPORT_SYMBOL(ttm_bo_unref);
  502. int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev)
  503. {
  504. return cancel_delayed_work_sync(&bdev->wq);
  505. }
  506. EXPORT_SYMBOL(ttm_bo_lock_delayed_workqueue);
  507. void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched)
  508. {
  509. if (resched)
  510. schedule_delayed_work(&bdev->wq,
  511. ((HZ / 100) < 1) ? 1 : HZ / 100);
  512. }
  513. EXPORT_SYMBOL(ttm_bo_unlock_delayed_workqueue);
  514. static int ttm_bo_evict(struct ttm_buffer_object *bo,
  515. struct ttm_operation_ctx *ctx)
  516. {
  517. struct ttm_bo_device *bdev = bo->bdev;
  518. struct ttm_mem_reg evict_mem;
  519. struct ttm_placement placement;
  520. int ret = 0;
  521. reservation_object_assert_held(bo->resv);
  522. placement.num_placement = 0;
  523. placement.num_busy_placement = 0;
  524. bdev->driver->evict_flags(bo, &placement);
  525. if (!placement.num_placement && !placement.num_busy_placement) {
  526. ret = ttm_bo_pipeline_gutting(bo);
  527. if (ret)
  528. return ret;
  529. return ttm_tt_create(bo, false);
  530. }
  531. evict_mem = bo->mem;
  532. evict_mem.mm_node = NULL;
  533. evict_mem.bus.io_reserved_vm = false;
  534. evict_mem.bus.io_reserved_count = 0;
  535. ret = ttm_bo_mem_space(bo, &placement, &evict_mem, ctx);
  536. if (ret) {
  537. if (ret != -ERESTARTSYS) {
  538. pr_err("Failed to find memory space for buffer 0x%p eviction\n",
  539. bo);
  540. ttm_bo_mem_space_debug(bo, &placement);
  541. }
  542. goto out;
  543. }
  544. ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, ctx);
  545. if (unlikely(ret)) {
  546. if (ret != -ERESTARTSYS)
  547. pr_err("Buffer eviction failed\n");
  548. ttm_bo_mem_put(bo, &evict_mem);
  549. goto out;
  550. }
  551. bo->evicted = true;
  552. out:
  553. return ret;
  554. }
  555. bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
  556. const struct ttm_place *place)
  557. {
  558. /* Don't evict this BO if it's outside of the
  559. * requested placement range
  560. */
  561. if (place->fpfn >= (bo->mem.start + bo->mem.size) ||
  562. (place->lpfn && place->lpfn <= bo->mem.start))
  563. return false;
  564. return true;
  565. }
  566. EXPORT_SYMBOL(ttm_bo_eviction_valuable);
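/*
 * Illustrative reading of the check above: a bo is only worth evicting
 * for this request when its range overlaps the requested [fpfn, lpfn)
 * window; a bo entirely below fpfn, or entirely above a non-zero lpfn,
 * is skipped since evicting it would not free space the new allocation
 * could actually use.
 */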
  567. /**
  568. * Check whether the target bo may be evicted or swapped out, covering two cases:
  569. *
  570. * a. If it shares a reservation object with ctx->resv, that object is assumed
  571. * to be locked already, so it is not locked again; return true directly
  572. * when either the operation allows reserved eviction (allow_reserved_eviction)
  573. * or the target bo is already on the delayed free list;
  574. *
  575. * b. Otherwise, trylock it.
  576. */
  577. static bool ttm_bo_evict_swapout_allowable(struct ttm_buffer_object *bo,
  578. struct ttm_operation_ctx *ctx, bool *locked)
  579. {
  580. bool ret = false;
  581. *locked = false;
  582. if (bo->resv == ctx->resv) {
  583. reservation_object_assert_held(bo->resv);
  584. if (ctx->flags & TTM_OPT_FLAG_ALLOW_RES_EVICT
  585. || !list_empty(&bo->ddestroy))
  586. ret = true;
  587. } else {
  588. *locked = reservation_object_trylock(bo->resv);
  589. ret = *locked;
  590. }
  591. return ret;
  592. }
  593. static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
  594. uint32_t mem_type,
  595. const struct ttm_place *place,
  596. struct ttm_operation_ctx *ctx)
  597. {
  598. struct ttm_bo_global *glob = bdev->glob;
  599. struct ttm_mem_type_manager *man = &bdev->man[mem_type];
  600. struct ttm_buffer_object *bo = NULL;
  601. bool locked = false;
  602. unsigned i;
  603. int ret;
  604. spin_lock(&glob->lru_lock);
  605. for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
  606. list_for_each_entry(bo, &man->lru[i], lru) {
  607. if (!ttm_bo_evict_swapout_allowable(bo, ctx, &locked))
  608. continue;
  609. if (place && !bdev->driver->eviction_valuable(bo,
  610. place)) {
  611. if (locked)
  612. reservation_object_unlock(bo->resv);
  613. continue;
  614. }
  615. break;
  616. }
  617. /* If the inner loop terminated early, we have our candidate */
  618. if (&bo->lru != &man->lru[i])
  619. break;
  620. bo = NULL;
  621. }
  622. if (!bo) {
  623. spin_unlock(&glob->lru_lock);
  624. return -EBUSY;
  625. }
  626. kref_get(&bo->list_kref);
  627. if (!list_empty(&bo->ddestroy)) {
  628. ret = ttm_bo_cleanup_refs(bo, ctx->interruptible,
  629. ctx->no_wait_gpu, locked);
  630. kref_put(&bo->list_kref, ttm_bo_release_list);
  631. return ret;
  632. }
  633. ttm_bo_del_from_lru(bo);
  634. spin_unlock(&glob->lru_lock);
  635. ret = ttm_bo_evict(bo, ctx);
  636. if (locked) {
  637. ttm_bo_unreserve(bo);
  638. } else {
  639. spin_lock(&glob->lru_lock);
  640. ttm_bo_add_to_lru(bo);
  641. spin_unlock(&glob->lru_lock);
  642. }
  643. kref_put(&bo->list_kref, ttm_bo_release_list);
  644. return ret;
  645. }
  646. void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem)
  647. {
  648. struct ttm_mem_type_manager *man = &bo->bdev->man[mem->mem_type];
  649. if (mem->mm_node)
  650. (*man->func->put_node)(man, mem);
  651. }
  652. EXPORT_SYMBOL(ttm_bo_mem_put);
  653. /**
  654. * Add the last move fence to the BO and reserve a new shared slot.
  655. */
  656. static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
  657. struct ttm_mem_type_manager *man,
  658. struct ttm_mem_reg *mem)
  659. {
  660. struct dma_fence *fence;
  661. int ret;
  662. spin_lock(&man->move_lock);
  663. fence = dma_fence_get(man->move);
  664. spin_unlock(&man->move_lock);
  665. if (fence) {
  666. reservation_object_add_shared_fence(bo->resv, fence);
  667. ret = reservation_object_reserve_shared(bo->resv);
  668. if (unlikely(ret))
  669. return ret;
  670. dma_fence_put(bo->moving);
  671. bo->moving = fence;
  672. }
  673. return 0;
  674. }
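/*
 * The fence attached here is the memory manager's last pipelined move;
 * keeping it as bo->moving lets later page faults and accelerated moves
 * wait for the eviction/copy that may still be in flight over the space
 * this bo was just given.
 */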
  675. /**
  676. * Repeatedly evict memory from the LRU for @mem_type until we create enough
  677. * space, or we've evicted everything and there isn't enough space.
  678. */
  679. static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
  680. uint32_t mem_type,
  681. const struct ttm_place *place,
  682. struct ttm_mem_reg *mem,
  683. struct ttm_operation_ctx *ctx)
  684. {
  685. struct ttm_bo_device *bdev = bo->bdev;
  686. struct ttm_mem_type_manager *man = &bdev->man[mem_type];
  687. int ret;
  688. do {
  689. ret = (*man->func->get_node)(man, bo, place, mem);
  690. if (unlikely(ret != 0))
  691. return ret;
  692. if (mem->mm_node)
  693. break;
  694. ret = ttm_mem_evict_first(bdev, mem_type, place, ctx);
  695. if (unlikely(ret != 0))
  696. return ret;
  697. } while (1);
  698. mem->mem_type = mem_type;
  699. return ttm_bo_add_move_fence(bo, man, mem);
  700. }
  701. static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
  702. uint32_t cur_placement,
  703. uint32_t proposed_placement)
  704. {
  705. uint32_t caching = proposed_placement & TTM_PL_MASK_CACHING;
  706. uint32_t result = proposed_placement & ~TTM_PL_MASK_CACHING;
  707. /**
  708. * Keep current caching if possible.
  709. */
  710. if ((cur_placement & caching) != 0)
  711. result |= (cur_placement & caching);
  712. else if ((man->default_caching & caching) != 0)
  713. result |= man->default_caching;
  714. else if ((TTM_PL_FLAG_CACHED & caching) != 0)
  715. result |= TTM_PL_FLAG_CACHED;
  716. else if ((TTM_PL_FLAG_WC & caching) != 0)
  717. result |= TTM_PL_FLAG_WC;
  718. else if ((TTM_PL_FLAG_UNCACHED & caching) != 0)
  719. result |= TTM_PL_FLAG_UNCACHED;
  720. return result;
  721. }
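/*
 * Illustrative example: if the bo is currently mapped TTM_PL_FLAG_CACHED,
 * the proposed placement allows CACHED | WC and the manager defaults to
 * WC, the existing CACHED caching is kept. Only when the current caching
 * is not among the allowed ones does the manager default apply, followed
 * by the fixed preference order CACHED, then WC, then UNCACHED.
 */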
  722. static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
  723. uint32_t mem_type,
  724. const struct ttm_place *place,
  725. uint32_t *masked_placement)
  726. {
  727. uint32_t cur_flags = ttm_bo_type_flags(mem_type);
  728. if ((cur_flags & place->flags & TTM_PL_MASK_MEM) == 0)
  729. return false;
  730. if ((place->flags & man->available_caching) == 0)
  731. return false;
  732. cur_flags |= (place->flags & man->available_caching);
  733. *masked_placement = cur_flags;
  734. return true;
  735. }
  736. /**
  737. * Creates space for memory region @mem according to its type.
  738. *
  739. * This function first searches for free space in compatible memory types in
  740. * the priority order defined by the driver. If free space isn't found, then
  741. * ttm_bo_mem_force_space is attempted in priority order to evict and find
  742. * space.
  743. */
  744. int ttm_bo_mem_space(struct ttm_buffer_object *bo,
  745. struct ttm_placement *placement,
  746. struct ttm_mem_reg *mem,
  747. struct ttm_operation_ctx *ctx)
  748. {
  749. struct ttm_bo_device *bdev = bo->bdev;
  750. struct ttm_mem_type_manager *man;
  751. uint32_t mem_type = TTM_PL_SYSTEM;
  752. uint32_t cur_flags = 0;
  753. bool type_found = false;
  754. bool type_ok = false;
  755. bool has_erestartsys = false;
  756. int i, ret;
  757. ret = reservation_object_reserve_shared(bo->resv);
  758. if (unlikely(ret))
  759. return ret;
  760. mem->mm_node = NULL;
  761. for (i = 0; i < placement->num_placement; ++i) {
  762. const struct ttm_place *place = &placement->placement[i];
  763. ret = ttm_mem_type_from_place(place, &mem_type);
  764. if (ret)
  765. return ret;
  766. man = &bdev->man[mem_type];
  767. if (!man->has_type || !man->use_type)
  768. continue;
  769. type_ok = ttm_bo_mt_compatible(man, mem_type, place,
  770. &cur_flags);
  771. if (!type_ok)
  772. continue;
  773. type_found = true;
  774. cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
  775. cur_flags);
  776. /*
  777. * Copy the access and other non-mapping-related flag bits from
  778. * the memory placement flags into the current flags
  779. */
  780. ttm_flag_masked(&cur_flags, place->flags,
  781. ~TTM_PL_MASK_MEMTYPE);
  782. if (mem_type == TTM_PL_SYSTEM)
  783. break;
  784. ret = (*man->func->get_node)(man, bo, place, mem);
  785. if (unlikely(ret))
  786. return ret;
  787. if (mem->mm_node) {
  788. ret = ttm_bo_add_move_fence(bo, man, mem);
  789. if (unlikely(ret)) {
  790. (*man->func->put_node)(man, mem);
  791. return ret;
  792. }
  793. break;
  794. }
  795. }
  796. if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || mem->mm_node) {
  797. mem->mem_type = mem_type;
  798. mem->placement = cur_flags;
  799. return 0;
  800. }
  801. for (i = 0; i < placement->num_busy_placement; ++i) {
  802. const struct ttm_place *place = &placement->busy_placement[i];
  803. ret = ttm_mem_type_from_place(place, &mem_type);
  804. if (ret)
  805. return ret;
  806. man = &bdev->man[mem_type];
  807. if (!man->has_type || !man->use_type)
  808. continue;
  809. if (!ttm_bo_mt_compatible(man, mem_type, place, &cur_flags))
  810. continue;
  811. type_found = true;
  812. cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
  813. cur_flags);
  814. /*
  815. * Copy the access and other non-mapping-related flag bits from
  816. * the memory placement flags into the current flags
  817. */
  818. ttm_flag_masked(&cur_flags, place->flags,
  819. ~TTM_PL_MASK_MEMTYPE);
  820. if (mem_type == TTM_PL_SYSTEM) {
  821. mem->mem_type = mem_type;
  822. mem->placement = cur_flags;
  823. mem->mm_node = NULL;
  824. return 0;
  825. }
  826. ret = ttm_bo_mem_force_space(bo, mem_type, place, mem, ctx);
  827. if (ret == 0 && mem->mm_node) {
  828. mem->placement = cur_flags;
  829. return 0;
  830. }
  831. if (ret == -ERESTARTSYS)
  832. has_erestartsys = true;
  833. }
  834. if (!type_found) {
  835. pr_err(TTM_PFX "No compatible memory type found\n");
  836. return -EINVAL;
  837. }
  838. return (has_erestartsys) ? -ERESTARTSYS : -ENOMEM;
  839. }
  840. EXPORT_SYMBOL(ttm_bo_mem_space);
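/*
 * A driver typically hands ttm_bo_mem_space() two lists: "placement" with
 * the preferred domains, tried without evicting anything, and
 * "busy_placement" with fallbacks that may force eviction. Illustrative
 * sketch only (the names below are examples, not part of this file):
 *
 *	static const struct ttm_place vram_place = {
 *		.flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC,
 *	};
 *	static const struct ttm_place gtt_place = {
 *		.flags = TTM_PL_FLAG_TT | TTM_PL_FLAG_CACHED,
 *	};
 *	struct ttm_placement placement = {
 *		.num_placement = 1,
 *		.placement = &vram_place,
 *		.num_busy_placement = 1,
 *		.busy_placement = &gtt_place,
 *	};
 */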
  841. static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
  842. struct ttm_placement *placement,
  843. struct ttm_operation_ctx *ctx)
  844. {
  845. int ret = 0;
  846. struct ttm_mem_reg mem;
  847. reservation_object_assert_held(bo->resv);
  848. mem.num_pages = bo->num_pages;
  849. mem.size = mem.num_pages << PAGE_SHIFT;
  850. mem.page_alignment = bo->mem.page_alignment;
  851. mem.bus.io_reserved_vm = false;
  852. mem.bus.io_reserved_count = 0;
  853. /*
  854. * Determine where to move the buffer.
  855. */
  856. ret = ttm_bo_mem_space(bo, placement, &mem, ctx);
  857. if (ret)
  858. goto out_unlock;
  859. ret = ttm_bo_handle_move_mem(bo, &mem, false, ctx);
  860. out_unlock:
  861. if (ret && mem.mm_node)
  862. ttm_bo_mem_put(bo, &mem);
  863. return ret;
  864. }
  865. static bool ttm_bo_places_compat(const struct ttm_place *places,
  866. unsigned num_placement,
  867. struct ttm_mem_reg *mem,
  868. uint32_t *new_flags)
  869. {
  870. unsigned i;
  871. for (i = 0; i < num_placement; i++) {
  872. const struct ttm_place *heap = &places[i];
  873. if (mem->mm_node && (mem->start < heap->fpfn ||
  874. (heap->lpfn != 0 && (mem->start + mem->num_pages) > heap->lpfn)))
  875. continue;
  876. *new_flags = heap->flags;
  877. if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) &&
  878. (*new_flags & mem->placement & TTM_PL_MASK_MEM) &&
  879. (!(*new_flags & TTM_PL_FLAG_CONTIGUOUS) ||
  880. (mem->placement & TTM_PL_FLAG_CONTIGUOUS)))
  881. return true;
  882. }
  883. return false;
  884. }
  885. bool ttm_bo_mem_compat(struct ttm_placement *placement,
  886. struct ttm_mem_reg *mem,
  887. uint32_t *new_flags)
  888. {
  889. if (ttm_bo_places_compat(placement->placement, placement->num_placement,
  890. mem, new_flags))
  891. return true;
  892. if ((placement->busy_placement != placement->placement ||
  893. placement->num_busy_placement > placement->num_placement) &&
  894. ttm_bo_places_compat(placement->busy_placement,
  895. placement->num_busy_placement,
  896. mem, new_flags))
  897. return true;
  898. return false;
  899. }
  900. EXPORT_SYMBOL(ttm_bo_mem_compat);
  901. int ttm_bo_validate(struct ttm_buffer_object *bo,
  902. struct ttm_placement *placement,
  903. struct ttm_operation_ctx *ctx)
  904. {
  905. int ret;
  906. uint32_t new_flags;
  907. reservation_object_assert_held(bo->resv);
  908. /*
  909. * Check whether we need to move the buffer.
  910. */
  911. if (!ttm_bo_mem_compat(placement, &bo->mem, &new_flags)) {
  912. ret = ttm_bo_move_buffer(bo, placement, ctx);
  913. if (ret)
  914. return ret;
  915. } else {
  916. /*
  917. * Copy the access and other non-mapping-related flag bits from
  918. * the compatible memory placement flags into the active flags
  919. */
  920. ttm_flag_masked(&bo->mem.placement, new_flags,
  921. ~TTM_PL_MASK_MEMTYPE);
  922. }
  923. /*
  924. * We might need to add a TTM.
  925. */
  926. if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
  927. ret = ttm_tt_create(bo, true);
  928. if (ret)
  929. return ret;
  930. }
  931. return 0;
  932. }
  933. EXPORT_SYMBOL(ttm_bo_validate);
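/*
 * Illustrative use: ttm_bo_validate() is called with the bo reserved,
 * e.g. (error handling trimmed):
 *
 *	ret = ttm_bo_reserve(bo, true, false, NULL);
 *	if (ret)
 *		return ret;
 *	ret = ttm_bo_validate(bo, &placement, &ctx);
 *	ttm_bo_unreserve(bo);
 */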
  934. int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
  935. struct ttm_buffer_object *bo,
  936. unsigned long size,
  937. enum ttm_bo_type type,
  938. struct ttm_placement *placement,
  939. uint32_t page_alignment,
  940. struct ttm_operation_ctx *ctx,
  941. size_t acc_size,
  942. struct sg_table *sg,
  943. struct reservation_object *resv,
  944. void (*destroy) (struct ttm_buffer_object *))
  945. {
  946. int ret = 0;
  947. unsigned long num_pages;
  948. struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
  949. bool locked;
  950. ret = ttm_mem_global_alloc(mem_glob, acc_size, ctx);
  951. if (ret) {
  952. pr_err("Out of kernel memory\n");
  953. if (destroy)
  954. (*destroy)(bo);
  955. else
  956. kfree(bo);
  957. return -ENOMEM;
  958. }
  959. num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
  960. if (num_pages == 0) {
  961. pr_err("Illegal buffer object size\n");
  962. if (destroy)
  963. (*destroy)(bo);
  964. else
  965. kfree(bo);
  966. ttm_mem_global_free(mem_glob, acc_size);
  967. return -EINVAL;
  968. }
  969. bo->destroy = destroy ? destroy : ttm_bo_default_destroy;
  970. kref_init(&bo->kref);
  971. kref_init(&bo->list_kref);
  972. atomic_set(&bo->cpu_writers, 0);
  973. INIT_LIST_HEAD(&bo->lru);
  974. INIT_LIST_HEAD(&bo->ddestroy);
  975. INIT_LIST_HEAD(&bo->swap);
  976. INIT_LIST_HEAD(&bo->io_reserve_lru);
  977. mutex_init(&bo->wu_mutex);
  978. bo->bdev = bdev;
  979. bo->type = type;
  980. bo->num_pages = num_pages;
  981. bo->mem.size = num_pages << PAGE_SHIFT;
  982. bo->mem.mem_type = TTM_PL_SYSTEM;
  983. bo->mem.num_pages = bo->num_pages;
  984. bo->mem.mm_node = NULL;
  985. bo->mem.page_alignment = page_alignment;
  986. bo->mem.bus.io_reserved_vm = false;
  987. bo->mem.bus.io_reserved_count = 0;
  988. bo->moving = NULL;
  989. bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
  990. bo->acc_size = acc_size;
  991. bo->sg = sg;
  992. if (resv) {
  993. bo->resv = resv;
  994. reservation_object_assert_held(bo->resv);
  995. } else {
  996. bo->resv = &bo->ttm_resv;
  997. }
  998. reservation_object_init(&bo->ttm_resv);
  999. atomic_inc(&bo->bdev->glob->bo_count);
  1000. drm_vma_node_reset(&bo->vma_node);
  1001. /*
  1002. * For ttm_bo_type_device buffers, allocate
  1003. * address space from the device.
  1004. */
  1005. if (bo->type == ttm_bo_type_device ||
  1006. bo->type == ttm_bo_type_sg)
  1007. ret = drm_vma_offset_add(&bdev->vma_manager, &bo->vma_node,
  1008. bo->mem.num_pages);
  1009. /* passed reservation objects should already be locked,
  1010. * since otherwise lockdep will be angered in radeon.
  1011. */
  1012. if (!resv) {
  1013. locked = reservation_object_trylock(bo->resv);
  1014. WARN_ON(!locked);
  1015. }
  1016. if (likely(!ret))
  1017. ret = ttm_bo_validate(bo, placement, ctx);
  1018. if (unlikely(ret)) {
  1019. if (!resv)
  1020. ttm_bo_unreserve(bo);
  1021. ttm_bo_unref(&bo);
  1022. return ret;
  1023. }
  1024. if (resv && !(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
  1025. spin_lock(&bdev->glob->lru_lock);
  1026. ttm_bo_add_to_lru(bo);
  1027. spin_unlock(&bdev->glob->lru_lock);
  1028. }
  1029. return ret;
  1030. }
  1031. EXPORT_SYMBOL(ttm_bo_init_reserved);
  1032. int ttm_bo_init(struct ttm_bo_device *bdev,
  1033. struct ttm_buffer_object *bo,
  1034. unsigned long size,
  1035. enum ttm_bo_type type,
  1036. struct ttm_placement *placement,
  1037. uint32_t page_alignment,
  1038. bool interruptible,
  1039. size_t acc_size,
  1040. struct sg_table *sg,
  1041. struct reservation_object *resv,
  1042. void (*destroy) (struct ttm_buffer_object *))
  1043. {
  1044. struct ttm_operation_ctx ctx = { interruptible, false };
  1045. int ret;
  1046. ret = ttm_bo_init_reserved(bdev, bo, size, type, placement,
  1047. page_alignment, &ctx, acc_size,
  1048. sg, resv, destroy);
  1049. if (ret)
  1050. return ret;
  1051. if (!resv)
  1052. ttm_bo_unreserve(bo);
  1053. return 0;
  1054. }
  1055. EXPORT_SYMBOL(ttm_bo_init);
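/*
 * Illustrative sketch of how a driver embeds its own object around a
 * ttm_buffer_object (struct and function names here are examples only):
 *
 *	struct my_bo {
 *		struct ttm_buffer_object tbo;
 *	};
 *
 *	static void my_bo_destroy(struct ttm_buffer_object *tbo)
 *	{
 *		kfree(container_of(tbo, struct my_bo, tbo));
 *	}
 *
 *	acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct my_bo));
 *	ret = ttm_bo_init(bdev, &mybo->tbo, size, ttm_bo_type_device,
 *			  &placement, 0, true, acc_size, NULL, NULL,
 *			  my_bo_destroy);
 */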
  1056. size_t ttm_bo_acc_size(struct ttm_bo_device *bdev,
  1057. unsigned long bo_size,
  1058. unsigned struct_size)
  1059. {
  1060. unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
  1061. size_t size = 0;
  1062. size += ttm_round_pot(struct_size);
  1063. size += ttm_round_pot(npages * sizeof(void *));
  1064. size += ttm_round_pot(sizeof(struct ttm_tt));
  1065. return size;
  1066. }
  1067. EXPORT_SYMBOL(ttm_bo_acc_size);
  1068. size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
  1069. unsigned long bo_size,
  1070. unsigned struct_size)
  1071. {
  1072. unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
  1073. size_t size = 0;
  1074. size += ttm_round_pot(struct_size);
  1075. size += ttm_round_pot(npages * (2*sizeof(void *) + sizeof(dma_addr_t)));
  1076. size += ttm_round_pot(sizeof(struct ttm_dma_tt));
  1077. return size;
  1078. }
  1079. EXPORT_SYMBOL(ttm_bo_dma_acc_size);
  1080. int ttm_bo_create(struct ttm_bo_device *bdev,
  1081. unsigned long size,
  1082. enum ttm_bo_type type,
  1083. struct ttm_placement *placement,
  1084. uint32_t page_alignment,
  1085. bool interruptible,
  1086. struct ttm_buffer_object **p_bo)
  1087. {
  1088. struct ttm_buffer_object *bo;
  1089. size_t acc_size;
  1090. int ret;
  1091. bo = kzalloc(sizeof(*bo), GFP_KERNEL);
  1092. if (unlikely(bo == NULL))
  1093. return -ENOMEM;
  1094. acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct ttm_buffer_object));
  1095. ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment,
  1096. interruptible, acc_size,
  1097. NULL, NULL, NULL);
  1098. if (likely(ret == 0))
  1099. *p_bo = bo;
  1100. return ret;
  1101. }
  1102. EXPORT_SYMBOL(ttm_bo_create);
  1103. static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
  1104. unsigned mem_type)
  1105. {
  1106. struct ttm_operation_ctx ctx = {
  1107. .interruptible = false,
  1108. .no_wait_gpu = false,
  1109. .flags = TTM_OPT_FLAG_FORCE_ALLOC
  1110. };
  1111. struct ttm_mem_type_manager *man = &bdev->man[mem_type];
  1112. struct ttm_bo_global *glob = bdev->glob;
  1113. struct dma_fence *fence;
  1114. int ret;
  1115. unsigned i;
  1116. /*
  1117. * Can't use standard list traversal since we're unlocking.
  1118. */
  1119. spin_lock(&glob->lru_lock);
  1120. for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
  1121. while (!list_empty(&man->lru[i])) {
  1122. spin_unlock(&glob->lru_lock);
  1123. ret = ttm_mem_evict_first(bdev, mem_type, NULL, &ctx);
  1124. if (ret)
  1125. return ret;
  1126. spin_lock(&glob->lru_lock);
  1127. }
  1128. }
  1129. spin_unlock(&glob->lru_lock);
  1130. spin_lock(&man->move_lock);
  1131. fence = dma_fence_get(man->move);
  1132. spin_unlock(&man->move_lock);
  1133. if (fence) {
  1134. ret = dma_fence_wait(fence, false);
  1135. dma_fence_put(fence);
  1136. if (ret)
  1137. return ret;
  1138. }
  1139. return 0;
  1140. }
  1141. int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
  1142. {
  1143. struct ttm_mem_type_manager *man;
  1144. int ret = -EINVAL;
  1145. if (mem_type >= TTM_NUM_MEM_TYPES) {
  1146. pr_err("Illegal memory type %d\n", mem_type);
  1147. return ret;
  1148. }
  1149. man = &bdev->man[mem_type];
  1150. if (!man->has_type) {
  1151. pr_err("Trying to take down uninitialized memory manager type %u\n",
  1152. mem_type);
  1153. return ret;
  1154. }
  1155. man->use_type = false;
  1156. man->has_type = false;
  1157. ret = 0;
  1158. if (mem_type > 0) {
  1159. ret = ttm_bo_force_list_clean(bdev, mem_type);
  1160. if (ret) {
  1161. pr_err("Cleanup eviction failed\n");
  1162. return ret;
  1163. }
  1164. ret = (*man->func->takedown)(man);
  1165. }
  1166. dma_fence_put(man->move);
  1167. man->move = NULL;
  1168. return ret;
  1169. }
  1170. EXPORT_SYMBOL(ttm_bo_clean_mm);
  1171. int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
  1172. {
  1173. struct ttm_mem_type_manager *man = &bdev->man[mem_type];
  1174. if (mem_type == 0 || mem_type >= TTM_NUM_MEM_TYPES) {
  1175. pr_err("Illegal memory manager memory type %u\n", mem_type);
  1176. return -EINVAL;
  1177. }
  1178. if (!man->has_type) {
  1179. pr_err("Memory type %u has not been initialized\n", mem_type);
  1180. return 0;
  1181. }
  1182. return ttm_bo_force_list_clean(bdev, mem_type);
  1183. }
  1184. EXPORT_SYMBOL(ttm_bo_evict_mm);
  1185. int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
  1186. unsigned long p_size)
  1187. {
  1188. int ret;
  1189. struct ttm_mem_type_manager *man;
  1190. unsigned i;
  1191. BUG_ON(type >= TTM_NUM_MEM_TYPES);
  1192. man = &bdev->man[type];
  1193. BUG_ON(man->has_type);
  1194. man->io_reserve_fastpath = true;
  1195. man->use_io_reserve_lru = false;
  1196. mutex_init(&man->io_reserve_mutex);
  1197. spin_lock_init(&man->move_lock);
  1198. INIT_LIST_HEAD(&man->io_reserve_lru);
  1199. ret = bdev->driver->init_mem_type(bdev, type, man);
  1200. if (ret)
  1201. return ret;
  1202. man->bdev = bdev;
  1203. if (type != TTM_PL_SYSTEM) {
  1204. ret = (*man->func->init)(man, p_size);
  1205. if (ret)
  1206. return ret;
  1207. }
  1208. man->has_type = true;
  1209. man->use_type = true;
  1210. man->size = p_size;
  1211. for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
  1212. INIT_LIST_HEAD(&man->lru[i]);
  1213. man->move = NULL;
  1214. return 0;
  1215. }
  1216. EXPORT_SYMBOL(ttm_bo_init_mm);
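/*
 * Illustrative bring-up: TTM_PL_SYSTEM is set up by ttm_bo_device_init()
 * below; the driver creates its own managed types afterwards, with p_size
 * given in pages (sizes and types here are examples only):
 *
 *	ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM, vram_size >> PAGE_SHIFT);
 *	if (ret)
 *		return ret;
 *	ret = ttm_bo_init_mm(bdev, TTM_PL_TT, gtt_size >> PAGE_SHIFT);
 */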
  1217. static void ttm_bo_global_kobj_release(struct kobject *kobj)
  1218. {
  1219. struct ttm_bo_global *glob =
  1220. container_of(kobj, struct ttm_bo_global, kobj);
  1221. __free_page(glob->dummy_read_page);
  1222. kfree(glob);
  1223. }
  1224. void ttm_bo_global_release(struct drm_global_reference *ref)
  1225. {
  1226. struct ttm_bo_global *glob = ref->object;
  1227. kobject_del(&glob->kobj);
  1228. kobject_put(&glob->kobj);
  1229. }
  1230. EXPORT_SYMBOL(ttm_bo_global_release);
  1231. int ttm_bo_global_init(struct drm_global_reference *ref)
  1232. {
  1233. struct ttm_bo_global_ref *bo_ref =
  1234. container_of(ref, struct ttm_bo_global_ref, ref);
  1235. struct ttm_bo_global *glob = ref->object;
  1236. int ret;
  1237. unsigned i;
  1238. mutex_init(&glob->device_list_mutex);
  1239. spin_lock_init(&glob->lru_lock);
  1240. glob->mem_glob = bo_ref->mem_glob;
  1241. glob->mem_glob->bo_glob = glob;
  1242. glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);
  1243. if (unlikely(glob->dummy_read_page == NULL)) {
  1244. ret = -ENOMEM;
  1245. goto out_no_drp;
  1246. }
  1247. for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
  1248. INIT_LIST_HEAD(&glob->swap_lru[i]);
  1249. INIT_LIST_HEAD(&glob->device_list);
  1250. atomic_set(&glob->bo_count, 0);
  1251. ret = kobject_init_and_add(
  1252. &glob->kobj, &ttm_bo_glob_kobj_type, ttm_get_kobj(), "buffer_objects");
  1253. if (unlikely(ret != 0))
  1254. kobject_put(&glob->kobj);
  1255. return ret;
  1256. out_no_drp:
  1257. kfree(glob);
  1258. return ret;
  1259. }
  1260. EXPORT_SYMBOL(ttm_bo_global_init);
  1261. int ttm_bo_device_release(struct ttm_bo_device *bdev)
  1262. {
  1263. int ret = 0;
  1264. unsigned i = TTM_NUM_MEM_TYPES;
  1265. struct ttm_mem_type_manager *man;
  1266. struct ttm_bo_global *glob = bdev->glob;
  1267. while (i--) {
  1268. man = &bdev->man[i];
  1269. if (man->has_type) {
  1270. man->use_type = false;
  1271. if ((i != TTM_PL_SYSTEM) && ttm_bo_clean_mm(bdev, i)) {
  1272. ret = -EBUSY;
  1273. pr_err("DRM memory manager type %d is not clean\n",
  1274. i);
  1275. }
  1276. man->has_type = false;
  1277. }
  1278. }
  1279. mutex_lock(&glob->device_list_mutex);
  1280. list_del(&bdev->device_list);
  1281. mutex_unlock(&glob->device_list_mutex);
  1282. cancel_delayed_work_sync(&bdev->wq);
  1283. if (ttm_bo_delayed_delete(bdev, true))
  1284. pr_debug("Delayed destroy list was clean\n");
  1285. spin_lock(&glob->lru_lock);
  1286. for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
  1287. if (list_empty(&bdev->man[0].lru[0]))
  1288. pr_debug("Swap list %d was clean\n", i);
  1289. spin_unlock(&glob->lru_lock);
  1290. drm_vma_offset_manager_destroy(&bdev->vma_manager);
  1291. return ret;
  1292. }
  1293. EXPORT_SYMBOL(ttm_bo_device_release);
  1294. int ttm_bo_device_init(struct ttm_bo_device *bdev,
  1295. struct ttm_bo_global *glob,
  1296. struct ttm_bo_driver *driver,
  1297. struct address_space *mapping,
  1298. uint64_t file_page_offset,
  1299. bool need_dma32)
  1300. {
  1301. int ret = -EINVAL;
  1302. bdev->driver = driver;
  1303. memset(bdev->man, 0, sizeof(bdev->man));
  1304. /*
  1305. * Initialize the system memory buffer type.
  1306. * Other types need to be driver / IOCTL initialized.
  1307. */
  1308. ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0);
  1309. if (unlikely(ret != 0))
  1310. goto out_no_sys;
  1311. drm_vma_offset_manager_init(&bdev->vma_manager, file_page_offset,
  1312. 0x10000000);
  1313. INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
  1314. INIT_LIST_HEAD(&bdev->ddestroy);
  1315. bdev->dev_mapping = mapping;
  1316. bdev->glob = glob;
  1317. bdev->need_dma32 = need_dma32;
  1318. mutex_lock(&glob->device_list_mutex);
  1319. list_add_tail(&bdev->device_list, &glob->device_list);
  1320. mutex_unlock(&glob->device_list_mutex);
  1321. return 0;
  1322. out_no_sys:
  1323. return ret;
  1324. }
  1325. EXPORT_SYMBOL(ttm_bo_device_init);
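/*
 * Typical initialization order for a driver (illustrative): take the
 * drm_global references for the memory and buffer-object globals, call
 * ttm_bo_device_init() with the driver's ttm_bo_driver and mapping, and
 * then register the driver specific memory types with ttm_bo_init_mm()
 * as sketched above.
 */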
  1326. /*
  1327. * buffer object vm functions.
  1328. */
  1329. bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
  1330. {
  1331. struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
  1332. if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
  1333. if (mem->mem_type == TTM_PL_SYSTEM)
  1334. return false;
  1335. if (man->flags & TTM_MEMTYPE_FLAG_CMA)
  1336. return false;
  1337. if (mem->placement & TTM_PL_FLAG_CACHED)
  1338. return false;
  1339. }
  1340. return true;
  1341. }
  1342. void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo)
  1343. {
  1344. struct ttm_bo_device *bdev = bo->bdev;
  1345. drm_vma_node_unmap(&bo->vma_node, bdev->dev_mapping);
  1346. ttm_mem_io_free_vm(bo);
  1347. }
  1348. void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
  1349. {
  1350. struct ttm_bo_device *bdev = bo->bdev;
  1351. struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];
  1352. ttm_mem_io_lock(man, false);
  1353. ttm_bo_unmap_virtual_locked(bo);
  1354. ttm_mem_io_unlock(man);
  1355. }
  1356. EXPORT_SYMBOL(ttm_bo_unmap_virtual);
  1357. int ttm_bo_wait(struct ttm_buffer_object *bo,
  1358. bool interruptible, bool no_wait)
  1359. {
  1360. long timeout = 15 * HZ;
  1361. if (no_wait) {
  1362. if (reservation_object_test_signaled_rcu(bo->resv, true))
  1363. return 0;
  1364. else
  1365. return -EBUSY;
  1366. }
  1367. timeout = reservation_object_wait_timeout_rcu(bo->resv, true,
  1368. interruptible, timeout);
  1369. if (timeout < 0)
  1370. return timeout;
  1371. if (timeout == 0)
  1372. return -EBUSY;
  1373. reservation_object_add_excl_fence(bo->resv, NULL);
  1374. return 0;
  1375. }
  1376. EXPORT_SYMBOL(ttm_bo_wait);
  1377. int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
  1378. {
  1379. int ret = 0;
  1380. /*
  1381. * Using ttm_bo_reserve makes sure the lru lists are updated.
  1382. */
  1383. ret = ttm_bo_reserve(bo, true, no_wait, NULL);
  1384. if (unlikely(ret != 0))
  1385. return ret;
  1386. ret = ttm_bo_wait(bo, true, no_wait);
  1387. if (likely(ret == 0))
  1388. atomic_inc(&bo->cpu_writers);
  1389. ttm_bo_unreserve(bo);
  1390. return ret;
  1391. }
  1392. EXPORT_SYMBOL(ttm_bo_synccpu_write_grab);
  1393. void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
  1394. {
  1395. atomic_dec(&bo->cpu_writers);
  1396. }
  1397. EXPORT_SYMBOL(ttm_bo_synccpu_write_release);
  1398. /**
  1399. * A buffer object shrink method that tries to swap out the first
  1400. * buffer object on the bo_global::swap_lru list.
  1401. */
  1402. int ttm_bo_swapout(struct ttm_bo_global *glob, struct ttm_operation_ctx *ctx)
  1403. {
  1404. struct ttm_buffer_object *bo;
  1405. int ret = -EBUSY;
  1406. bool locked;
  1407. unsigned i;
  1408. spin_lock(&glob->lru_lock);
  1409. for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
  1410. list_for_each_entry(bo, &glob->swap_lru[i], swap) {
  1411. if (ttm_bo_evict_swapout_allowable(bo, ctx, &locked)) {
  1412. ret = 0;
  1413. break;
  1414. }
  1415. }
  1416. if (!ret)
  1417. break;
  1418. }
  1419. if (ret) {
  1420. spin_unlock(&glob->lru_lock);
  1421. return ret;
  1422. }
  1423. kref_get(&bo->list_kref);
  1424. if (!list_empty(&bo->ddestroy)) {
  1425. ret = ttm_bo_cleanup_refs(bo, false, false, locked);
  1426. kref_put(&bo->list_kref, ttm_bo_release_list);
  1427. return ret;
  1428. }
  1429. ttm_bo_del_from_lru(bo);
  1430. spin_unlock(&glob->lru_lock);
  1431. /**
  1432. * Move to system cached
  1433. */
  1434. if (bo->mem.mem_type != TTM_PL_SYSTEM ||
  1435. bo->ttm->caching_state != tt_cached) {
  1436. struct ttm_operation_ctx ctx = { false, false };
  1437. struct ttm_mem_reg evict_mem;
  1438. evict_mem = bo->mem;
  1439. evict_mem.mm_node = NULL;
  1440. evict_mem.placement = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
  1441. evict_mem.mem_type = TTM_PL_SYSTEM;
  1442. ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, &ctx);
  1443. if (unlikely(ret != 0))
  1444. goto out;
  1445. }
  1446. /**
  1447. * Make sure BO is idle.
  1448. */
  1449. ret = ttm_bo_wait(bo, false, false);
  1450. if (unlikely(ret != 0))
  1451. goto out;
  1452. ttm_bo_unmap_virtual(bo);
  1453. /**
  1454. * Swap out. Buffer will be swapped in again as soon as
  1455. * anyone tries to access a ttm page.
  1456. */
  1457. if (bo->bdev->driver->swap_notify)
  1458. bo->bdev->driver->swap_notify(bo);
  1459. ret = ttm_tt_swapout(bo->ttm, bo->persistent_swap_storage);
  1460. out:
  1461. /**
  1462. *
  1463. * Unreserve without putting on LRU to avoid swapping out an
  1464. * already swapped buffer.
  1465. */
  1466. if (locked)
  1467. reservation_object_unlock(bo->resv);
  1468. kref_put(&bo->list_kref, ttm_bo_release_list);
  1469. return ret;
  1470. }
  1471. EXPORT_SYMBOL(ttm_bo_swapout);
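/*
 * ttm_bo_swapout() is the buffer-object shrink path: it is invoked via the
 * bo_glob back-pointer when TTM's memory accounting needs to free system
 * pages, and repeatedly by ttm_bo_swapout_all() below, moving one victim
 * to cached system memory and then swapping its ttm pages out.
 */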
  1472. void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
  1473. {
  1474. struct ttm_operation_ctx ctx = {
  1475. .interruptible = false,
  1476. .no_wait_gpu = false
  1477. };
  1478. while (ttm_bo_swapout(bdev->glob, &ctx) == 0)
  1479. ;
  1480. }
  1481. EXPORT_SYMBOL(ttm_bo_swapout_all);
  1482. /**
  1483. * ttm_bo_wait_unreserved - interruptible wait for a buffer object to become
  1484. * unreserved
  1485. *
  1486. * @bo: Pointer to buffer
  1487. */
  1488. int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo)
  1489. {
  1490. int ret;
  1491. /*
  1492. * In the absence of a wait_unlocked API,
  1493. * Use the bo::wu_mutex to avoid triggering livelocks due to
  1494. * concurrent use of this function. Note that this use of
  1495. * bo::wu_mutex can go away if we change locking order to
  1496. * mmap_sem -> bo::reserve.
  1497. */
  1498. ret = mutex_lock_interruptible(&bo->wu_mutex);
  1499. if (unlikely(ret != 0))
  1500. return -ERESTARTSYS;
  1501. if (!ww_mutex_is_locked(&bo->resv->lock))
  1502. goto out_unlock;
  1503. ret = reservation_object_lock_interruptible(bo->resv, NULL);
  1504. if (ret == -EINTR)
  1505. ret = -ERESTARTSYS;
  1506. if (unlikely(ret != 0))
  1507. goto out_unlock;
  1508. reservation_object_unlock(bo->resv);
  1509. out_unlock:
  1510. mutex_unlock(&bo->wu_mutex);
  1511. return ret;
  1512. }