ttm_bo.c

/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/atomic.h>
#include <linux/reservation.h>

static void ttm_bo_global_kobj_release(struct kobject *kobj);

static struct attribute ttm_bo_count = {
	.name = "bo_count",
	.mode = S_IRUGO
};
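
/*
 * Translate the memory-type bits of a placement into the corresponding
 * TTM_PL_* memory type index.
 */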
static inline int ttm_mem_type_from_place(const struct ttm_place *place,
					  uint32_t *mem_type)
{
	int pos;

	pos = ffs(place->flags & TTM_PL_MASK_MEM);
	if (unlikely(!pos))
		return -EINVAL;

	*mem_type = pos - 1;
	return 0;
}

static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
	struct drm_printer p = drm_debug_printer(TTM_PFX);

	pr_err("    has_type: %d\n", man->has_type);
	pr_err("    use_type: %d\n", man->use_type);
	pr_err("    flags: 0x%08X\n", man->flags);
	pr_err("    gpu_offset: 0x%08llX\n", man->gpu_offset);
	pr_err("    size: %llu\n", man->size);
	pr_err("    available_caching: 0x%08X\n", man->available_caching);
	pr_err("    default_caching: 0x%08X\n", man->default_caching);
	if (mem_type != TTM_PL_SYSTEM)
		(*man->func->debug)(man, &p);
}

static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
				   struct ttm_placement *placement)
{
	int i, ret, mem_type;

	pr_err("No space for %p (%lu pages, %luK, %luM)\n",
	       bo, bo->mem.num_pages, bo->mem.size >> 10,
	       bo->mem.size >> 20);
	for (i = 0; i < placement->num_placement; i++) {
		ret = ttm_mem_type_from_place(&placement->placement[i],
					      &mem_type);
		if (ret)
			return;
		pr_err("  placement[%d]=0x%08X (%d)\n",
		       i, placement->placement[i].flags, mem_type);
		ttm_mem_type_debug(bo->bdev, mem_type);
	}
}

static ssize_t ttm_bo_global_show(struct kobject *kobj,
				  struct attribute *attr,
				  char *buffer)
{
	struct ttm_bo_global *glob =
		container_of(kobj, struct ttm_bo_global, kobj);

	return snprintf(buffer, PAGE_SIZE, "%d\n",
			atomic_read(&glob->bo_count));
}

static struct attribute *ttm_bo_global_attrs[] = {
	&ttm_bo_count,
	NULL
};

static const struct sysfs_ops ttm_bo_global_ops = {
	.show = &ttm_bo_global_show
};

static struct kobj_type ttm_bo_glob_kobj_type  = {
	.release = &ttm_bo_global_kobj_release,
	.sysfs_ops = &ttm_bo_global_ops,
	.default_attrs = ttm_bo_global_attrs
};

static inline uint32_t ttm_bo_type_flags(unsigned type)
{
	return 1 << (type);
}
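
/*
 * Final release, called when the last list_kref is dropped: destroy the
 * backing ttm_tt, drop the global BO count and accounting size, and free
 * the object itself.
 */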
static void ttm_bo_release_list(struct kref *list_kref)
{
	struct ttm_buffer_object *bo =
	    container_of(list_kref, struct ttm_buffer_object, list_kref);
	struct ttm_bo_device *bdev = bo->bdev;
	size_t acc_size = bo->acc_size;

	BUG_ON(kref_read(&bo->list_kref));
	BUG_ON(kref_read(&bo->kref));
	BUG_ON(atomic_read(&bo->cpu_writers));
	BUG_ON(bo->mem.mm_node != NULL);
	BUG_ON(!list_empty(&bo->lru));
	BUG_ON(!list_empty(&bo->ddestroy));
	ttm_tt_destroy(bo->ttm);
	atomic_dec(&bo->glob->bo_count);
	dma_fence_put(bo->moving);
	reservation_object_fini(&bo->ttm_resv);
	mutex_destroy(&bo->wu_mutex);
	if (bo->destroy)
		bo->destroy(bo);
	else {
		kfree(bo);
	}
	ttm_mem_global_free(bdev->glob->mem_glob, acc_size);
}
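
/*
 * Put the BO on its memory manager's LRU list and, if it has swappable
 * pages, on the global swap LRU. The caller must hold the reservation;
 * NO_EVICT buffers are never added.
 */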
void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man;

	reservation_object_assert_held(bo->resv);

	if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
		BUG_ON(!list_empty(&bo->lru));

		man = &bdev->man[bo->mem.mem_type];
		list_add_tail(&bo->lru, &man->lru[bo->priority]);
		kref_get(&bo->list_kref);

		if (bo->ttm && !(bo->ttm->page_flags & TTM_PAGE_FLAG_SG)) {
			list_add_tail(&bo->swap,
				      &bo->glob->swap_lru[bo->priority]);
			kref_get(&bo->list_kref);
		}
	}
}
EXPORT_SYMBOL(ttm_bo_add_to_lru);

static void ttm_bo_ref_bug(struct kref *list_kref)
{
	BUG();
}
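
/*
 * Remove the BO from the LRU and swap lists, dropping one list_kref for
 * each list it was on. The caller must hold the global lru_lock.
 */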
void ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
{
	if (!list_empty(&bo->swap)) {
		list_del_init(&bo->swap);
		kref_put(&bo->list_kref, ttm_bo_ref_bug);
	}
	if (!list_empty(&bo->lru)) {
		list_del_init(&bo->lru);
		kref_put(&bo->list_kref, ttm_bo_ref_bug);
	}

	/*
	 * TODO: Add a driver hook to delete from
	 * driver-specific LRU's here.
	 */
}

void ttm_bo_del_sub_from_lru(struct ttm_buffer_object *bo)
{
	spin_lock(&bo->glob->lru_lock);
	ttm_bo_del_from_lru(bo);
	spin_unlock(&bo->glob->lru_lock);
}
EXPORT_SYMBOL(ttm_bo_del_sub_from_lru);

void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo)
{
	reservation_object_assert_held(bo->resv);

	ttm_bo_del_from_lru(bo);
	ttm_bo_add_to_lru(bo);
}
EXPORT_SYMBOL(ttm_bo_move_to_lru_tail);

/*
 * Call with the buffer object reserved (bo->resv held).
 */
static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_global *glob = bo->glob;
	int ret = 0;
	uint32_t page_flags = 0;

	reservation_object_assert_held(bo->resv);
	bo->ttm = NULL;

	if (bdev->need_dma32)
		page_flags |= TTM_PAGE_FLAG_DMA32;

	switch (bo->type) {
	case ttm_bo_type_device:
		if (zero_alloc)
			page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
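		/* fall through */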
	case ttm_bo_type_kernel:
		bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
						      page_flags, glob->dummy_read_page);
		if (unlikely(bo->ttm == NULL))
			ret = -ENOMEM;
		break;
	case ttm_bo_type_sg:
		bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
						      page_flags | TTM_PAGE_FLAG_SG,
						      glob->dummy_read_page);
		if (unlikely(bo->ttm == NULL)) {
			ret = -ENOMEM;
			break;
		}
		bo->ttm->sg = bo->sg;
		break;
	default:
		pr_err("Illegal buffer object type\n");
		ret = -EINVAL;
		break;
	}

	return ret;
}
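
/*
 * Move the buffer's contents to the memory region described by @mem:
 * unmap CPU mappings when the aperture or caching changes, create and
 * bind a ttm_tt if the target needs one, notify the driver, and perform
 * the actual move via ttm_bo_move_ttm, the driver's move hook, or memcpy.
 */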
static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
				  struct ttm_mem_reg *mem, bool evict,
				  struct ttm_operation_ctx *ctx)
{
	struct ttm_bo_device *bdev = bo->bdev;
	bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
	bool new_is_pci = ttm_mem_reg_is_pci(bdev, mem);
	struct ttm_mem_type_manager *old_man = &bdev->man[bo->mem.mem_type];
	struct ttm_mem_type_manager *new_man = &bdev->man[mem->mem_type];
	int ret = 0;

	if (old_is_pci || new_is_pci ||
	    ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0)) {
		ret = ttm_mem_io_lock(old_man, true);
		if (unlikely(ret != 0))
			goto out_err;
		ttm_bo_unmap_virtual_locked(bo);
		ttm_mem_io_unlock(old_man);
	}

	/*
	 * Create and bind a ttm if required.
	 */

	if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
		if (bo->ttm == NULL) {
			bool zero = !(old_man->flags & TTM_MEMTYPE_FLAG_FIXED);
			ret = ttm_bo_add_ttm(bo, zero);
			if (ret)
				goto out_err;
		}

		ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement);
		if (ret)
			goto out_err;

		if (mem->mem_type != TTM_PL_SYSTEM) {
			ret = ttm_tt_bind(bo->ttm, mem, ctx);
			if (ret)
				goto out_err;
		}

		if (bo->mem.mem_type == TTM_PL_SYSTEM) {
			if (bdev->driver->move_notify)
				bdev->driver->move_notify(bo, evict, mem);
			bo->mem = *mem;
			mem->mm_node = NULL;
			goto moved;
		}
	}

	if (bdev->driver->move_notify)
		bdev->driver->move_notify(bo, evict, mem);

	if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
	    !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
		ret = ttm_bo_move_ttm(bo, ctx, mem);
	else if (bdev->driver->move)
		ret = bdev->driver->move(bo, evict, ctx, mem);
	else
		ret = ttm_bo_move_memcpy(bo, ctx, mem);

	if (ret) {
		if (bdev->driver->move_notify) {
			struct ttm_mem_reg tmp_mem = *mem;
			*mem = bo->mem;
			bo->mem = tmp_mem;
			bdev->driver->move_notify(bo, false, mem);
			bo->mem = *mem;
			*mem = tmp_mem;
		}

		goto out_err;
	}

moved:
	if (bo->evicted) {
		if (bdev->driver->invalidate_caches) {
			ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
			if (ret)
				pr_err("Can not flush read caches\n");
		}
		bo->evicted = false;
	}

	if (bo->mem.mm_node)
		bo->offset = (bo->mem.start << PAGE_SHIFT) +
		    bdev->man[bo->mem.mem_type].gpu_offset;
	else
		bo->offset = 0;

	ctx->bytes_moved += bo->num_pages << PAGE_SHIFT;
	return 0;

out_err:
	new_man = &bdev->man[bo->mem.mem_type];
	if (new_man->flags & TTM_MEMTYPE_FLAG_FIXED) {
		ttm_tt_destroy(bo->ttm);
		bo->ttm = NULL;
	}

	return ret;
}

/**
 * Call bo::reserved.
 * Will release GPU memory type usage on destruction.
 * This is the place to put in driver specific hooks to release
 * driver private resources.
 */

static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
{
	if (bo->bdev->driver->move_notify)
		bo->bdev->driver->move_notify(bo, false, NULL);

	ttm_tt_destroy(bo->ttm);
	bo->ttm = NULL;
	ttm_bo_mem_put(bo, &bo->mem);
}
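
/*
 * Give the BO its own reservation object by copying the fences from the
 * (possibly shared, e.g. imported) object into bo->ttm_resv, so that
 * delayed destruction no longer depends on the shared object.
 */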
static int ttm_bo_individualize_resv(struct ttm_buffer_object *bo)
{
	int r;

	if (bo->resv == &bo->ttm_resv)
		return 0;

	BUG_ON(!reservation_object_trylock(&bo->ttm_resv));

	r = reservation_object_copy_fences(&bo->ttm_resv, bo->resv);
	if (r)
		reservation_object_unlock(&bo->ttm_resv);

	return r;
}
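
/*
 * Enable software signaling on the exclusive fence and all shared fences,
 * so the delayed-destroy worker will eventually see them signal.
 */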
static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
{
	struct reservation_object_list *fobj;
	struct dma_fence *fence;
	int i;

	fobj = reservation_object_get_list(&bo->ttm_resv);
	fence = reservation_object_get_excl(&bo->ttm_resv);
	if (fence && !fence->ops->signaled)
		dma_fence_enable_sw_signaling(fence);

	for (i = 0; fobj && i < fobj->shared_count; ++i) {
		fence = rcu_dereference_protected(fobj->shared[i],
					reservation_object_held(bo->resv));

		if (!fence->ops->signaled)
			dma_fence_enable_sw_signaling(fence);
	}
}
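
/*
 * If the BO is already idle and its reservation can be trylocked, release
 * its memory-type resources immediately; otherwise put it on the device's
 * ddestroy list and let the delayed-destroy worker retry.
 */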
static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_global *glob = bo->glob;
	int ret;

	ret = ttm_bo_individualize_resv(bo);
	if (ret) {
		/* Last resort, if we fail to allocate memory for the
		 * fences block for the BO to become idle
		 */
		reservation_object_wait_timeout_rcu(bo->resv, true, false,
						    30 * HZ);
		spin_lock(&glob->lru_lock);
		goto error;
	}

	spin_lock(&glob->lru_lock);
	ret = reservation_object_trylock(bo->resv) ? 0 : -EBUSY;
	if (!ret) {
		if (reservation_object_test_signaled_rcu(&bo->ttm_resv, true)) {
			ttm_bo_del_from_lru(bo);
			spin_unlock(&glob->lru_lock);
			if (bo->resv != &bo->ttm_resv)
				reservation_object_unlock(&bo->ttm_resv);

			ttm_bo_cleanup_memtype_use(bo);
			reservation_object_unlock(bo->resv);
			return;
		}

		ttm_bo_flush_all_fences(bo);

		/*
		 * Make NO_EVICT bos immediately available to
		 * shrinkers, now that they are queued for
		 * destruction.
		 */
		if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT) {
			bo->mem.placement &= ~TTM_PL_FLAG_NO_EVICT;
			ttm_bo_add_to_lru(bo);
		}

		reservation_object_unlock(bo->resv);
	}
	if (bo->resv != &bo->ttm_resv)
		reservation_object_unlock(&bo->ttm_resv);

error:
	kref_get(&bo->list_kref);
	list_add_tail(&bo->ddestroy, &bdev->ddestroy);
	spin_unlock(&glob->lru_lock);

	schedule_delayed_work(&bdev->wq,
			      ((HZ / 100) < 1) ? 1 : HZ / 100);
}

/**
 * function ttm_bo_cleanup_refs
 * If bo idle, remove from delayed- and lru lists, and unref.
 * If not idle, do nothing.
 *
 * Must be called with lru_lock and reservation held, this function
 * will drop the lru lock and optionally the reservation lock before returning.
 *
 * @interruptible         Any sleeps should occur interruptibly.
 * @no_wait_gpu           Never wait for gpu. Return -EBUSY instead.
 * @unlock_resv           Unlock the reservation lock as well.
 */

static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
			       bool interruptible, bool no_wait_gpu,
			       bool unlock_resv)
{
	struct ttm_bo_global *glob = bo->glob;
	struct reservation_object *resv;
	int ret;

	if (unlikely(list_empty(&bo->ddestroy)))
		resv = bo->resv;
	else
		resv = &bo->ttm_resv;

	if (reservation_object_test_signaled_rcu(resv, true))
		ret = 0;
	else
		ret = -EBUSY;

	if (ret && !no_wait_gpu) {
		long lret;

		if (unlock_resv)
			reservation_object_unlock(bo->resv);
		spin_unlock(&glob->lru_lock);

		lret = reservation_object_wait_timeout_rcu(resv, true,
							   interruptible,
							   30 * HZ);

		if (lret < 0)
			return lret;
		else if (lret == 0)
			return -EBUSY;

		spin_lock(&glob->lru_lock);
		if (unlock_resv && !reservation_object_trylock(bo->resv)) {
			/*
			 * We raced, and lost, someone else holds the reservation now,
			 * and is probably busy in ttm_bo_cleanup_memtype_use.
			 *
			 * Even if it's not the case, because we finished waiting any
			 * delayed destruction would succeed, so just return success
			 * here.
			 */
			spin_unlock(&glob->lru_lock);
			return 0;
		}
		ret = 0;
	}

	if (ret || unlikely(list_empty(&bo->ddestroy))) {
		if (unlock_resv)
			reservation_object_unlock(bo->resv);
		spin_unlock(&glob->lru_lock);
		return ret;
	}

	ttm_bo_del_from_lru(bo);
	list_del_init(&bo->ddestroy);
	kref_put(&bo->list_kref, ttm_bo_ref_bug);

	spin_unlock(&glob->lru_lock);
	ttm_bo_cleanup_memtype_use(bo);

	if (unlock_resv)
		reservation_object_unlock(bo->resv);

	return 0;
}

/**
 * Traverse the delayed list, and call ttm_bo_cleanup_refs on all
 * encountered buffers.
 */
static bool ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
{
	struct ttm_bo_global *glob = bdev->glob;
	struct list_head removed;
	bool empty;

	INIT_LIST_HEAD(&removed);

	spin_lock(&glob->lru_lock);
	while (!list_empty(&bdev->ddestroy)) {
		struct ttm_buffer_object *bo;

		bo = list_first_entry(&bdev->ddestroy, struct ttm_buffer_object,
				      ddestroy);
		kref_get(&bo->list_kref);
		list_move_tail(&bo->ddestroy, &removed);

		if (remove_all || bo->resv != &bo->ttm_resv) {
			spin_unlock(&glob->lru_lock);
			reservation_object_lock(bo->resv, NULL);

			spin_lock(&glob->lru_lock);
			ttm_bo_cleanup_refs(bo, false, !remove_all, true);

		} else if (reservation_object_trylock(bo->resv)) {
			ttm_bo_cleanup_refs(bo, false, !remove_all, true);
		} else {
			spin_unlock(&glob->lru_lock);
		}

		kref_put(&bo->list_kref, ttm_bo_release_list);
		spin_lock(&glob->lru_lock);
	}
	list_splice_tail(&removed, &bdev->ddestroy);
	empty = list_empty(&bdev->ddestroy);
	spin_unlock(&glob->lru_lock);

	return empty;
}

static void ttm_bo_delayed_workqueue(struct work_struct *work)
{
	struct ttm_bo_device *bdev =
	    container_of(work, struct ttm_bo_device, wq.work);

	if (!ttm_bo_delayed_delete(bdev, false)) {
		schedule_delayed_work(&bdev->wq,
				      ((HZ / 100) < 1) ? 1 : HZ / 100);
	}
}
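
/*
 * Called when the last kref on the BO is dropped: remove the BO from the
 * address-space manager, release its io mappings, and either clean it up
 * immediately or queue it for delayed destruction.
 */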
static void ttm_bo_release(struct kref *kref)
{
	struct ttm_buffer_object *bo =
	    container_of(kref, struct ttm_buffer_object, kref);
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];

	drm_vma_offset_remove(&bdev->vma_manager, &bo->vma_node);
	ttm_mem_io_lock(man, false);
	ttm_mem_io_free_vm(bo);
	ttm_mem_io_unlock(man);
	ttm_bo_cleanup_refs_or_queue(bo);
	kref_put(&bo->list_kref, ttm_bo_release_list);
}

void ttm_bo_unref(struct ttm_buffer_object **p_bo)
{
	struct ttm_buffer_object *bo = *p_bo;

	*p_bo = NULL;
	kref_put(&bo->kref, ttm_bo_release);
}
EXPORT_SYMBOL(ttm_bo_unref);

int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev)
{
	return cancel_delayed_work_sync(&bdev->wq);
}
EXPORT_SYMBOL(ttm_bo_lock_delayed_workqueue);

void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched)
{
	if (resched)
		schedule_delayed_work(&bdev->wq,
				      ((HZ / 100) < 1) ? 1 : HZ / 100);
}
EXPORT_SYMBOL(ttm_bo_unlock_delayed_workqueue);
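
/*
 * Evict a reserved buffer: ask the driver where evicted buffers of this
 * kind should go (evict_flags), find space there, and move the buffer.
 */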
static int ttm_bo_evict(struct ttm_buffer_object *bo,
			struct ttm_operation_ctx *ctx)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_reg evict_mem;
	struct ttm_placement placement;
	int ret = 0;

	reservation_object_assert_held(bo->resv);

	evict_mem = bo->mem;
	evict_mem.mm_node = NULL;
	evict_mem.bus.io_reserved_vm = false;
	evict_mem.bus.io_reserved_count = 0;

	placement.num_placement = 0;
	placement.num_busy_placement = 0;
	bdev->driver->evict_flags(bo, &placement);
	ret = ttm_bo_mem_space(bo, &placement, &evict_mem, ctx);
	if (ret) {
		if (ret != -ERESTARTSYS) {
			pr_err("Failed to find memory space for buffer 0x%p eviction\n",
			       bo);
			ttm_bo_mem_space_debug(bo, &placement);
		}
		goto out;
	}

	ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, ctx);
	if (unlikely(ret)) {
		if (ret != -ERESTARTSYS)
			pr_err("Buffer eviction failed\n");
		ttm_bo_mem_put(bo, &evict_mem);
		goto out;
	}
	bo->evicted = true;
out:
	return ret;
}

bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
			      const struct ttm_place *place)
{
	/* Don't evict this BO if it's outside of the
	 * requested placement range
	 */
	if (place->fpfn >= (bo->mem.start + bo->mem.size) ||
	    (place->lpfn && place->lpfn <= bo->mem.start))
		return false;

	return true;
}
EXPORT_SYMBOL(ttm_bo_eviction_valuable);

/**
 * Check whether the target bo is allowed to be evicted or swapped out:
 *
 * a. if it shares a reservation object with ctx->resv, that object is
 * assumed to be locked already, so don't lock it again; return true
 * directly when either the operation allows reserved eviction or the
 * target bo is already on the delayed free list;
 *
 * b. Otherwise, trylock it.
 */
static bool ttm_bo_evict_swapout_allowable(struct ttm_buffer_object *bo,
					   struct ttm_operation_ctx *ctx, bool *locked)
{
	bool ret = false;

	*locked = false;
	if (bo->resv == ctx->resv) {
		reservation_object_assert_held(bo->resv);
		if (ctx->allow_reserved_eviction || !list_empty(&bo->ddestroy))
			ret = true;
	} else {
		*locked = reservation_object_trylock(bo->resv);
		ret = *locked;
	}

	return ret;
}
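
/*
 * Walk the LRU lists of @mem_type in priority order and evict the first
 * buffer that can be evicted: one that is reservable and, if @place is
 * given, one the driver considers valuable to evict for that placement.
 */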
static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
			       uint32_t mem_type,
			       const struct ttm_place *place,
			       struct ttm_operation_ctx *ctx)
{
	struct ttm_bo_global *glob = bdev->glob;
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
	struct ttm_buffer_object *bo = NULL;
	bool locked = false;
	unsigned i;
	int ret;

	spin_lock(&glob->lru_lock);
	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
		list_for_each_entry(bo, &man->lru[i], lru) {
			if (!ttm_bo_evict_swapout_allowable(bo, ctx, &locked))
				continue;

			if (place && !bdev->driver->eviction_valuable(bo,
								      place)) {
				if (locked)
					reservation_object_unlock(bo->resv);
				continue;
			}
			break;
		}

		/* If the inner loop terminated early, we have our candidate */
		if (&bo->lru != &man->lru[i])
			break;

		bo = NULL;
	}

	if (!bo) {
		spin_unlock(&glob->lru_lock);
		return -EBUSY;
	}

	kref_get(&bo->list_kref);

	if (!list_empty(&bo->ddestroy)) {
		ret = ttm_bo_cleanup_refs(bo, ctx->interruptible,
					  ctx->no_wait_gpu, locked);
		kref_put(&bo->list_kref, ttm_bo_release_list);
		return ret;
	}

	ttm_bo_del_from_lru(bo);
	spin_unlock(&glob->lru_lock);

	ret = ttm_bo_evict(bo, ctx);
	if (locked) {
		ttm_bo_unreserve(bo);
	} else {
		spin_lock(&glob->lru_lock);
		ttm_bo_add_to_lru(bo);
		spin_unlock(&glob->lru_lock);
	}

	kref_put(&bo->list_kref, ttm_bo_release_list);
	return ret;
}

void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bo->bdev->man[mem->mem_type];

	if (mem->mm_node)
		(*man->func->put_node)(man, mem);
}
EXPORT_SYMBOL(ttm_bo_mem_put);

/**
 * Add the last move fence to the BO and reserve a new shared slot.
 */
static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
				 struct ttm_mem_type_manager *man,
				 struct ttm_mem_reg *mem)
{
	struct dma_fence *fence;
	int ret;

	spin_lock(&man->move_lock);
	fence = dma_fence_get(man->move);
	spin_unlock(&man->move_lock);

	if (fence) {
		reservation_object_add_shared_fence(bo->resv, fence);

		ret = reservation_object_reserve_shared(bo->resv);
		if (unlikely(ret))
			return ret;

		dma_fence_put(bo->moving);
		bo->moving = fence;
	}

	return 0;
}

/**
 * Repeatedly evict memory from the LRU for @mem_type until we create enough
 * space, or we've evicted everything and there isn't enough space.
 */
static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
				  uint32_t mem_type,
				  const struct ttm_place *place,
				  struct ttm_mem_reg *mem,
				  struct ttm_operation_ctx *ctx)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
	int ret;

	do {
		ret = (*man->func->get_node)(man, bo, place, mem);
		if (unlikely(ret != 0))
			return ret;
		if (mem->mm_node)
			break;
		ret = ttm_mem_evict_first(bdev, mem_type, place, ctx);
		if (unlikely(ret != 0))
			return ret;
	} while (1);
	mem->mem_type = mem_type;
	return ttm_bo_add_move_fence(bo, man, mem);
}

static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
				      uint32_t cur_placement,
				      uint32_t proposed_placement)
{
	uint32_t caching = proposed_placement & TTM_PL_MASK_CACHING;
	uint32_t result = proposed_placement & ~TTM_PL_MASK_CACHING;

	/**
	 * Keep current caching if possible.
	 */

	if ((cur_placement & caching) != 0)
		result |= (cur_placement & caching);
	else if ((man->default_caching & caching) != 0)
		result |= man->default_caching;
	else if ((TTM_PL_FLAG_CACHED & caching) != 0)
		result |= TTM_PL_FLAG_CACHED;
	else if ((TTM_PL_FLAG_WC & caching) != 0)
		result |= TTM_PL_FLAG_WC;
	else if ((TTM_PL_FLAG_UNCACHED & caching) != 0)
		result |= TTM_PL_FLAG_UNCACHED;

	return result;
}

static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
				 uint32_t mem_type,
				 const struct ttm_place *place,
				 uint32_t *masked_placement)
{
	uint32_t cur_flags = ttm_bo_type_flags(mem_type);

	if ((cur_flags & place->flags & TTM_PL_MASK_MEM) == 0)
		return false;

	if ((place->flags & man->available_caching) == 0)
		return false;

	cur_flags |= (place->flags & man->available_caching);

	*masked_placement = cur_flags;
	return true;
}

/**
 * Creates space for memory region @mem according to its type.
 *
 * This function first searches for free space in compatible memory types in
 * the priority order defined by the driver. If free space isn't found, then
 * ttm_bo_mem_force_space is attempted in priority order to evict and find
 * space.
 */
int ttm_bo_mem_space(struct ttm_buffer_object *bo,
		     struct ttm_placement *placement,
		     struct ttm_mem_reg *mem,
		     struct ttm_operation_ctx *ctx)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man;
	uint32_t mem_type = TTM_PL_SYSTEM;
	uint32_t cur_flags = 0;
	bool type_found = false;
	bool type_ok = false;
	bool has_erestartsys = false;
	int i, ret;

	ret = reservation_object_reserve_shared(bo->resv);
	if (unlikely(ret))
		return ret;

	mem->mm_node = NULL;
	for (i = 0; i < placement->num_placement; ++i) {
		const struct ttm_place *place = &placement->placement[i];

		ret = ttm_mem_type_from_place(place, &mem_type);
		if (ret)
			return ret;
		man = &bdev->man[mem_type];
		if (!man->has_type || !man->use_type)
			continue;

		type_ok = ttm_bo_mt_compatible(man, mem_type, place,
					       &cur_flags);

		if (!type_ok)
			continue;

		type_found = true;
		cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
						  cur_flags);
		/*
		 * Use the access and other non-mapping-related flag bits from
		 * the memory placement flags to the current flags
		 */
		ttm_flag_masked(&cur_flags, place->flags,
				~TTM_PL_MASK_MEMTYPE);

		if (mem_type == TTM_PL_SYSTEM)
			break;

		ret = (*man->func->get_node)(man, bo, place, mem);
		if (unlikely(ret))
			return ret;

		if (mem->mm_node) {
			ret = ttm_bo_add_move_fence(bo, man, mem);
			if (unlikely(ret)) {
				(*man->func->put_node)(man, mem);
				return ret;
			}
			break;
		}
	}

	if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || mem->mm_node) {
		mem->mem_type = mem_type;
		mem->placement = cur_flags;
		return 0;
	}

	for (i = 0; i < placement->num_busy_placement; ++i) {
		const struct ttm_place *place = &placement->busy_placement[i];

		ret = ttm_mem_type_from_place(place, &mem_type);
		if (ret)
			return ret;
		man = &bdev->man[mem_type];
		if (!man->has_type || !man->use_type)
			continue;
		if (!ttm_bo_mt_compatible(man, mem_type, place, &cur_flags))
			continue;

		type_found = true;
		cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
						  cur_flags);
		/*
		 * Use the access and other non-mapping-related flag bits from
		 * the memory placement flags to the current flags
		 */
		ttm_flag_masked(&cur_flags, place->flags,
				~TTM_PL_MASK_MEMTYPE);

		if (mem_type == TTM_PL_SYSTEM) {
			mem->mem_type = mem_type;
			mem->placement = cur_flags;
			mem->mm_node = NULL;
			return 0;
		}

		ret = ttm_bo_mem_force_space(bo, mem_type, place, mem, ctx);
		if (ret == 0 && mem->mm_node) {
			mem->placement = cur_flags;
			return 0;
		}
		if (ret == -ERESTARTSYS)
			has_erestartsys = true;
	}

	if (!type_found) {
		pr_err("No compatible memory type found\n");
		return -EINVAL;
	}

	return (has_erestartsys) ? -ERESTARTSYS : -ENOMEM;
}
EXPORT_SYMBOL(ttm_bo_mem_space);
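
/*
 * Find space matching @placement and move the reserved buffer there,
 * releasing the newly allocated node again if the move itself fails.
 */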
static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
			      struct ttm_placement *placement,
			      struct ttm_operation_ctx *ctx)
{
	int ret = 0;
	struct ttm_mem_reg mem;

	reservation_object_assert_held(bo->resv);

	mem.num_pages = bo->num_pages;
	mem.size = mem.num_pages << PAGE_SHIFT;
	mem.page_alignment = bo->mem.page_alignment;
	mem.bus.io_reserved_vm = false;
	mem.bus.io_reserved_count = 0;
	/*
	 * Determine where to move the buffer.
	 */
	ret = ttm_bo_mem_space(bo, placement, &mem, ctx);
	if (ret)
		goto out_unlock;
	ret = ttm_bo_handle_move_mem(bo, &mem, false, ctx);
out_unlock:
	if (ret && mem.mm_node)
		ttm_bo_mem_put(bo, &mem);
	return ret;
}
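
/*
 * Return true if the buffer's current memory region already satisfies one
 * of the given places: it lies inside the place's pfn range, and its
 * caching, memory type and contiguity flags match.
 */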
static bool ttm_bo_places_compat(const struct ttm_place *places,
				 unsigned num_placement,
				 struct ttm_mem_reg *mem,
				 uint32_t *new_flags)
{
	unsigned i;

	for (i = 0; i < num_placement; i++) {
		const struct ttm_place *heap = &places[i];

		if (mem->mm_node && (mem->start < heap->fpfn ||
		     (heap->lpfn != 0 && (mem->start + mem->num_pages) > heap->lpfn)))
			continue;

		*new_flags = heap->flags;
		if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) &&
		    (*new_flags & mem->placement & TTM_PL_MASK_MEM) &&
		    (!(*new_flags & TTM_PL_FLAG_CONTIGUOUS) ||
		     (mem->placement & TTM_PL_FLAG_CONTIGUOUS)))
			return true;
	}
	return false;
}

bool ttm_bo_mem_compat(struct ttm_placement *placement,
		       struct ttm_mem_reg *mem,
		       uint32_t *new_flags)
{
	if (ttm_bo_places_compat(placement->placement, placement->num_placement,
				 mem, new_flags))
		return true;

	if ((placement->busy_placement != placement->placement ||
	     placement->num_busy_placement > placement->num_placement) &&
	    ttm_bo_places_compat(placement->busy_placement,
				 placement->num_busy_placement,
				 mem, new_flags))
		return true;

	return false;
}
EXPORT_SYMBOL(ttm_bo_mem_compat);

int ttm_bo_validate(struct ttm_buffer_object *bo,
		    struct ttm_placement *placement,
		    struct ttm_operation_ctx *ctx)
{
	int ret;
	uint32_t new_flags;

	reservation_object_assert_held(bo->resv);
	/*
	 * Check whether we need to move buffer.
	 */
	if (!ttm_bo_mem_compat(placement, &bo->mem, &new_flags)) {
		ret = ttm_bo_move_buffer(bo, placement, ctx);
		if (ret)
			return ret;
	} else {
		/*
		 * Use the access and other non-mapping-related flag bits from
		 * the compatible memory placement flags to the active flags
		 */
		ttm_flag_masked(&bo->mem.placement, new_flags,
				~TTM_PL_MASK_MEMTYPE);
	}
	/*
	 * We might need to add a TTM.
	 */
	if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
		ret = ttm_bo_add_ttm(bo, true);
		if (ret)
			return ret;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_bo_validate);

int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
			 struct ttm_buffer_object *bo,
			 unsigned long size,
			 enum ttm_bo_type type,
			 struct ttm_placement *placement,
			 uint32_t page_alignment,
			 struct ttm_operation_ctx *ctx,
			 struct file *persistent_swap_storage,
			 size_t acc_size,
			 struct sg_table *sg,
			 struct reservation_object *resv,
			 void (*destroy) (struct ttm_buffer_object *))
{
	int ret = 0;
	unsigned long num_pages;
	struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
	bool locked;

	ret = ttm_mem_global_alloc(mem_glob, acc_size, ctx);
	if (ret) {
		pr_err("Out of kernel memory\n");
		if (destroy)
			(*destroy)(bo);
		else
			kfree(bo);
		return -ENOMEM;
	}

	num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (num_pages == 0) {
		pr_err("Illegal buffer object size\n");
		if (destroy)
			(*destroy)(bo);
		else
			kfree(bo);
		ttm_mem_global_free(mem_glob, acc_size);
		return -EINVAL;
	}
	bo->destroy = destroy;

	kref_init(&bo->kref);
	kref_init(&bo->list_kref);
	atomic_set(&bo->cpu_writers, 0);
	INIT_LIST_HEAD(&bo->lru);
	INIT_LIST_HEAD(&bo->ddestroy);
	INIT_LIST_HEAD(&bo->swap);
	INIT_LIST_HEAD(&bo->io_reserve_lru);
	mutex_init(&bo->wu_mutex);
	bo->bdev = bdev;
	bo->glob = bdev->glob;
	bo->type = type;
	bo->num_pages = num_pages;
	bo->mem.size = num_pages << PAGE_SHIFT;
	bo->mem.mem_type = TTM_PL_SYSTEM;
	bo->mem.num_pages = bo->num_pages;
	bo->mem.mm_node = NULL;
	bo->mem.page_alignment = page_alignment;
	bo->mem.bus.io_reserved_vm = false;
	bo->mem.bus.io_reserved_count = 0;
	bo->moving = NULL;
	bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
	bo->persistent_swap_storage = persistent_swap_storage;
	bo->acc_size = acc_size;
	bo->sg = sg;
	if (resv) {
		bo->resv = resv;
		reservation_object_assert_held(bo->resv);
	} else {
		bo->resv = &bo->ttm_resv;
	}
	reservation_object_init(&bo->ttm_resv);
	atomic_inc(&bo->glob->bo_count);
	drm_vma_node_reset(&bo->vma_node);
	bo->priority = 0;

	/*
	 * For ttm_bo_type_device buffers, allocate
	 * address space from the device.
	 */
	if (bo->type == ttm_bo_type_device ||
	    bo->type == ttm_bo_type_sg)
		ret = drm_vma_offset_add(&bdev->vma_manager, &bo->vma_node,
					 bo->mem.num_pages);

	/* passed reservation objects should already be locked,
	 * since otherwise lockdep will be angered in radeon.
	 */
	if (!resv) {
		locked = reservation_object_trylock(bo->resv);
		WARN_ON(!locked);
	}

	if (likely(!ret))
		ret = ttm_bo_validate(bo, placement, ctx);

	if (unlikely(ret)) {
		if (!resv)
			ttm_bo_unreserve(bo);

		ttm_bo_unref(&bo);
		return ret;
	}

	if (resv && !(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
		spin_lock(&bo->glob->lru_lock);
		ttm_bo_add_to_lru(bo);
		spin_unlock(&bo->glob->lru_lock);
	}

	return ret;
}
EXPORT_SYMBOL(ttm_bo_init_reserved);

int ttm_bo_init(struct ttm_bo_device *bdev,
		struct ttm_buffer_object *bo,
		unsigned long size,
		enum ttm_bo_type type,
		struct ttm_placement *placement,
		uint32_t page_alignment,
		bool interruptible,
		struct file *persistent_swap_storage,
		size_t acc_size,
		struct sg_table *sg,
		struct reservation_object *resv,
		void (*destroy) (struct ttm_buffer_object *))
{
	struct ttm_operation_ctx ctx = { interruptible, false };
	int ret;

	ret = ttm_bo_init_reserved(bdev, bo, size, type, placement,
				   page_alignment, &ctx,
				   persistent_swap_storage, acc_size,
				   sg, resv, destroy);
	if (ret)
		return ret;

	if (!resv)
		ttm_bo_unreserve(bo);

	return 0;
}
EXPORT_SYMBOL(ttm_bo_init);

size_t ttm_bo_acc_size(struct ttm_bo_device *bdev,
		       unsigned long bo_size,
		       unsigned struct_size)
{
	unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
	size_t size = 0;

	size += ttm_round_pot(struct_size);
	size += ttm_round_pot(npages * sizeof(void *));
	size += ttm_round_pot(sizeof(struct ttm_tt));
	return size;
}
EXPORT_SYMBOL(ttm_bo_acc_size);

size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
			   unsigned long bo_size,
			   unsigned struct_size)
{
	unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
	size_t size = 0;

	size += ttm_round_pot(struct_size);
	size += ttm_round_pot(npages * (2*sizeof(void *) + sizeof(dma_addr_t)));
	size += ttm_round_pot(sizeof(struct ttm_dma_tt));
	return size;
}
EXPORT_SYMBOL(ttm_bo_dma_acc_size);

int ttm_bo_create(struct ttm_bo_device *bdev,
		  unsigned long size,
		  enum ttm_bo_type type,
		  struct ttm_placement *placement,
		  uint32_t page_alignment,
		  bool interruptible,
		  struct file *persistent_swap_storage,
		  struct ttm_buffer_object **p_bo)
{
	struct ttm_buffer_object *bo;
	size_t acc_size;
	int ret;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (unlikely(bo == NULL))
		return -ENOMEM;

	acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct ttm_buffer_object));
	ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment,
			  interruptible, persistent_swap_storage, acc_size,
			  NULL, NULL, NULL);
	if (likely(ret == 0))
		*p_bo = bo;

	return ret;
}
EXPORT_SYMBOL(ttm_bo_create);
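
/*
 * Evict every buffer of @mem_type off its LRU lists, then wait for the
 * memory manager's pipelined move fence, leaving the manager empty.
 */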
static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
				   unsigned mem_type)
{
	struct ttm_operation_ctx ctx = { false, false };
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
	struct ttm_bo_global *glob = bdev->glob;
	struct dma_fence *fence;
	int ret;
	unsigned i;

	/*
	 * Can't use standard list traversal since we're unlocking.
	 */

	spin_lock(&glob->lru_lock);
	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
		while (!list_empty(&man->lru[i])) {
			spin_unlock(&glob->lru_lock);
			ret = ttm_mem_evict_first(bdev, mem_type, NULL, &ctx);
			if (ret)
				return ret;
			spin_lock(&glob->lru_lock);
		}
	}
	spin_unlock(&glob->lru_lock);

	spin_lock(&man->move_lock);
	fence = dma_fence_get(man->move);
	spin_unlock(&man->move_lock);

	if (fence) {
		ret = dma_fence_wait(fence, false);
		dma_fence_put(fence);
		if (ret)
			return ret;
	}

	return 0;
}

int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
{
	struct ttm_mem_type_manager *man;
	int ret = -EINVAL;

	if (mem_type >= TTM_NUM_MEM_TYPES) {
		pr_err("Illegal memory type %d\n", mem_type);
		return ret;
	}
	man = &bdev->man[mem_type];

	if (!man->has_type) {
		pr_err("Trying to take down uninitialized memory manager type %u\n",
		       mem_type);
		return ret;
	}

	man->use_type = false;
	man->has_type = false;

	ret = 0;
	if (mem_type > 0) {
		ret = ttm_bo_force_list_clean(bdev, mem_type);
		if (ret) {
			pr_err("Cleanup eviction failed\n");
			return ret;
		}

		ret = (*man->func->takedown)(man);
	}

	dma_fence_put(man->move);
	man->move = NULL;

	return ret;
}
EXPORT_SYMBOL(ttm_bo_clean_mm);

int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];

	if (mem_type == 0 || mem_type >= TTM_NUM_MEM_TYPES) {
		pr_err("Illegal memory manager memory type %u\n", mem_type);
		return -EINVAL;
	}

	if (!man->has_type) {
		pr_err("Memory type %u has not been initialized\n", mem_type);
		return 0;
	}

	return ttm_bo_force_list_clean(bdev, mem_type);
}
EXPORT_SYMBOL(ttm_bo_evict_mm);

int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
		   unsigned long p_size)
{
	int ret;
	struct ttm_mem_type_manager *man;
	unsigned i;

	BUG_ON(type >= TTM_NUM_MEM_TYPES);
	man = &bdev->man[type];
	BUG_ON(man->has_type);
	man->io_reserve_fastpath = true;
	man->use_io_reserve_lru = false;
	mutex_init(&man->io_reserve_mutex);
	spin_lock_init(&man->move_lock);
	INIT_LIST_HEAD(&man->io_reserve_lru);

	ret = bdev->driver->init_mem_type(bdev, type, man);
	if (ret)
		return ret;
	man->bdev = bdev;

	if (type != TTM_PL_SYSTEM) {
		ret = (*man->func->init)(man, p_size);
		if (ret)
			return ret;
	}
	man->has_type = true;
	man->use_type = true;
	man->size = p_size;

	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
		INIT_LIST_HEAD(&man->lru[i]);
	man->move = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_init_mm);

static void ttm_bo_global_kobj_release(struct kobject *kobj)
{
	struct ttm_bo_global *glob =
		container_of(kobj, struct ttm_bo_global, kobj);

	__free_page(glob->dummy_read_page);
	kfree(glob);
}

void ttm_bo_global_release(struct drm_global_reference *ref)
{
	struct ttm_bo_global *glob = ref->object;

	kobject_del(&glob->kobj);
	kobject_put(&glob->kobj);
}
EXPORT_SYMBOL(ttm_bo_global_release);

int ttm_bo_global_init(struct drm_global_reference *ref)
{
	struct ttm_bo_global_ref *bo_ref =
		container_of(ref, struct ttm_bo_global_ref, ref);
	struct ttm_bo_global *glob = ref->object;
	int ret;
	unsigned i;

	mutex_init(&glob->device_list_mutex);
	spin_lock_init(&glob->lru_lock);
	glob->mem_glob = bo_ref->mem_glob;
	glob->mem_glob->bo_glob = glob;
	glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);

	if (unlikely(glob->dummy_read_page == NULL)) {
		ret = -ENOMEM;
		goto out_no_drp;
	}

	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
		INIT_LIST_HEAD(&glob->swap_lru[i]);
	INIT_LIST_HEAD(&glob->device_list);
	atomic_set(&glob->bo_count, 0);

	ret = kobject_init_and_add(
		&glob->kobj, &ttm_bo_glob_kobj_type, ttm_get_kobj(), "buffer_objects");
	if (unlikely(ret != 0))
		kobject_put(&glob->kobj);
	return ret;
out_no_drp:
	kfree(glob);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_global_init);

int ttm_bo_device_release(struct ttm_bo_device *bdev)
{
	int ret = 0;
	unsigned i = TTM_NUM_MEM_TYPES;
	struct ttm_mem_type_manager *man;
	struct ttm_bo_global *glob = bdev->glob;

	while (i--) {
		man = &bdev->man[i];
		if (man->has_type) {
			man->use_type = false;
			if ((i != TTM_PL_SYSTEM) && ttm_bo_clean_mm(bdev, i)) {
				ret = -EBUSY;
				pr_err("DRM memory manager type %d is not clean\n",
				       i);
			}
			man->has_type = false;
		}
	}

	mutex_lock(&glob->device_list_mutex);
	list_del(&bdev->device_list);
	mutex_unlock(&glob->device_list_mutex);

	cancel_delayed_work_sync(&bdev->wq);

	if (ttm_bo_delayed_delete(bdev, true))
		pr_debug("Delayed destroy list was clean\n");

	spin_lock(&glob->lru_lock);
	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
		if (list_empty(&glob->swap_lru[i]))
			pr_debug("Swap list %d was clean\n", i);
	spin_unlock(&glob->lru_lock);

	drm_vma_offset_manager_destroy(&bdev->vma_manager);

	return ret;
}
EXPORT_SYMBOL(ttm_bo_device_release);

int ttm_bo_device_init(struct ttm_bo_device *bdev,
		       struct ttm_bo_global *glob,
		       struct ttm_bo_driver *driver,
		       struct address_space *mapping,
		       uint64_t file_page_offset,
		       bool need_dma32)
{
	int ret = -EINVAL;

	bdev->driver = driver;

	memset(bdev->man, 0, sizeof(bdev->man));

	/*
	 * Initialize the system memory buffer type.
	 * Other types need to be driver / IOCTL initialized.
	 */
	ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0);
	if (unlikely(ret != 0))
		goto out_no_sys;

	drm_vma_offset_manager_init(&bdev->vma_manager, file_page_offset,
				    0x10000000);
	INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
	INIT_LIST_HEAD(&bdev->ddestroy);
	bdev->dev_mapping = mapping;
	bdev->glob = glob;
	bdev->need_dma32 = need_dma32;
	mutex_lock(&glob->device_list_mutex);
	list_add_tail(&bdev->device_list, &glob->device_list);
	mutex_unlock(&glob->device_list_mutex);

	return 0;
out_no_sys:
	return ret;
}
EXPORT_SYMBOL(ttm_bo_device_init);

/*
 * buffer object vm functions.
 */

bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

	if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
		if (mem->mem_type == TTM_PL_SYSTEM)
			return false;

		if (man->flags & TTM_MEMTYPE_FLAG_CMA)
			return false;

		if (mem->placement & TTM_PL_FLAG_CACHED)
			return false;
	}
	return true;
}

void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;

	drm_vma_node_unmap(&bo->vma_node, bdev->dev_mapping);
	ttm_mem_io_free_vm(bo);
}

void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];

	ttm_mem_io_lock(man, false);
	ttm_bo_unmap_virtual_locked(bo);
	ttm_mem_io_unlock(man);
}
EXPORT_SYMBOL(ttm_bo_unmap_virtual);

int ttm_bo_wait(struct ttm_buffer_object *bo,
		bool interruptible, bool no_wait)
{
	long timeout = 15 * HZ;

	if (no_wait) {
		if (reservation_object_test_signaled_rcu(bo->resv, true))
			return 0;
		else
			return -EBUSY;
	}

	timeout = reservation_object_wait_timeout_rcu(bo->resv, true,
						      interruptible, timeout);
	if (timeout < 0)
		return timeout;

	if (timeout == 0)
		return -EBUSY;

	reservation_object_add_excl_fence(bo->resv, NULL);
	return 0;
}
EXPORT_SYMBOL(ttm_bo_wait);

int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
{
	int ret = 0;

	/*
	 * Using ttm_bo_reserve makes sure the lru lists are updated.
	 */

	ret = ttm_bo_reserve(bo, true, no_wait, NULL);
	if (unlikely(ret != 0))
		return ret;
	ret = ttm_bo_wait(bo, true, no_wait);
	if (likely(ret == 0))
		atomic_inc(&bo->cpu_writers);
	ttm_bo_unreserve(bo);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_synccpu_write_grab);

void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
{
	atomic_dec(&bo->cpu_writers);
}
EXPORT_SYMBOL(ttm_bo_synccpu_write_release);

/**
 * A buffer object shrink method that tries to swap out the first
 * buffer object on the bo_global::swap_lru list.
 */
int ttm_bo_swapout(struct ttm_bo_global *glob, struct ttm_operation_ctx *ctx)
{
	struct ttm_buffer_object *bo;
	int ret = -EBUSY;
	bool locked;
	unsigned i;

	spin_lock(&glob->lru_lock);
	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
		list_for_each_entry(bo, &glob->swap_lru[i], swap) {
			if (ttm_bo_evict_swapout_allowable(bo, ctx, &locked)) {
				ret = 0;
				break;
			}
		}
		if (!ret)
			break;
	}

	if (ret) {
		spin_unlock(&glob->lru_lock);
		return ret;
	}

	kref_get(&bo->list_kref);

	if (!list_empty(&bo->ddestroy)) {
		ret = ttm_bo_cleanup_refs(bo, false, false, true);
		kref_put(&bo->list_kref, ttm_bo_release_list);
		return ret;
	}

	ttm_bo_del_from_lru(bo);
	spin_unlock(&glob->lru_lock);

	/**
	 * Move to system cached
	 */

	if (bo->mem.mem_type != TTM_PL_SYSTEM ||
	    bo->ttm->caching_state != tt_cached) {
		struct ttm_operation_ctx ctx = { false, false };
		struct ttm_mem_reg evict_mem;

		evict_mem = bo->mem;
		evict_mem.mm_node = NULL;
		evict_mem.placement = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
		evict_mem.mem_type = TTM_PL_SYSTEM;

		ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, &ctx);
		if (unlikely(ret != 0))
			goto out;
	}

	/**
	 * Make sure BO is idle.
	 */

	ret = ttm_bo_wait(bo, false, false);
	if (unlikely(ret != 0))
		goto out;

	ttm_bo_unmap_virtual(bo);

	/**
	 * Swap out. Buffer will be swapped in again as soon as
	 * anyone tries to access a ttm page.
	 */

	if (bo->bdev->driver->swap_notify)
		bo->bdev->driver->swap_notify(bo);

	ret = ttm_tt_swapout(bo->ttm, bo->persistent_swap_storage);
out:

	/**
	 * Unreserve without putting on LRU to avoid swapping out an
	 * already swapped buffer.
	 */

	reservation_object_unlock(bo->resv);
	kref_put(&bo->list_kref, ttm_bo_release_list);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_swapout);

void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false
	};

	while (ttm_bo_swapout(bdev->glob, &ctx) == 0)
		;
}
EXPORT_SYMBOL(ttm_bo_swapout_all);

/**
 * ttm_bo_wait_unreserved - interruptible wait for a buffer object to become
 * unreserved
 *
 * @bo: Pointer to buffer
 */
int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo)
{
	int ret;

	/*
	 * In the absence of a wait_unlocked API,
	 * use the bo::wu_mutex to avoid triggering livelocks due to
	 * concurrent use of this function. Note that this use of
	 * bo::wu_mutex can go away if we change locking order to
	 * mmap_sem -> bo::reserve.
	 */
	ret = mutex_lock_interruptible(&bo->wu_mutex);
	if (unlikely(ret != 0))
		return -ERESTARTSYS;
	if (!ww_mutex_is_locked(&bo->resv->lock))
		goto out_unlock;
	ret = reservation_object_lock_interruptible(bo->resv, NULL);
	if (ret == -EINTR)
		ret = -ERESTARTSYS;
	if (unlikely(ret != 0))
		goto out_unlock;
	reservation_object_unlock(bo->resv);

out_unlock:
	mutex_unlock(&bo->wu_mutex);
	return ret;
}