ttm_bo_util.c

/**************************************************************************
 *
 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drm_vma_manager.h>
#include <linux/io.h>
#include <linux/highmem.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/reservation.h>

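/*
 * Release the memory node (e.g. VRAM or GTT space) currently backing @bo
 * by handing bo->mem back to its memory-type manager.
 */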
void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
{
	ttm_bo_mem_put(bo, &bo->mem);
}

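/*
 * Move a buffer between placements that can both be reached through the
 * TTM page array: wait for the GPU, unbind from the old (non-system)
 * placement and release its node, adjust page caching, and bind to the
 * new placement if it is not plain system memory.
 */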
int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
		    struct ttm_operation_ctx *ctx,
		    struct ttm_mem_reg *new_mem)
{
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;

	if (old_mem->mem_type != TTM_PL_SYSTEM) {
		ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);

		if (unlikely(ret != 0)) {
			if (ret != -ERESTARTSYS)
				pr_err("Failed to expire sync object before unbinding TTM\n");
			return ret;
		}

		ttm_tt_unbind(ttm);
		ttm_bo_free_old_node(bo);
		ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
				TTM_PL_MASK_MEM);
		old_mem->mem_type = TTM_PL_SYSTEM;
	}

	ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
	if (unlikely(ret != 0))
		return ret;

	if (new_mem->mem_type != TTM_PL_SYSTEM) {
		ret = ttm_tt_bind(ttm, new_mem, ctx);
		if (unlikely(ret != 0))
			return ret;
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_ttm);

int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
{
	if (likely(man->io_reserve_fastpath))
		return 0;

	if (interruptible)
		return mutex_lock_interruptible(&man->io_reserve_mutex);

	mutex_lock(&man->io_reserve_mutex);
	return 0;
}
EXPORT_SYMBOL(ttm_mem_io_lock);

void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
{
	if (likely(man->io_reserve_fastpath))
		return;

	mutex_unlock(&man->io_reserve_mutex);
}
EXPORT_SYMBOL(ttm_mem_io_unlock);

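/*
 * When io space for a memory type is limited, mappings are tracked on a
 * per-manager LRU list. ttm_mem_io_evict() unmaps the least recently used
 * buffer so that ttm_mem_io_reserve() can retry the driver's
 * io_mem_reserve() callback after an -EAGAIN.
 */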
static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
{
	struct ttm_buffer_object *bo;

	if (!man->use_io_reserve_lru || list_empty(&man->io_reserve_lru))
		return -EAGAIN;

	bo = list_first_entry(&man->io_reserve_lru,
			      struct ttm_buffer_object,
			      io_reserve_lru);
	list_del_init(&bo->io_reserve_lru);
	ttm_bo_unmap_virtual_locked(bo);

	return 0;
}

int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
		       struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	int ret = 0;

	if (!bdev->driver->io_mem_reserve)
		return 0;
	if (likely(man->io_reserve_fastpath))
		return bdev->driver->io_mem_reserve(bdev, mem);

	if (bdev->driver->io_mem_reserve &&
	    mem->bus.io_reserved_count++ == 0) {
retry:
		ret = bdev->driver->io_mem_reserve(bdev, mem);
		if (ret == -EAGAIN) {
			ret = ttm_mem_io_evict(man);
			if (ret == 0)
				goto retry;
		}
	}
	return ret;
}
EXPORT_SYMBOL(ttm_mem_io_reserve);

void ttm_mem_io_free(struct ttm_bo_device *bdev,
		     struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

	if (likely(man->io_reserve_fastpath))
		return;

	if (bdev->driver->io_mem_reserve &&
	    --mem->bus.io_reserved_count == 0 &&
	    bdev->driver->io_mem_free)
		bdev->driver->io_mem_free(bdev, mem);
}
EXPORT_SYMBOL(ttm_mem_io_free);

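/*
 * The *_vm variants pin and release an io reservation on behalf of CPU
 * mappings of the buffer: they track the state in mem->bus.io_reserved_vm
 * and, where the manager uses the io-reserve LRU, keep the buffer on that
 * list so it can later be evicted by ttm_mem_io_evict().
 */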
int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
{
	struct ttm_mem_reg *mem = &bo->mem;
	int ret;

	if (!mem->bus.io_reserved_vm) {
		struct ttm_mem_type_manager *man =
			&bo->bdev->man[mem->mem_type];

		ret = ttm_mem_io_reserve(bo->bdev, mem);
		if (unlikely(ret != 0))
			return ret;
		mem->bus.io_reserved_vm = true;
		if (man->use_io_reserve_lru)
			list_add_tail(&bo->io_reserve_lru,
				      &man->io_reserve_lru);
	}
	return 0;
}

void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
{
	struct ttm_mem_reg *mem = &bo->mem;

	if (mem->bus.io_reserved_vm) {
		mem->bus.io_reserved_vm = false;
		list_del_init(&bo->io_reserve_lru);
		ttm_mem_io_free(bo->bdev, mem);
	}
}

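/*
 * Map/unmap a whole memory region for CPU access during a memcpy move.
 * For io memory the region is reserved and ioremapped (write-combined when
 * the placement asks for it); for system memory *virtual stays NULL and
 * the caller falls back to copying through the TTM page array.
 */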
static int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
			       void **virtual)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	int ret;
	void *addr;

	*virtual = NULL;
	(void) ttm_mem_io_lock(man, false);
	ret = ttm_mem_io_reserve(bdev, mem);
	ttm_mem_io_unlock(man);
	if (ret || !mem->bus.is_iomem)
		return ret;

	if (mem->bus.addr) {
		addr = mem->bus.addr;
	} else {
		if (mem->placement & TTM_PL_FLAG_WC)
			addr = ioremap_wc(mem->bus.base + mem->bus.offset, mem->bus.size);
		else
			addr = ioremap_nocache(mem->bus.base + mem->bus.offset, mem->bus.size);
		if (!addr) {
			(void) ttm_mem_io_lock(man, false);
			ttm_mem_io_free(bdev, mem);
			ttm_mem_io_unlock(man);
			return -ENOMEM;
		}
	}
	*virtual = addr;
	return 0;
}

static void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
				void *virtual)
{
	struct ttm_mem_type_manager *man;

	man = &bdev->man[mem->mem_type];

	if (virtual && mem->bus.addr == NULL)
		iounmap(virtual);
	(void) ttm_mem_io_lock(man, false);
	ttm_mem_io_free(bdev, mem);
	ttm_mem_io_unlock(man);
}

static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
{
	uint32_t *dstP =
	    (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
	uint32_t *srcP =
	    (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));
	int i;

	for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
		iowrite32(ioread32(srcP++), dstP++);
	return 0;
}

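/*
 * Copy a single page between an ioremapped region and a TTM page array.
 * The TTM page is mapped with the requested protection: kmap_atomic_prot()
 * on x86, and vmap()/kmap() elsewhere depending on whether a non-default
 * pgprot is needed.
 */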
static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
				unsigned long page,
				pgprot_t prot)
{
	struct page *d = ttm->pages[page];
	void *dst;

	if (!d)
		return -ENOMEM;

	src = (void *)((unsigned long)src + (page << PAGE_SHIFT));

#ifdef CONFIG_X86
	dst = kmap_atomic_prot(d, prot);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		dst = vmap(&d, 1, 0, prot);
	else
		dst = kmap(d);
#endif
	if (!dst)
		return -ENOMEM;

	memcpy_fromio(dst, src, PAGE_SIZE);

#ifdef CONFIG_X86
	kunmap_atomic(dst);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		vunmap(dst);
	else
		kunmap(d);
#endif

	return 0;
}

static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
				unsigned long page,
				pgprot_t prot)
{
	struct page *s = ttm->pages[page];
	void *src;

	if (!s)
		return -ENOMEM;

	dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));

#ifdef CONFIG_X86
	src = kmap_atomic_prot(s, prot);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		src = vmap(&s, 1, 0, prot);
	else
		src = kmap(s);
#endif
	if (!src)
		return -ENOMEM;

	memcpy_toio(dst, src, PAGE_SIZE);

#ifdef CONFIG_X86
	kunmap_atomic(src);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		vunmap(src);
	else
		kunmap(s);
#endif

	return 0;
}

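/*
 * Fallback move path: map both placements for CPU access, wait for the
 * GPU, and copy page by page (back to front when source and destination
 * overlap within the same memory type). On success the old node is
 * released; on error it is kept so the buffer retains a valid backing.
 */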
int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
		       struct ttm_operation_ctx *ctx,
		       struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg old_copy = *old_mem;
	void *old_iomap;
	void *new_iomap;
	int ret;
	unsigned long i;
	unsigned long page;
	unsigned long add = 0;
	int dir;

	ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);
	if (ret)
		return ret;

	ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
	if (ret)
		return ret;
	ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
	if (ret)
		goto out;

	/*
	 * Single TTM move. NOP.
	 */
	if (old_iomap == NULL && new_iomap == NULL)
		goto out2;

	/*
	 * Don't move nonexistent data. Clear destination instead.
	 */
	if (old_iomap == NULL &&
	    (ttm == NULL || (ttm->state == tt_unpopulated &&
			     !(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)))) {
		memset_io(new_iomap, 0, new_mem->num_pages*PAGE_SIZE);
		goto out2;
	}

	/*
	 * TTM might be null for moves within the same region.
	 */
	if (ttm && ttm->state == tt_unpopulated) {
		ret = ttm->bdev->driver->ttm_tt_populate(ttm, ctx);
		if (ret)
			goto out1;
	}

	add = 0;
	dir = 1;

	if ((old_mem->mem_type == new_mem->mem_type) &&
	    (new_mem->start < old_mem->start + old_mem->size)) {
		dir = -1;
		add = new_mem->num_pages - 1;
	}

	for (i = 0; i < new_mem->num_pages; ++i) {
		page = i * dir + add;
		if (old_iomap == NULL) {
			pgprot_t prot = ttm_io_prot(old_mem->placement,
						    PAGE_KERNEL);
			ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
						   prot);
		} else if (new_iomap == NULL) {
			pgprot_t prot = ttm_io_prot(new_mem->placement,
						    PAGE_KERNEL);
			ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
						   prot);
		} else
			ret = ttm_copy_io_page(new_iomap, old_iomap, page);
		if (ret)
			goto out1;
	}
	mb();
out2:
	old_copy = *old_mem;
	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	if (man->flags & TTM_MEMTYPE_FLAG_FIXED) {
		ttm_tt_destroy(ttm);
		bo->ttm = NULL;
	}

out1:
	ttm_mem_reg_iounmap(bdev, old_mem, new_iomap);
out:
	ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);

	/*
	 * On error, keep the mm node!
	 */
	if (!ret)
		ttm_bo_mem_put(bo, &old_copy);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_move_memcpy);

static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
{
	kfree(bo);
}

/**
 * ttm_buffer_object_transfer
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
 * holding the data of @bo with the old placement.
 *
 * This is a utility function that may be called after an accelerated move
 * has been scheduled. A new buffer object is created as a placeholder for
 * the old data while it's being copied. When that buffer object is idle,
 * it can be destroyed, releasing the space of the old placement.
 *
 * Returns:
 * !0: Failure.
 */

static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
				      struct ttm_buffer_object **new_obj)
{
	struct ttm_buffer_object *fbo;
	int ret;

	fbo = kmalloc(sizeof(*fbo), GFP_KERNEL);
	if (!fbo)
		return -ENOMEM;

	*fbo = *bo;

	/**
	 * Fix up members that we shouldn't copy directly:
	 * TODO: Explicit member copy would probably be better here.
	 */

	atomic_inc(&bo->glob->bo_count);
	INIT_LIST_HEAD(&fbo->ddestroy);
	INIT_LIST_HEAD(&fbo->lru);
	INIT_LIST_HEAD(&fbo->swap);
	INIT_LIST_HEAD(&fbo->io_reserve_lru);
	mutex_init(&fbo->wu_mutex);
	fbo->moving = NULL;
	drm_vma_node_reset(&fbo->vma_node);
	atomic_set(&fbo->cpu_writers, 0);

	kref_init(&fbo->list_kref);
	kref_init(&fbo->kref);
	fbo->destroy = &ttm_transfered_destroy;
	fbo->acc_size = 0;
	fbo->resv = &fbo->ttm_resv;
	reservation_object_init(fbo->resv);
	ret = reservation_object_trylock(fbo->resv);
	WARN_ON(!ret);

	*new_obj = fbo;
	return 0;
}

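/*
 * Derive the kernel page protection to use for a mapping of the given
 * placement: cached placements are returned unchanged, write-combined ones
 * get pgprot_writecombine() and the rest pgprot_noncached(), with the
 * exact rule depending on the architecture blocks below.
 */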
pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
{
	/* Cached mappings need no adjustment */
	if (caching_flags & TTM_PL_FLAG_CACHED)
		return tmp;

#if defined(__i386__) || defined(__x86_64__)
	if (caching_flags & TTM_PL_FLAG_WC)
		tmp = pgprot_writecombine(tmp);
	else if (boot_cpu_data.x86 > 3)
		tmp = pgprot_noncached(tmp);
#endif
#if defined(__ia64__) || defined(__arm__) || defined(__aarch64__) || \
    defined(__powerpc__)
	if (caching_flags & TTM_PL_FLAG_WC)
		tmp = pgprot_writecombine(tmp);
	else
		tmp = pgprot_noncached(tmp);
#endif
#if defined(__sparc__) || defined(__mips__)
	tmp = pgprot_noncached(tmp);
#endif
	return tmp;
}
EXPORT_SYMBOL(ttm_io_prot);

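/*
 * Kernel-mapping helpers for ttm_bo_kmap(): ttm_bo_ioremap() handles io
 * memory (either reusing a premapped bus address or ioremapping the
 * requested range), while ttm_bo_kmap_ttm() handles system pages via
 * kmap() for a single cached page or vmap() otherwise.
 */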
static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
			  unsigned long offset,
			  unsigned long size,
			  struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem;

	if (bo->mem.bus.addr) {
		map->bo_kmap_type = ttm_bo_map_premapped;
		map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
	} else {
		map->bo_kmap_type = ttm_bo_map_iomap;
		if (mem->placement & TTM_PL_FLAG_WC)
			map->virtual = ioremap_wc(bo->mem.bus.base + bo->mem.bus.offset + offset,
						  size);
		else
			map->virtual = ioremap_nocache(bo->mem.bus.base + bo->mem.bus.offset + offset,
						       size);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
			   unsigned long start_page,
			   unsigned long num_pages,
			   struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem;
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false
	};
	struct ttm_tt *ttm = bo->ttm;
	pgprot_t prot;
	int ret;

	BUG_ON(!ttm);

	if (ttm->state == tt_unpopulated) {
		ret = ttm->bdev->driver->ttm_tt_populate(ttm, &ctx);
		if (ret)
			return ret;
	}

	if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
		/*
		 * We're mapping a single page, and the desired
		 * page protection is consistent with the bo.
		 */
		map->bo_kmap_type = ttm_bo_map_kmap;
		map->page = ttm->pages[start_page];
		map->virtual = kmap(map->page);
	} else {
		/*
		 * We need to use vmap to get the desired page protection
		 * or to make the buffer object look contiguous.
		 */
		prot = ttm_io_prot(mem->placement, PAGE_KERNEL);
		map->bo_kmap_type = ttm_bo_map_vmap;
		map->virtual = vmap(ttm->pages + start_page, num_pages,
				    0, prot);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

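/*
 * Map @num_pages of the buffer object, starting at @start_page, into
 * kernel address space. The range is validated against bo->num_pages and
 * the io space is reserved before dispatching to ttm_bo_ioremap() or
 * ttm_bo_kmap_ttm(); ttm_bo_kunmap() undoes the mapping and reservation.
 */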
int ttm_bo_kmap(struct ttm_buffer_object *bo,
		unsigned long start_page, unsigned long num_pages,
		struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_type_manager *man =
		&bo->bdev->man[bo->mem.mem_type];
	unsigned long offset, size;
	int ret;

	map->virtual = NULL;
	map->bo = bo;
	if (num_pages > bo->num_pages)
		return -EINVAL;
	if (start_page > bo->num_pages)
		return -EINVAL;
#if 0
	if (num_pages > 1 && !capable(CAP_SYS_ADMIN))
		return -EPERM;
#endif
	(void) ttm_mem_io_lock(man, false);
	ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
	ttm_mem_io_unlock(man);
	if (ret)
		return ret;
	if (!bo->mem.bus.is_iomem) {
		return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
	} else {
		offset = start_page << PAGE_SHIFT;
		size = num_pages << PAGE_SHIFT;
		return ttm_bo_ioremap(bo, offset, size, map);
	}
}
EXPORT_SYMBOL(ttm_bo_kmap);

void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{
	struct ttm_buffer_object *bo = map->bo;
	struct ttm_mem_type_manager *man =
		&bo->bdev->man[bo->mem.mem_type];

	if (!map->virtual)
		return;
	switch (map->bo_kmap_type) {
	case ttm_bo_map_iomap:
		iounmap(map->virtual);
		break;
	case ttm_bo_map_vmap:
		vunmap(map->virtual);
		break;
	case ttm_bo_map_kmap:
		kunmap(map->page);
		break;
	case ttm_bo_map_premapped:
		break;
	default:
		BUG();
	}
	(void) ttm_mem_io_lock(man, false);
	ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
	ttm_mem_io_unlock(man);
	map->virtual = NULL;
	map->page = NULL;
}
EXPORT_SYMBOL(ttm_bo_kunmap);

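/*
 * Finish an accelerated (GPU-copied) move. The move fence is attached to
 * the reservation object; on eviction the old placement is waited on and
 * released immediately, otherwise a "ghost" buffer object takes over the
 * old memory and fence so the move can complete asynchronously.
 */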
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
			      struct dma_fence *fence,
			      bool evict,
			      struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;
	struct ttm_buffer_object *ghost_obj;

	reservation_object_add_excl_fence(bo->resv, fence);
	if (evict) {
		ret = ttm_bo_wait(bo, false, false);
		if (ret)
			return ret;

		if (man->flags & TTM_MEMTYPE_FLAG_FIXED) {
			ttm_tt_destroy(bo->ttm);
			bo->ttm = NULL;
		}
		ttm_bo_free_old_node(bo);
	} else {
		/**
		 * This should help pipeline ordinary buffer moves.
		 *
		 * Hang old buffer memory on a new buffer object,
		 * and leave it to be released when the GPU
		 * operation has completed.
		 */

		dma_fence_put(bo->moving);
		bo->moving = dma_fence_get(fence);

		ret = ttm_buffer_object_transfer(bo, &ghost_obj);
		if (ret)
			return ret;

		reservation_object_add_excl_fence(ghost_obj->resv, fence);

		/**
		 * If we're not moving to fixed memory, the TTM object
		 * needs to stay alive. Otherwise hang it on the ghost
		 * bo to be unbound and destroyed.
		 */

		if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
			ghost_obj->ttm = NULL;
		else
			bo->ttm = NULL;

		ttm_bo_unreserve(ghost_obj);
		ttm_bo_unref(&ghost_obj);
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);

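/*
 * Like ttm_bo_move_accel_cleanup(), but also pipelines evictions: when the
 * old memory type has no TTM to unbind, the eviction fence is recorded on
 * the source manager and the old node is freed without waiting. Blocking
 * on the fence is kept only as a last resort.
 */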
int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
			 struct dma_fence *fence, bool evict,
			 struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_reg *old_mem = &bo->mem;

	struct ttm_mem_type_manager *from = &bdev->man[old_mem->mem_type];
	struct ttm_mem_type_manager *to = &bdev->man[new_mem->mem_type];

	int ret;

	reservation_object_add_excl_fence(bo->resv, fence);

	if (!evict) {
		struct ttm_buffer_object *ghost_obj;

		/**
		 * This should help pipeline ordinary buffer moves.
		 *
		 * Hang old buffer memory on a new buffer object,
		 * and leave it to be released when the GPU
		 * operation has completed.
		 */

		dma_fence_put(bo->moving);
		bo->moving = dma_fence_get(fence);

		ret = ttm_buffer_object_transfer(bo, &ghost_obj);
		if (ret)
			return ret;

		reservation_object_add_excl_fence(ghost_obj->resv, fence);

		/**
		 * If we're not moving to fixed memory, the TTM object
		 * needs to stay alive. Otherwise hang it on the ghost
		 * bo to be unbound and destroyed.
		 */

		if (!(to->flags & TTM_MEMTYPE_FLAG_FIXED))
			ghost_obj->ttm = NULL;
		else
			bo->ttm = NULL;

		ttm_bo_unreserve(ghost_obj);
		ttm_bo_unref(&ghost_obj);

	} else if (from->flags & TTM_MEMTYPE_FLAG_FIXED) {
		/**
		 * BO doesn't have a TTM we need to bind/unbind. Just remember
		 * this eviction and free up the allocation.
		 */

		spin_lock(&from->move_lock);
		if (!from->move || dma_fence_is_later(fence, from->move)) {
			dma_fence_put(from->move);
			from->move = dma_fence_get(fence);
		}
		spin_unlock(&from->move_lock);

		ttm_bo_free_old_node(bo);

		dma_fence_put(bo->moving);
		bo->moving = dma_fence_get(fence);

	} else {
		/**
		 * Last resort, wait for the move to be completed.
		 *
		 * Should never happen in practice.
		 */
		ret = ttm_bo_wait(bo, false, false);
		if (ret)
			return ret;

		if (to->flags & TTM_MEMTYPE_FLAG_FIXED) {
			ttm_tt_destroy(bo->ttm);
			bo->ttm = NULL;
		}
		ttm_bo_free_old_node(bo);
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_pipeline_move);