
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
#include <linux/file.h>
#include <drm/drm_cache.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_page_alloc.h>
#include <drm/ttm/ttm_set_memory.h>

/**
 * Allocates a ttm structure for the given BO.
 */
int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc)
{
	struct ttm_bo_device *bdev = bo->bdev;
	uint32_t page_flags = 0;

	reservation_object_assert_held(bo->resv);

	if (bdev->need_dma32)
		page_flags |= TTM_PAGE_FLAG_DMA32;

	if (bdev->no_retry)
		page_flags |= TTM_PAGE_FLAG_NO_RETRY;

	switch (bo->type) {
	case ttm_bo_type_device:
		if (zero_alloc)
			page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
		break;
	case ttm_bo_type_kernel:
		break;
	case ttm_bo_type_sg:
		page_flags |= TTM_PAGE_FLAG_SG;
		break;
	default:
		bo->ttm = NULL;
		pr_err("Illegal buffer object type\n");
		return -EINVAL;
	}

	bo->ttm = bdev->driver->ttm_tt_create(bo, page_flags);
	if (unlikely(bo->ttm == NULL))
		return -ENOMEM;

	return 0;
}
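
/*
 * A minimal sketch (hypothetical, not part of this file) of the driver-side
 * ttm_tt_create() callback that the call above dispatches to. The names
 * foo_ttm_tt_create and foo_backend_func are made up for illustration; a
 * real driver supplies its own struct ttm_backend_func with bind/unbind/
 * destroy hooks, following the pattern of in-tree drivers:
 *
 *	static struct ttm_tt *foo_ttm_tt_create(struct ttm_buffer_object *bo,
 *						uint32_t page_flags)
 *	{
 *		struct ttm_tt *tt = kzalloc(sizeof(*tt), GFP_KERNEL);
 *
 *		if (!tt)
 *			return NULL;
 *		tt->func = &foo_backend_func;	(hypothetical backend ops)
 *		if (ttm_tt_init(tt, bo, page_flags)) {
 *			kfree(tt);
 *			return NULL;
 *		}
 *		return tt;
 *	}
 */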

/**
 * Allocates storage for pointers to the pages that back the ttm.
 */
static int ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
{
	ttm->pages = kvmalloc_array(ttm->num_pages, sizeof(void*),
				    GFP_KERNEL | __GFP_ZERO);
	if (!ttm->pages)
		return -ENOMEM;
	return 0;
}
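
/*
 * Allocates the page and DMA-address arrays in a single allocation: the
 * dma_address array is carved out of the same buffer, directly behind the
 * num_pages page pointers.
 */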
static int ttm_dma_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
{
	ttm->ttm.pages = kvmalloc_array(ttm->ttm.num_pages,
					sizeof(*ttm->ttm.pages) +
					sizeof(*ttm->dma_address),
					GFP_KERNEL | __GFP_ZERO);
	if (!ttm->ttm.pages)
		return -ENOMEM;
	ttm->dma_address = (void *) (ttm->ttm.pages + ttm->ttm.num_pages);
	return 0;
}

static int ttm_sg_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
{
	ttm->dma_address = kvmalloc_array(ttm->ttm.num_pages,
					  sizeof(*ttm->dma_address),
					  GFP_KERNEL | __GFP_ZERO);
	if (!ttm->dma_address)
		return -ENOMEM;
	return 0;
}
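
/*
 * Set the caching attribute of a single page in the kernel linear map.
 * Highmem pages have no linear mapping and need no change. A page that is
 * not currently in the default (write-back) state is first returned to
 * write-back to release its current memtype before the new one is applied.
 */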
static int ttm_tt_set_page_caching(struct page *p,
				   enum ttm_caching_state c_old,
				   enum ttm_caching_state c_new)
{
	int ret = 0;

	if (PageHighMem(p))
		return 0;

	if (c_old != tt_cached) {
		/* p isn't in the default caching state, set it to
		 * writeback first to free its current memtype. */
		ret = ttm_set_pages_wb(p, 1);
		if (ret)
			return ret;
	}

	if (c_new == tt_wc)
		ret = ttm_set_pages_wc(p, 1);
	else if (c_new == tt_uncached)
		ret = ttm_set_pages_uc(p, 1);

	return ret;
}

/*
 * Change caching policy for the linear kernel map
 * for range of pages in a ttm.
 */
static int ttm_tt_set_caching(struct ttm_tt *ttm,
			      enum ttm_caching_state c_state)
{
	int i, j;
	struct page *cur_page;
	int ret;

	if (ttm->caching_state == c_state)
		return 0;

	if (ttm->state == tt_unpopulated) {
		/* Change caching but don't populate */
		ttm->caching_state = c_state;
		return 0;
	}

	if (ttm->caching_state == tt_cached)
		drm_clflush_pages(ttm->pages, ttm->num_pages);

	for (i = 0; i < ttm->num_pages; ++i) {
		cur_page = ttm->pages[i];
		if (likely(cur_page != NULL)) {
			ret = ttm_tt_set_page_caching(cur_page,
						      ttm->caching_state,
						      c_state);
			if (unlikely(ret != 0))
				goto out_err;
		}
	}

	ttm->caching_state = c_state;

	return 0;

out_err:
	for (j = 0; j < i; ++j) {
		cur_page = ttm->pages[j];
		if (likely(cur_page != NULL)) {
			(void)ttm_tt_set_page_caching(cur_page, c_state,
						      ttm->caching_state);
		}
	}

	return ret;
}
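
/*
 * Derive the ttm caching state from TTM placement flags and apply it:
 * TTM_PL_FLAG_WC selects write-combined, TTM_PL_FLAG_UNCACHED selects
 * uncached, anything else falls back to the default cached state.
 */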
int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement)
{
	enum ttm_caching_state state;

	if (placement & TTM_PL_FLAG_WC)
		state = tt_wc;
	else if (placement & TTM_PL_FLAG_UNCACHED)
		state = tt_uncached;
	else
		state = tt_cached;

	return ttm_tt_set_caching(ttm, state);
}
EXPORT_SYMBOL(ttm_tt_set_placement_caching);
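
/*
 * Tear a ttm down completely: unbind it, release its backing pages, drop
 * any non-persistent swap storage and finally call the backend's destroy
 * hook.
 */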
void ttm_tt_destroy(struct ttm_tt *ttm)
{
	if (ttm == NULL)
		return;

	ttm_tt_unbind(ttm);

	if (ttm->state == tt_unbound)
		ttm_tt_unpopulate(ttm);

	if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP) &&
	    ttm->swap_storage)
		fput(ttm->swap_storage);

	ttm->swap_storage = NULL;
	ttm->func->destroy(ttm);
}
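
/* Common field initialization shared by all ttm_tt flavours. */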
void ttm_tt_init_fields(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
			uint32_t page_flags)
{
	ttm->bdev = bo->bdev;
	ttm->num_pages = bo->num_pages;
	ttm->caching_state = tt_cached;
	ttm->page_flags = page_flags;
	ttm->state = tt_unpopulated;
	ttm->swap_storage = NULL;
	ttm->sg = bo->sg;
}

int ttm_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
		uint32_t page_flags)
{
	ttm_tt_init_fields(ttm, bo, page_flags);

	if (ttm_tt_alloc_page_directory(ttm)) {
		ttm_tt_destroy(ttm);
		pr_err("Failed allocating page table\n");
		return -ENOMEM;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_tt_init);
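
/* Counterpart of ttm_tt_init(): frees the page directory. */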
void ttm_tt_fini(struct ttm_tt *ttm)
{
	kvfree(ttm->pages);
	ttm->pages = NULL;
}
EXPORT_SYMBOL(ttm_tt_fini);

int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo,
		    uint32_t page_flags)
{
	struct ttm_tt *ttm = &ttm_dma->ttm;

	ttm_tt_init_fields(ttm, bo, page_flags);

	INIT_LIST_HEAD(&ttm_dma->pages_list);
	if (ttm_dma_tt_alloc_page_directory(ttm_dma)) {
		ttm_tt_destroy(ttm);
		pr_err("Failed allocating page table\n");
		return -ENOMEM;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_dma_tt_init);
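
/*
 * Like ttm_dma_tt_init(), but a ttm carrying TTM_PAGE_FLAG_SG keeps its
 * pages in an external scatter-gather table and therefore only needs the
 * dma_address array.
 */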
int ttm_sg_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo,
		   uint32_t page_flags)
{
	struct ttm_tt *ttm = &ttm_dma->ttm;
	int ret;

	ttm_tt_init_fields(ttm, bo, page_flags);

	INIT_LIST_HEAD(&ttm_dma->pages_list);
	if (page_flags & TTM_PAGE_FLAG_SG)
		ret = ttm_sg_tt_alloc_page_directory(ttm_dma);
	else
		ret = ttm_dma_tt_alloc_page_directory(ttm_dma);
	if (ret) {
		ttm_tt_destroy(ttm);
		pr_err("Failed allocating page table\n");
		return -ENOMEM;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_sg_tt_init);

void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma)
{
	struct ttm_tt *ttm = &ttm_dma->ttm;

	/* pages and dma_address share one allocation; free through
	 * whichever pointer owns it (see the alloc_page_directory helpers).
	 */
	if (ttm->pages)
		kvfree(ttm->pages);
	else
		kvfree(ttm_dma->dma_address);
	ttm->pages = NULL;
	ttm_dma->dma_address = NULL;
}
EXPORT_SYMBOL(ttm_dma_tt_fini);
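
/* Unbind a ttm through the backend's unbind hook; no-op unless bound. */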
void ttm_tt_unbind(struct ttm_tt *ttm)
{
	int ret;

	if (ttm->state == tt_bound) {
		ret = ttm->func->unbind(ttm);
		BUG_ON(ret);
		ttm->state = tt_unbound;
	}
}
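
/*
 * Bind a ttm to the memory region described by bo_mem, populating its
 * backing pages first if necessary.
 */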
int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem,
		struct ttm_operation_ctx *ctx)
{
	int ret = 0;

	if (!ttm)
		return -EINVAL;

	if (ttm->state == tt_bound)
		return 0;

	ret = ttm_tt_populate(ttm, ctx);
	if (ret)
		return ret;

	ret = ttm->func->bind(ttm, bo_mem);
	if (unlikely(ret != 0))
		return ret;

	ttm->state = tt_bound;

	return 0;
}
EXPORT_SYMBOL(ttm_tt_bind);
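
/*
 * Copy the contents of a swapped-out ttm back from its shmem swap storage
 * into the (already allocated) backing pages, then drop the swap file
 * unless it is persistent.
 */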
int ttm_tt_swapin(struct ttm_tt *ttm)
{
	struct address_space *swap_space;
	struct file *swap_storage;
	struct page *from_page;
	struct page *to_page;
	int i;
	int ret = -ENOMEM;

	swap_storage = ttm->swap_storage;
	BUG_ON(swap_storage == NULL);

	swap_space = swap_storage->f_mapping;

	for (i = 0; i < ttm->num_pages; ++i) {
		gfp_t gfp_mask = mapping_gfp_mask(swap_space);

		gfp_mask |= (ttm->page_flags & TTM_PAGE_FLAG_NO_RETRY ? __GFP_RETRY_MAYFAIL : 0);
		from_page = shmem_read_mapping_page_gfp(swap_space, i, gfp_mask);

		if (IS_ERR(from_page)) {
			ret = PTR_ERR(from_page);
			goto out_err;
		}
		to_page = ttm->pages[i];
		if (unlikely(to_page == NULL))
			goto out_err;

		copy_highpage(to_page, from_page);
		put_page(from_page);
	}

	if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP))
		fput(swap_storage);
	ttm->swap_storage = NULL;
	ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;

	return 0;

out_err:
	return ret;
}
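
/*
 * Copy a ttm's backing pages out to shmem swap storage and unpopulate the
 * ttm. If no persistent swap storage is supplied, an anonymous shmem file
 * is set up to hold the contents.
 */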
int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage)
{
	struct address_space *swap_space;
	struct file *swap_storage;
	struct page *from_page;
	struct page *to_page;
	int i;
	int ret = -ENOMEM;

	BUG_ON(ttm->state != tt_unbound && ttm->state != tt_unpopulated);
	BUG_ON(ttm->caching_state != tt_cached);

	if (!persistent_swap_storage) {
		swap_storage = shmem_file_setup("ttm swap",
						ttm->num_pages << PAGE_SHIFT,
						0);
		if (IS_ERR(swap_storage)) {
			pr_err("Failed allocating swap storage\n");
			return PTR_ERR(swap_storage);
		}
	} else {
		swap_storage = persistent_swap_storage;
	}

	swap_space = swap_storage->f_mapping;

	for (i = 0; i < ttm->num_pages; ++i) {
		gfp_t gfp_mask = mapping_gfp_mask(swap_space);

		gfp_mask |= (ttm->page_flags & TTM_PAGE_FLAG_NO_RETRY ? __GFP_RETRY_MAYFAIL : 0);

		from_page = ttm->pages[i];
		if (unlikely(from_page == NULL))
			continue;

		to_page = shmem_read_mapping_page_gfp(swap_space, i, gfp_mask);
		if (IS_ERR(to_page)) {
			ret = PTR_ERR(to_page);
			goto out_err;
		}
		copy_highpage(to_page, from_page);
		set_page_dirty(to_page);
		mark_page_accessed(to_page);
		put_page(to_page);
	}

	ttm_tt_unpopulate(ttm);
	ttm->swap_storage = swap_storage;
	ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
	if (persistent_swap_storage)
		ttm->page_flags |= TTM_PAGE_FLAG_PERSISTENT_SWAP;

	return 0;

out_err:
	if (!persistent_swap_storage)
		fput(swap_storage);

	return ret;
}
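
/*
 * Associate the backing pages with the device's address space
 * (ttm->bdev->dev_mapping). SG-backed ttms do not own their pages and are
 * skipped.
 */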
static void ttm_tt_add_mapping(struct ttm_tt *ttm)
{
	pgoff_t i;

	if (ttm->page_flags & TTM_PAGE_FLAG_SG)
		return;

	for (i = 0; i < ttm->num_pages; ++i)
		ttm->pages[i]->mapping = ttm->bdev->dev_mapping;
}
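
/*
 * Allocate backing pages for a ttm, preferring the driver's populate hook
 * and falling back to the generic page-pool allocator.
 */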
int ttm_tt_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
	int ret;

	if (ttm->state != tt_unpopulated)
		return 0;

	if (ttm->bdev->driver->ttm_tt_populate)
		ret = ttm->bdev->driver->ttm_tt_populate(ttm, ctx);
	else
		ret = ttm_pool_populate(ttm, ctx);
	if (!ret)
		ttm_tt_add_mapping(ttm);
	return ret;
}

static void ttm_tt_clear_mapping(struct ttm_tt *ttm)
{
	pgoff_t i;
	struct page **page = ttm->pages;

	if (ttm->page_flags & TTM_PAGE_FLAG_SG)
		return;

	for (i = 0; i < ttm->num_pages; ++i) {
		(*page)->mapping = NULL;
		(*page++)->index = 0;
	}
}
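
/* Release a ttm's backing pages, undoing ttm_tt_populate(). */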
void ttm_tt_unpopulate(struct ttm_tt *ttm)
{
	if (ttm->state == tt_unpopulated)
		return;

	ttm_tt_clear_mapping(ttm);
	if (ttm->bdev->driver->ttm_tt_unpopulate)
		ttm->bdev->driver->ttm_tt_unpopulate(ttm);
	else
		ttm_pool_unpopulate(ttm);
}