ttm_tt.c

/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */
#define pr_fmt(fmt) "[TTM] " fmt

#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
#include <linux/file.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <drm/drm_cache.h>
#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_page_alloc.h>
#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif

/**
 * Allocates storage for pointers to the pages that back the ttm.
 */
static void ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
{
	ttm->pages = kvmalloc_array(ttm->num_pages, sizeof(void*),
				    GFP_KERNEL | __GFP_ZERO);
}

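/*
 * Single allocation that holds both the page pointers and the matching
 * dma_addr_t values: the DMA addresses live directly after the page
 * array, so one kvfree() in ttm_dma_tt_fini() releases both.
 */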
static void ttm_dma_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
{
	ttm->ttm.pages = kvmalloc_array(ttm->ttm.num_pages,
					sizeof(*ttm->ttm.pages) +
					sizeof(*ttm->dma_address),
					GFP_KERNEL | __GFP_ZERO);
	ttm->dma_address = (void *) (ttm->ttm.pages + ttm->ttm.num_pages);
}

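/*
 * Change the caching attribute of a single page in the kernel's linear
 * map. Highmem pages have no linear mapping and are skipped. A page that
 * is not currently write-back cached is returned to write-back first so
 * that its existing memtype is released before the new one is set.
 * On architectures other than x86 this is a no-op.
 */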
#ifdef CONFIG_X86
static inline int ttm_tt_set_page_caching(struct page *p,
					  enum ttm_caching_state c_old,
					  enum ttm_caching_state c_new)
{
	int ret = 0;

	if (PageHighMem(p))
		return 0;

	if (c_old != tt_cached) {
		/* p isn't in the default caching state, set it to
		 * writeback first to free its current memtype. */
		ret = set_pages_wb(p, 1);
		if (ret)
			return ret;
	}

	if (c_new == tt_wc)
		ret = set_memory_wc((unsigned long) page_address(p), 1);
	else if (c_new == tt_uncached)
		ret = set_pages_uc(p, 1);

	return ret;
}
#else /* CONFIG_X86 */
static inline int ttm_tt_set_page_caching(struct page *p,
					  enum ttm_caching_state c_old,
					  enum ttm_caching_state c_new)
{
	return 0;
}
#endif /* CONFIG_X86 */

/*
 * Change caching policy for the linear kernel map
 * for range of pages in a ttm.
 */
static int ttm_tt_set_caching(struct ttm_tt *ttm,
			      enum ttm_caching_state c_state)
{
	int i, j;
	struct page *cur_page;
	int ret;

	if (ttm->caching_state == c_state)
		return 0;

	if (ttm->state == tt_unpopulated) {
		/* Change caching but don't populate */
		ttm->caching_state = c_state;
		return 0;
	}

	if (ttm->caching_state == tt_cached)
		drm_clflush_pages(ttm->pages, ttm->num_pages);

	for (i = 0; i < ttm->num_pages; ++i) {
		cur_page = ttm->pages[i];
		if (likely(cur_page != NULL)) {
			ret = ttm_tt_set_page_caching(cur_page,
						      ttm->caching_state,
						      c_state);
			if (unlikely(ret != 0))
				goto out_err;
		}
	}

	ttm->caching_state = c_state;

	return 0;

out_err:
	for (j = 0; j < i; ++j) {
		cur_page = ttm->pages[j];
		if (likely(cur_page != NULL)) {
			(void)ttm_tt_set_page_caching(cur_page, c_state,
						      ttm->caching_state);
		}
	}

	return ret;
}

int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement)
{
	enum ttm_caching_state state;

	if (placement & TTM_PL_FLAG_WC)
		state = tt_wc;
	else if (placement & TTM_PL_FLAG_UNCACHED)
		state = tt_uncached;
	else
		state = tt_cached;

	return ttm_tt_set_caching(ttm, state);
}
EXPORT_SYMBOL(ttm_tt_set_placement_caching);

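/*
 * Unbind and unpopulate the ttm, drop any non-persistent swap storage,
 * and hand final teardown to the backend's destroy hook. Accepts NULL.
 */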
void ttm_tt_destroy(struct ttm_tt *ttm)
{
	if (ttm == NULL)
		return;

	ttm_tt_unbind(ttm);

	if (ttm->state == tt_unbound)
		ttm_tt_unpopulate(ttm);

	if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP) &&
	    ttm->swap_storage)
		fput(ttm->swap_storage);

	ttm->swap_storage = NULL;
	ttm->func->destroy(ttm);
}

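/*
 * Initialize a ttm for a buffer of @size bytes: record the device, page
 * flags and dummy read page, round the size up to whole pages, and
 * allocate the page directory. Returns -ENOMEM if that allocation fails.
 */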
int ttm_tt_init(struct ttm_tt *ttm, struct ttm_bo_device *bdev,
		unsigned long size, uint32_t page_flags,
		struct page *dummy_read_page)
{
	ttm->bdev = bdev;
	ttm->glob = bdev->glob;
	ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	ttm->caching_state = tt_cached;
	ttm->page_flags = page_flags;
	ttm->dummy_read_page = dummy_read_page;
	ttm->state = tt_unpopulated;
	ttm->swap_storage = NULL;

	ttm_tt_alloc_page_directory(ttm);
	if (!ttm->pages) {
		ttm_tt_destroy(ttm);
		pr_err("Failed allocating page table\n");
		return -ENOMEM;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_tt_init);

void ttm_tt_fini(struct ttm_tt *ttm)
{
	kvfree(ttm->pages);
	ttm->pages = NULL;
}
EXPORT_SYMBOL(ttm_tt_fini);

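/*
 * DMA-aware counterpart of ttm_tt_init(): additionally initializes the
 * pages_list and allocates the combined page/DMA-address directory.
 */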
int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev,
		    unsigned long size, uint32_t page_flags,
		    struct page *dummy_read_page)
{
	struct ttm_tt *ttm = &ttm_dma->ttm;

	ttm->bdev = bdev;
	ttm->glob = bdev->glob;
	ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	ttm->caching_state = tt_cached;
	ttm->page_flags = page_flags;
	ttm->dummy_read_page = dummy_read_page;
	ttm->state = tt_unpopulated;
	ttm->swap_storage = NULL;

	INIT_LIST_HEAD(&ttm_dma->pages_list);
	ttm_dma_tt_alloc_page_directory(ttm_dma);
	if (!ttm->pages) {
		ttm_tt_destroy(ttm);
		pr_err("Failed allocating page table\n");
		return -ENOMEM;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_dma_tt_init);

void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma)
{
	struct ttm_tt *ttm = &ttm_dma->ttm;

	kvfree(ttm->pages);
	ttm->pages = NULL;
	ttm_dma->dma_address = NULL;
}
EXPORT_SYMBOL(ttm_dma_tt_fini);

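/*
 * Unbind the ttm from the GTT if it is currently bound. The backend's
 * unbind hook is not allowed to fail, hence the BUG_ON().
 */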
void ttm_tt_unbind(struct ttm_tt *ttm)
{
	int ret;

	if (ttm->state == tt_bound) {
		ret = ttm->func->unbind(ttm);
		BUG_ON(ret);
		ttm->state = tt_unbound;
	}
}

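/*
 * Populate the ttm's pages if needed, then bind it to the memory region
 * described by @bo_mem. Binding an already-bound ttm is a no-op.
 */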
int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
{
	int ret = 0;

	if (!ttm)
		return -EINVAL;

	if (ttm->state == tt_bound)
		return 0;

	ret = ttm->bdev->driver->ttm_tt_populate(ttm);
	if (ret)
		return ret;

	ret = ttm->func->bind(ttm, bo_mem);
	if (unlikely(ret != 0))
		return ret;

	ttm->state = tt_bound;

	return 0;
}
EXPORT_SYMBOL(ttm_tt_bind);

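/*
 * Copy the ttm's contents back from its shmem swap storage into the
 * already-populated pages, then release the swap storage unless it is
 * persistent and clear the swapped flag.
 */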
int ttm_tt_swapin(struct ttm_tt *ttm)
{
	struct address_space *swap_space;
	struct file *swap_storage;
	struct page *from_page;
	struct page *to_page;
	int i;
	int ret = -ENOMEM;

	swap_storage = ttm->swap_storage;
	BUG_ON(swap_storage == NULL);

	swap_space = swap_storage->f_mapping;

	for (i = 0; i < ttm->num_pages; ++i) {
		from_page = shmem_read_mapping_page(swap_space, i);
		if (IS_ERR(from_page)) {
			ret = PTR_ERR(from_page);
			goto out_err;
		}
		to_page = ttm->pages[i];
		if (unlikely(to_page == NULL))
			goto out_err;

		copy_highpage(to_page, from_page);
		put_page(from_page);
	}

	if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP))
		fput(swap_storage);
	ttm->swap_storage = NULL;
	ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;

	return 0;

out_err:
	return ret;
}

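/*
 * Copy the ttm's pages out to shmem-backed storage, allocating a
 * transient shmem file when no persistent storage is supplied, then
 * unpopulate the ttm. Only unbound, write-back-cached ttms may be
 * swapped out.
 */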
int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage)
{
	struct address_space *swap_space;
	struct file *swap_storage;
	struct page *from_page;
	struct page *to_page;
	int i;
	int ret = -ENOMEM;

	BUG_ON(ttm->state != tt_unbound && ttm->state != tt_unpopulated);
	BUG_ON(ttm->caching_state != tt_cached);

	if (!persistent_swap_storage) {
		swap_storage = shmem_file_setup("ttm swap",
						ttm->num_pages << PAGE_SHIFT,
						0);
		if (IS_ERR(swap_storage)) {
			pr_err("Failed allocating swap storage\n");
			return PTR_ERR(swap_storage);
		}
	} else {
		swap_storage = persistent_swap_storage;
	}

	swap_space = swap_storage->f_mapping;

	for (i = 0; i < ttm->num_pages; ++i) {
		from_page = ttm->pages[i];
		if (unlikely(from_page == NULL))
			continue;
		to_page = shmem_read_mapping_page(swap_space, i);
		if (IS_ERR(to_page)) {
			ret = PTR_ERR(to_page);
			goto out_err;
		}
		copy_highpage(to_page, from_page);
		set_page_dirty(to_page);
		mark_page_accessed(to_page);
		put_page(to_page);
	}

	ttm_tt_unpopulate(ttm);
	ttm->swap_storage = swap_storage;
	ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
	if (persistent_swap_storage)
		ttm->page_flags |= TTM_PAGE_FLAG_PERSISTENT_SWAP;

	return 0;

out_err:
	if (!persistent_swap_storage)
		fput(swap_storage);

	return ret;
}

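/*
 * Clear each page's mapping and index fields before the pages are handed
 * back to the allocator. Pages of an SG-backed ttm (TTM_PAGE_FLAG_SG)
 * are not owned by TTM and are skipped.
 */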
static void ttm_tt_clear_mapping(struct ttm_tt *ttm)
{
	pgoff_t i;
	struct page **page = ttm->pages;

	if (ttm->page_flags & TTM_PAGE_FLAG_SG)
		return;

	for (i = 0; i < ttm->num_pages; ++i) {
		(*page)->mapping = NULL;
		(*page++)->index = 0;
	}
}

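/*
 * Release the ttm's backing pages through the driver's unpopulate hook,
 * clearing the per-page mapping state first. A no-op if the ttm is
 * already unpopulated.
 */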
void ttm_tt_unpopulate(struct ttm_tt *ttm)
{
	if (ttm->state == tt_unpopulated)
		return;

	ttm_tt_clear_mapping(ttm);
	ttm->bdev->driver->ttm_tt_unpopulate(ttm);
}