ttm_page_alloc_dma.c

/*
 * Copyright 2011 (c) Oracle Corp.
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Author: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
 */

/*
 * A simple DMA pool loosely based on dmapool.c. It has certain advantages
 * over the DMA pools:
 * - Pool collects recently freed pages for reuse (and hooks up to
 *   the shrinker).
 * - Tracks currently in use pages
 * - Tracks whether the page is UC, WB or cached (and reverts to WB
 *   when freed).
 */

#if defined(CONFIG_SWIOTLB) || defined(CONFIG_INTEL_IOMMU)
#define pr_fmt(fmt) "[TTM] " fmt

#include <linux/dma-mapping.h>
#include <linux/list.h>
#include <linux/seq_file.h> /* for seq_printf */
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/highmem.h>
#include <linux/mm_types.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/atomic.h>
#include <linux/device.h>
#include <linux/kthread.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_page_alloc.h>
#if IS_ENABLED(CONFIG_AGP)
#include <asm/agp.h>
#endif
#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif

#define NUM_PAGES_TO_ALLOC		(PAGE_SIZE/sizeof(struct page *))
#define SMALL_ALLOCATION		4
#define FREE_ALL_PAGES			(~0U)
#define VADDR_FLAG_HUGE_POOL		1UL
#define VADDR_FLAG_UPDATED_COUNT	2UL
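/*
 * Sizing note (assuming 4 KiB pages and 64-bit pointers, which is not
 * enforced here): NUM_PAGES_TO_ALLOC works out to 4096 / 8 = 512 pages,
 * so the pools are grown and shrunk in batches of up to 2 MiB worth of
 * single pages at a time.
 */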
enum pool_type {
	IS_UNDEFINED	= 0,
	IS_WC		= 1 << 1,
	IS_UC		= 1 << 2,
	IS_CACHED	= 1 << 3,
	IS_DMA32	= 1 << 4,
	IS_HUGE		= 1 << 5
};

/*
 * The pool structure. There are up to nine pools:
 *  - generic (not restricted to DMA32):
 *      - write combined, uncached, cached.
 *  - dma32 (up to 2^32 - so up to 4GB):
 *      - write combined, uncached, cached.
 *  - huge (not restricted to DMA32):
 *      - write combined, uncached, cached.
 * for each 'struct device'. The 'cached' is for pages that are actively used.
 * The other ones can be shrunk by the shrinker API if necessary.
 * @pools: The 'struct device->dma_pools' link.
 * @type: Type of the pool
 * @lock: Protects the free_list from concurrent access. Must be
 * used with irqsave/irqrestore variants because the pool allocator may be
 * called from delayed work.
 * @free_list: Pool of pages that are free to be used. No order requirements.
 * @dev: The device that is associated with these pools.
 * @size: Size used during DMA allocation.
 * @npages_free: Count of available pages for re-use.
 * @npages_in_use: Count of pages that are in use.
 * @nfrees: Stats when pool is shrinking.
 * @nrefills: Stats when the pool is grown.
 * @gfp_flags: Flags to pass for alloc_page.
 * @name: Name of the pool.
 * @dev_name: Name derived from dev - similar to how dev_info works.
 *   Used during shutdown as the dev_info during release is unavailable.
 */
struct dma_pool {
	struct list_head pools; /* The 'struct device->dma_pools link */
	enum pool_type type;
	spinlock_t lock;
	struct list_head free_list;
	struct device *dev;
	unsigned size;
	unsigned npages_free;
	unsigned npages_in_use;
	unsigned long nfrees; /* Stats when shrunk. */
	unsigned long nrefills; /* Stats when grown. */
	gfp_t gfp_flags;
	char name[13]; /* "cached dma32" */
	char dev_name[64]; /* Constructed from dev */
};

/*
 * The accounting page keeping track of the allocated page along with
 * the DMA address.
 * @page_list: The link to the 'page_list' in 'struct dma_pool'.
 * @vaddr: The virtual address of the page and a flag if the page belongs to a
 * huge pool.
 * @dma: The bus address of the page. If the page is not allocated
 *   via the DMA API, it will be -1.
 */
struct dma_page {
	struct list_head page_list;
	unsigned long vaddr;
	struct page *p;
	dma_addr_t dma;
};

/*
 * Limits for the pool. They are handled without locks because the only place
 * where they may change is the sysfs store. They won't have an immediate
 * effect anyway, so forcing serialization to access them is pointless.
 */
struct ttm_pool_opts {
	unsigned	alloc_size;
	unsigned	max_size;
	unsigned	small;
};

/*
 * Contains the list of all of the 'struct device' and their corresponding
 * DMA pools. Guarded by _mutex->lock.
 * @pools: The link to 'struct ttm_pool_manager->pools'
 * @dev: The 'struct device' associated with the 'pool'
 * @pool: The 'struct dma_pool' associated with the 'dev'
 */
struct device_pools {
	struct list_head pools;
	struct device *dev;
	struct dma_pool *pool;
};

/*
 * struct ttm_pool_manager - Holds memory pools for fast allocation
 *
 * @lock: Lock used when adding/removing from pools
 * @pools: List of 'struct device' and 'struct dma_pool' tuples.
 * @options: Limits for the pool.
 * @npools: Total amount of pools in existence.
 * @shrinker: The structure used by [un|]register_shrinker
 */
struct ttm_pool_manager {
	struct mutex		lock;
	struct list_head	pools;
	struct ttm_pool_opts	options;
	unsigned		npools;
	struct shrinker		mm_shrink;
	struct kobject		kobj;
};

static struct ttm_pool_manager *_manager;

static struct attribute ttm_page_pool_max = {
	.name = "pool_max_size",
	.mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_page_pool_small = {
	.name = "pool_small_allocation",
	.mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_page_pool_alloc_size = {
	.name = "pool_allocation_size",
	.mode = S_IRUGO | S_IWUSR
};

static struct attribute *ttm_pool_attrs[] = {
	&ttm_page_pool_max,
	&ttm_page_pool_small,
	&ttm_page_pool_alloc_size,
	NULL
};

static void ttm_pool_kobj_release(struct kobject *kobj)
{
	struct ttm_pool_manager *m =
		container_of(kobj, struct ttm_pool_manager, kobj);
	kfree(m);
}

static ssize_t ttm_pool_store(struct kobject *kobj, struct attribute *attr,
			      const char *buffer, size_t size)
{
	struct ttm_pool_manager *m =
		container_of(kobj, struct ttm_pool_manager, kobj);
	int chars;
	unsigned val;

	chars = sscanf(buffer, "%u", &val);
	if (chars == 0)
		return size;

	/* Convert kb to number of pages */
	val = val / (PAGE_SIZE >> 10);

	if (attr == &ttm_page_pool_max) {
		m->options.max_size = val;
	} else if (attr == &ttm_page_pool_small) {
		m->options.small = val;
	} else if (attr == &ttm_page_pool_alloc_size) {
		if (val > NUM_PAGES_TO_ALLOC*8) {
			pr_err("Setting allocation size to %lu is not allowed. Recommended size is %lu\n",
			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
			return size;
		} else if (val > NUM_PAGES_TO_ALLOC) {
			pr_warn("Setting allocation size to larger than %lu is not recommended\n",
				NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
		}
		m->options.alloc_size = val;
	}

	return size;
}
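/*
 * Worked example (assuming 4 KiB pages): the sysfs files take values in KiB,
 * and PAGE_SIZE >> 10 is then 4, so writing "64" to pool_max_size stores
 * 64 / 4 = 16 pages in options.max_size. ttm_pool_show() below performs the
 * inverse conversion when reporting the limits back to user space.
 */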
static ssize_t ttm_pool_show(struct kobject *kobj, struct attribute *attr,
			     char *buffer)
{
	struct ttm_pool_manager *m =
		container_of(kobj, struct ttm_pool_manager, kobj);
	unsigned val = 0;

	if (attr == &ttm_page_pool_max)
		val = m->options.max_size;
	else if (attr == &ttm_page_pool_small)
		val = m->options.small;
	else if (attr == &ttm_page_pool_alloc_size)
		val = m->options.alloc_size;

	val = val * (PAGE_SIZE >> 10);

	return snprintf(buffer, PAGE_SIZE, "%u\n", val);
}

static const struct sysfs_ops ttm_pool_sysfs_ops = {
	.show = &ttm_pool_show,
	.store = &ttm_pool_store,
};

static struct kobj_type ttm_pool_kobj_type = {
	.release = &ttm_pool_kobj_release,
	.sysfs_ops = &ttm_pool_sysfs_ops,
	.default_attrs = ttm_pool_attrs,
};

#ifndef CONFIG_X86
static int set_pages_array_wb(struct page **pages, int addrinarray)
{
#if IS_ENABLED(CONFIG_AGP)
	int i;

	for (i = 0; i < addrinarray; i++)
		unmap_page_from_agp(pages[i]);
#endif
	return 0;
}

static int set_pages_array_wc(struct page **pages, int addrinarray)
{
#if IS_ENABLED(CONFIG_AGP)
	int i;

	for (i = 0; i < addrinarray; i++)
		map_page_into_agp(pages[i]);
#endif
	return 0;
}

static int set_pages_array_uc(struct page **pages, int addrinarray)
{
#if IS_ENABLED(CONFIG_AGP)
	int i;

	for (i = 0; i < addrinarray; i++)
		map_page_into_agp(pages[i]);
#endif
	return 0;
}
#endif /* for !CONFIG_X86 */

static int ttm_set_pages_caching(struct dma_pool *pool,
				 struct page **pages, unsigned cpages)
{
	int r = 0;

	/* Set page caching */
	if (pool->type & IS_UC) {
		r = set_pages_array_uc(pages, cpages);
		if (r)
			pr_err("%s: Failed to set %d pages to uc!\n",
			       pool->dev_name, cpages);
	}
	if (pool->type & IS_WC) {
		r = set_pages_array_wc(pages, cpages);
		if (r)
			pr_err("%s: Failed to set %d pages to wc!\n",
			       pool->dev_name, cpages);
	}
	return r;
}

static void __ttm_dma_free_page(struct dma_pool *pool, struct dma_page *d_page)
{
	dma_addr_t dma = d_page->dma;

	d_page->vaddr &= ~VADDR_FLAG_HUGE_POOL;
	dma_free_coherent(pool->dev, pool->size, (void *)d_page->vaddr, dma);

	kfree(d_page);
	d_page = NULL;
}

static struct dma_page *__ttm_dma_alloc_page(struct dma_pool *pool)
{
	struct dma_page *d_page;
	unsigned long attrs = 0;
	void *vaddr;

	d_page = kmalloc(sizeof(struct dma_page), GFP_KERNEL);
	if (!d_page)
		return NULL;

	if (pool->type & IS_HUGE)
		attrs = DMA_ATTR_NO_WARN;

	vaddr = dma_alloc_attrs(pool->dev, pool->size, &d_page->dma,
				pool->gfp_flags, attrs);
	if (vaddr) {
		if (is_vmalloc_addr(vaddr))
			d_page->p = vmalloc_to_page(vaddr);
		else
			d_page->p = virt_to_page(vaddr);
		d_page->vaddr = (unsigned long)vaddr;
		if (pool->type & IS_HUGE)
			d_page->vaddr |= VADDR_FLAG_HUGE_POOL;
	} else {
		kfree(d_page);
		d_page = NULL;
	}
	return d_page;
}

static enum pool_type ttm_to_type(int flags, enum ttm_caching_state cstate)
{
	enum pool_type type = IS_UNDEFINED;

	if (flags & TTM_PAGE_FLAG_DMA32)
		type |= IS_DMA32;
	if (cstate == tt_cached)
		type |= IS_CACHED;
	else if (cstate == tt_uncached)
		type |= IS_UC;
	else
		type |= IS_WC;

	return type;
}
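/*
 * For example, a TTM with TTM_PAGE_FLAG_DMA32 set and a caching state of
 * tt_uncached maps to IS_DMA32 | IS_UC. ttm_dma_find_pool() later uses this
 * exact type value to look up (or ttm_dma_pool_init() to create) the
 * matching per-device pool.
 */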
static void ttm_pool_update_free_locked(struct dma_pool *pool,
					unsigned freed_pages)
{
	pool->npages_free -= freed_pages;
	pool->nfrees += freed_pages;
}

/* set memory back to wb and free the pages. */
static void ttm_dma_page_put(struct dma_pool *pool, struct dma_page *d_page)
{
	struct page *page = d_page->p;
	unsigned i, num_pages;

	/* Don't set WB on WB page pool. */
	if (!(pool->type & IS_CACHED)) {
		num_pages = pool->size / PAGE_SIZE;
		for (i = 0; i < num_pages; ++i, ++page) {
			if (set_pages_array_wb(&page, 1)) {
				pr_err("%s: Failed to set %d pages to wb!\n",
				       pool->dev_name, 1);
			}
		}
	}

	list_del(&d_page->page_list);
	__ttm_dma_free_page(pool, d_page);
}

static void ttm_dma_pages_put(struct dma_pool *pool, struct list_head *d_pages,
			      struct page *pages[], unsigned npages)
{
	struct dma_page *d_page, *tmp;

	if (pool->type & IS_HUGE) {
		list_for_each_entry_safe(d_page, tmp, d_pages, page_list)
			ttm_dma_page_put(pool, d_page);

		return;
	}

	/* Don't set WB on WB page pool. */
	if (npages && !(pool->type & IS_CACHED) &&
	    set_pages_array_wb(pages, npages))
		pr_err("%s: Failed to set %d pages to wb!\n",
		       pool->dev_name, npages);

	list_for_each_entry_safe(d_page, tmp, d_pages, page_list) {
		list_del(&d_page->page_list);
		__ttm_dma_free_page(pool, d_page);
	}
}

/*
 * Free pages from pool.
 *
 * To prevent hogging the ttm_swap process we only free NUM_PAGES_TO_ALLOC
 * number of pages in one go.
 *
 * @pool: to free the pages from
 * @nr_free: number of pages to free; FREE_ALL_PAGES frees every page in the pool
 * @use_static: Safe to use the static buffer
 **/
static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free,
				       bool use_static)
{
	static struct page *static_buf[NUM_PAGES_TO_ALLOC];
	unsigned long irq_flags;
	struct dma_page *dma_p, *tmp;
	struct page **pages_to_free;
	struct list_head d_pages;
	unsigned freed_pages = 0,
		 npages_to_free = nr_free;

	if (NUM_PAGES_TO_ALLOC < nr_free)
		npages_to_free = NUM_PAGES_TO_ALLOC;
#if 0
	if (nr_free > 1) {
		pr_debug("%s: (%s:%d) Attempting to free %d (%d) pages\n",
			 pool->dev_name, pool->name, current->pid,
			 npages_to_free, nr_free);
	}
#endif
	if (use_static)
		pages_to_free = static_buf;
	else
		pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
					GFP_KERNEL);

	if (!pages_to_free) {
		pr_debug("%s: Failed to allocate memory for pool free operation\n",
			 pool->dev_name);
		return 0;
	}
	INIT_LIST_HEAD(&d_pages);
restart:
	spin_lock_irqsave(&pool->lock, irq_flags);

	/* We're picking the oldest ones off the list */
	list_for_each_entry_safe_reverse(dma_p, tmp, &pool->free_list,
					 page_list) {
		if (freed_pages >= npages_to_free)
			break;

		/* Move the dma_page from one list to another. */
		list_move(&dma_p->page_list, &d_pages);

		pages_to_free[freed_pages++] = dma_p->p;
		/* We can only remove NUM_PAGES_TO_ALLOC at a time. */
		if (freed_pages >= NUM_PAGES_TO_ALLOC) {

			ttm_pool_update_free_locked(pool, freed_pages);
			/**
			 * Because changing page caching is costly
			 * we unlock the pool to prevent stalling.
			 */
			spin_unlock_irqrestore(&pool->lock, irq_flags);

			ttm_dma_pages_put(pool, &d_pages, pages_to_free,
					  freed_pages);

			INIT_LIST_HEAD(&d_pages);

			if (likely(nr_free != FREE_ALL_PAGES))
				nr_free -= freed_pages;

			if (NUM_PAGES_TO_ALLOC >= nr_free)
				npages_to_free = nr_free;
			else
				npages_to_free = NUM_PAGES_TO_ALLOC;

			freed_pages = 0;

			/* free all so restart the processing */
			if (nr_free)
				goto restart;

			/* Not allowed to fall through or break because
			 * following context is inside spinlock while we are
			 * outside here.
			 */
			goto out;
		}
	}

	/* remove range of pages from the pool */
	if (freed_pages) {
		ttm_pool_update_free_locked(pool, freed_pages);
		nr_free -= freed_pages;
	}

	spin_unlock_irqrestore(&pool->lock, irq_flags);

	if (freed_pages)
		ttm_dma_pages_put(pool, &d_pages, pages_to_free, freed_pages);
out:
	if (pages_to_free != static_buf)
		kfree(pages_to_free);
	return nr_free;
}
static void ttm_dma_free_pool(struct device *dev, enum pool_type type)
{
	struct device_pools *p;
	struct dma_pool *pool;

	if (!dev)
		return;

	mutex_lock(&_manager->lock);
	list_for_each_entry_reverse(p, &_manager->pools, pools) {
		if (p->dev != dev)
			continue;
		pool = p->pool;
		if (pool->type != type)
			continue;

		list_del(&p->pools);
		kfree(p);
		_manager->npools--;
		break;
	}
	list_for_each_entry_reverse(pool, &dev->dma_pools, pools) {
		if (pool->type != type)
			continue;
		/* Takes a spinlock.. */
		/* OK to use static buffer since global mutex is held. */
		ttm_dma_page_pool_free(pool, FREE_ALL_PAGES, true);
		WARN_ON(((pool->npages_in_use + pool->npages_free) != 0));
		/* This code path is called after _all_ references to the
		 * struct device have been dropped - so nobody should be
		 * touching it. In case somebody is trying to _add_ we are
		 * guarded by the mutex. */
		list_del(&pool->pools);
		kfree(pool);
		break;
	}
	mutex_unlock(&_manager->lock);
}

/*
 * On freeing of the 'struct device' this destructor is run,
 * albeit the pool might have already been freed earlier.
 */
static void ttm_dma_pool_release(struct device *dev, void *res)
{
	struct dma_pool *pool = *(struct dma_pool **)res;

	if (pool)
		ttm_dma_free_pool(dev, pool->type);
}

static int ttm_dma_pool_match(struct device *dev, void *res, void *match_data)
{
	return *(struct dma_pool **)res == match_data;
}

static struct dma_pool *ttm_dma_pool_init(struct device *dev, gfp_t flags,
					  enum pool_type type)
{
	const char *n[] = {"wc", "uc", "cached", " dma32", "huge"};
	enum pool_type t[] = {IS_WC, IS_UC, IS_CACHED, IS_DMA32, IS_HUGE};
	struct device_pools *sec_pool = NULL;
	struct dma_pool *pool = NULL, **ptr;
	unsigned i;
	int ret = -ENODEV;
	char *p;

	if (!dev)
		return NULL;

	ptr = devres_alloc(ttm_dma_pool_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	ret = -ENOMEM;

	pool = kmalloc_node(sizeof(struct dma_pool), GFP_KERNEL,
			    dev_to_node(dev));
	if (!pool)
		goto err_mem;

	sec_pool = kmalloc_node(sizeof(struct device_pools), GFP_KERNEL,
				dev_to_node(dev));
	if (!sec_pool)
		goto err_mem;

	INIT_LIST_HEAD(&sec_pool->pools);
	sec_pool->dev = dev;
	sec_pool->pool = pool;

	INIT_LIST_HEAD(&pool->free_list);
	INIT_LIST_HEAD(&pool->pools);
	spin_lock_init(&pool->lock);
	pool->dev = dev;
	pool->npages_free = pool->npages_in_use = 0;
	pool->nfrees = 0;
	pool->gfp_flags = flags;
	if (type & IS_HUGE)
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
		pool->size = HPAGE_PMD_SIZE;
#else
		BUG();
#endif
	else
		pool->size = PAGE_SIZE;
	pool->type = type;
	pool->nrefills = 0;
	p = pool->name;
	for (i = 0; i < ARRAY_SIZE(t); i++) {
		if (type & t[i]) {
			p += snprintf(p, sizeof(pool->name) - (p - pool->name),
				      "%s", n[i]);
		}
	}
	*p = 0;
	/* We copy the name for pr_ calls b/c when dma_pool_destroy is called
	 * - the kobj->name has already been deallocated. */
	snprintf(pool->dev_name, sizeof(pool->dev_name), "%s %s",
		 dev_driver_string(dev), dev_name(dev));
	mutex_lock(&_manager->lock);
	/* You can get the dma_pool from either the global: */
	list_add(&sec_pool->pools, &_manager->pools);
	_manager->npools++;
	/* or from 'struct device': */
	list_add(&pool->pools, &dev->dma_pools);
	mutex_unlock(&_manager->lock);

	*ptr = pool;
	devres_add(dev, ptr);

	return pool;
err_mem:
	devres_free(ptr);
	kfree(sec_pool);
	kfree(pool);
	return ERR_PTR(ret);
}
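/*
 * The name loop above simply concatenates the matching entries of n[], so a
 * pool created with IS_CACHED | IS_DMA32 ends up named "cached dma32" (which
 * is also why the name buffer in struct dma_pool is sized at 13 bytes).
 */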
static struct dma_pool *ttm_dma_find_pool(struct device *dev,
					  enum pool_type type)
{
	struct dma_pool *pool, *tmp;

	if (type == IS_UNDEFINED)
		return NULL;

	/* NB: We iterate on the 'struct dev' which has no spinlock, but
	 * it does have a kref which we have taken. The kref is taken during
	 * graphic driver loading - in the drm_pci_init it calls either
	 * pci_dev_get or pci_register_driver which both end up taking a kref
	 * on 'struct device'.
	 *
	 * On teardown, the graphic drivers end up quiescing the TTM (put_pages)
	 * and calls the dev_res deconstructors: ttm_dma_pool_release. The nice
	 * thing is at that point of time there are no pages associated with the
	 * driver so this function will not be called.
	 */
	list_for_each_entry_safe(pool, tmp, &dev->dma_pools, pools)
		if (pool->type == type)
			return pool;
	return NULL;
}

/*
 * Free the pages that failed to change their caching state. If there
 * are pages that have changed their caching state already, put them back
 * in the pool.
 */
static void ttm_dma_handle_caching_state_failure(struct dma_pool *pool,
						 struct list_head *d_pages,
						 struct page **failed_pages,
						 unsigned cpages)
{
	struct dma_page *d_page, *tmp;
	struct page *p;
	unsigned i = 0;

	p = failed_pages[0];
	if (!p)
		return;
	/* Find the failed page. */
	list_for_each_entry_safe(d_page, tmp, d_pages, page_list) {
		if (d_page->p != p)
			continue;
		/* .. and then progress over the full list. */
		list_del(&d_page->page_list);
		__ttm_dma_free_page(pool, d_page);
		if (++i < cpages)
			p = failed_pages[i];
		else
			break;
	}
}

/*
 * Allocate 'count' pages, and put 'need' number of them on the 'pages' array
 * as well as on the 'dma_address' array, starting at the 'dma_offset' offset.
 * The full list of pages should also be on 'd_pages'.
 * We return zero for success, and negative numbers as errors.
 */
static int ttm_dma_pool_alloc_new_pages(struct dma_pool *pool,
					struct list_head *d_pages,
					unsigned count)
{
	struct page **caching_array;
	struct dma_page *dma_p;
	struct page *p;
	int r = 0;
	unsigned i, j, npages, cpages;
	unsigned max_cpages = min(count,
			(unsigned)(PAGE_SIZE/sizeof(struct page *)));

	/* allocate array for page caching change */
	caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL);

	if (!caching_array) {
		pr_debug("%s: Unable to allocate table for new pages\n",
			 pool->dev_name);
		return -ENOMEM;
	}

	if (count > 1)
		pr_debug("%s: (%s:%d) Getting %d pages\n",
			 pool->dev_name, pool->name, current->pid, count);

	for (i = 0, cpages = 0; i < count; ++i) {
		dma_p = __ttm_dma_alloc_page(pool);
		if (!dma_p) {
			pr_debug("%s: Unable to get page %u\n",
				 pool->dev_name, i);

			/* store already allocated pages in the pool after
			 * setting the caching state */
			if (cpages) {
				r = ttm_set_pages_caching(pool, caching_array,
							  cpages);
				if (r)
					ttm_dma_handle_caching_state_failure(
						pool, d_pages, caching_array,
						cpages);
			}
			r = -ENOMEM;
			goto out;
		}
		p = dma_p->p;
		list_add(&dma_p->page_list, d_pages);

#ifdef CONFIG_HIGHMEM
		/* gfp flags of highmem page should never be dma32 so we
		 * should be fine in such case
		 */
		if (PageHighMem(p))
			continue;
#endif

		npages = pool->size / PAGE_SIZE;
		for (j = 0; j < npages; ++j) {
			caching_array[cpages++] = p + j;
			if (cpages == max_cpages) {
				/* Note: Cannot hold the spinlock */
				r = ttm_set_pages_caching(pool, caching_array,
							  cpages);
				if (r) {
					ttm_dma_handle_caching_state_failure(
						pool, d_pages, caching_array,
						cpages);
					goto out;
				}
				cpages = 0;
			}
		}
	}

	if (cpages) {
		r = ttm_set_pages_caching(pool, caching_array, cpages);
		if (r)
			ttm_dma_handle_caching_state_failure(pool, d_pages,
					caching_array, cpages);
	}
out:
	kfree(caching_array);
	return r;
}
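/*
 * A batching note (assuming 4 KiB pages and 64-bit pointers): max_cpages is
 * capped at PAGE_SIZE / sizeof(struct page *) = 512 entries, so caching
 * attribute changes are pushed to ttm_set_pages_caching() in batches of at
 * most 512 pages rather than once per page.
 */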
/*
 * @return count of pages still required to fulfill the request.
 */
static int ttm_dma_page_pool_fill_locked(struct dma_pool *pool,
					 unsigned long *irq_flags)
{
	unsigned count = _manager->options.small;
	int r = pool->npages_free;

	if (count > pool->npages_free) {
		struct list_head d_pages;

		INIT_LIST_HEAD(&d_pages);

		spin_unlock_irqrestore(&pool->lock, *irq_flags);

		/* Returns how many more are necessary to fulfill the
		 * request. */
		r = ttm_dma_pool_alloc_new_pages(pool, &d_pages, count);

		spin_lock_irqsave(&pool->lock, *irq_flags);
		if (!r) {
			/* Add the fresh to the end.. */
			list_splice(&d_pages, &pool->free_list);
			++pool->nrefills;
			pool->npages_free += count;
			r = count;
		} else {
			struct dma_page *d_page;
			unsigned cpages = 0;

			pr_debug("%s: Failed to fill %s pool (r:%d)!\n",
				 pool->dev_name, pool->name, r);

			list_for_each_entry(d_page, &d_pages, page_list) {
				cpages++;
			}
			list_splice_tail(&d_pages, &pool->free_list);
			pool->npages_free += cpages;
			r = cpages;
		}
	}
	return r;
}

/*
 * The populate list is actually a stack (not that it matters, as TTM
 * allocates one page at a time).
 * return dma_page pointer if success, otherwise NULL.
 */
static struct dma_page *ttm_dma_pool_get_pages(struct dma_pool *pool,
					       struct ttm_dma_tt *ttm_dma,
					       unsigned index)
{
	struct dma_page *d_page = NULL;
	struct ttm_tt *ttm = &ttm_dma->ttm;
	unsigned long irq_flags;
	int count;

	spin_lock_irqsave(&pool->lock, irq_flags);
	count = ttm_dma_page_pool_fill_locked(pool, &irq_flags);
	if (count) {
		d_page = list_first_entry(&pool->free_list, struct dma_page, page_list);
		ttm->pages[index] = d_page->p;
		ttm_dma->dma_address[index] = d_page->dma;
		list_move_tail(&d_page->page_list, &ttm_dma->pages_list);
		pool->npages_in_use += 1;
		pool->npages_free -= 1;
	}
	spin_unlock_irqrestore(&pool->lock, irq_flags);
	return d_page;
}

static gfp_t ttm_dma_pool_gfp_flags(struct ttm_dma_tt *ttm_dma, bool huge)
{
	struct ttm_tt *ttm = &ttm_dma->ttm;
	gfp_t gfp_flags;

	if (ttm->page_flags & TTM_PAGE_FLAG_DMA32)
		gfp_flags = GFP_USER | GFP_DMA32;
	else
		gfp_flags = GFP_HIGHUSER;
	if (ttm->page_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
		gfp_flags |= __GFP_ZERO;

	if (huge) {
		gfp_flags |= GFP_TRANSHUGE;
		gfp_flags &= ~__GFP_MOVABLE;
		gfp_flags &= ~__GFP_COMP;
	}

	if (ttm->page_flags & TTM_PAGE_FLAG_NO_RETRY)
		gfp_flags |= __GFP_RETRY_MAYFAIL;

	return gfp_flags;
}
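/*
 * For instance, a TTM with TTM_PAGE_FLAG_DMA32 and TTM_PAGE_FLAG_ZERO_ALLOC
 * set ends up allocating with GFP_USER | GFP_DMA32 | __GFP_ZERO, while the
 * huge-pool variant additionally ORs in GFP_TRANSHUGE and strips
 * __GFP_MOVABLE and __GFP_COMP before the mask is handed to
 * dma_alloc_attrs() in __ttm_dma_alloc_page().
 */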
/*
 * On success pages list will hold count number of correctly
 * cached pages. On failure will hold the negative return value (-ENOMEM, etc).
 */
int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev,
			struct ttm_operation_ctx *ctx)
{
	struct ttm_tt *ttm = &ttm_dma->ttm;
	struct ttm_mem_global *mem_glob = ttm->bdev->glob->mem_glob;
	unsigned long num_pages = ttm->num_pages;
	struct dma_pool *pool;
	struct dma_page *d_page;
	enum pool_type type;
	unsigned i;
	int ret;

	if (ttm->state != tt_unpopulated)
		return 0;

	if (ttm_check_under_lowerlimit(mem_glob, num_pages, ctx))
		return -ENOMEM;

	INIT_LIST_HEAD(&ttm_dma->pages_list);
	i = 0;

	type = ttm_to_type(ttm->page_flags, ttm->caching_state);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (ttm->page_flags & TTM_PAGE_FLAG_DMA32)
		goto skip_huge;

	pool = ttm_dma_find_pool(dev, type | IS_HUGE);
	if (!pool) {
		gfp_t gfp_flags = ttm_dma_pool_gfp_flags(ttm_dma, true);

		pool = ttm_dma_pool_init(dev, gfp_flags, type | IS_HUGE);
		if (IS_ERR_OR_NULL(pool))
			goto skip_huge;
	}

	while (num_pages >= HPAGE_PMD_NR) {
		unsigned j;

		d_page = ttm_dma_pool_get_pages(pool, ttm_dma, i);
		if (!d_page)
			break;

		ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
						pool->size, ctx);
		if (unlikely(ret != 0)) {
			ttm_dma_unpopulate(ttm_dma, dev);
			return -ENOMEM;
		}

		d_page->vaddr |= VADDR_FLAG_UPDATED_COUNT;
		for (j = i + 1; j < (i + HPAGE_PMD_NR); ++j) {
			ttm->pages[j] = ttm->pages[j - 1] + 1;
			ttm_dma->dma_address[j] = ttm_dma->dma_address[j - 1] +
				PAGE_SIZE;
		}

		i += HPAGE_PMD_NR;
		num_pages -= HPAGE_PMD_NR;
	}

skip_huge:
#endif

	pool = ttm_dma_find_pool(dev, type);
	if (!pool) {
		gfp_t gfp_flags = ttm_dma_pool_gfp_flags(ttm_dma, false);

		pool = ttm_dma_pool_init(dev, gfp_flags, type);
		if (IS_ERR_OR_NULL(pool))
			return -ENOMEM;
	}

	while (num_pages) {
		d_page = ttm_dma_pool_get_pages(pool, ttm_dma, i);
		if (!d_page) {
			ttm_dma_unpopulate(ttm_dma, dev);
			return -ENOMEM;
		}

		ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
						pool->size, ctx);
		if (unlikely(ret != 0)) {
			ttm_dma_unpopulate(ttm_dma, dev);
			return -ENOMEM;
		}

		d_page->vaddr |= VADDR_FLAG_UPDATED_COUNT;
		++i;
		--num_pages;
	}

	if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
		ret = ttm_tt_swapin(ttm);
		if (unlikely(ret != 0)) {
			ttm_dma_unpopulate(ttm_dma, dev);
			return ret;
		}
	}

	ttm->state = tt_unbound;
	return 0;
}
EXPORT_SYMBOL_GPL(ttm_dma_populate);
/* Put all pages in pages list to correct pool to wait for reuse */
void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
{
	struct ttm_tt *ttm = &ttm_dma->ttm;
	struct ttm_mem_global *mem_glob = ttm->bdev->glob->mem_glob;
	struct dma_pool *pool;
	struct dma_page *d_page, *next;
	enum pool_type type;
	bool is_cached = false;
	unsigned count, i, npages = 0;
	unsigned long irq_flags;

	type = ttm_to_type(ttm->page_flags, ttm->caching_state);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	pool = ttm_dma_find_pool(dev, type | IS_HUGE);
	if (pool) {
		count = 0;
		list_for_each_entry_safe(d_page, next, &ttm_dma->pages_list,
					 page_list) {
			if (!(d_page->vaddr & VADDR_FLAG_HUGE_POOL))
				continue;

			count++;
			if (d_page->vaddr & VADDR_FLAG_UPDATED_COUNT) {
				ttm_mem_global_free_page(mem_glob, d_page->p,
							 pool->size);
				d_page->vaddr &= ~VADDR_FLAG_UPDATED_COUNT;
			}
			ttm_dma_page_put(pool, d_page);
		}

		spin_lock_irqsave(&pool->lock, irq_flags);
		pool->npages_in_use -= count;
		pool->nfrees += count;
		spin_unlock_irqrestore(&pool->lock, irq_flags);
	}
#endif

	pool = ttm_dma_find_pool(dev, type);
	if (!pool)
		return;

	is_cached = (ttm_dma_find_pool(pool->dev,
		     ttm_to_type(ttm->page_flags, tt_cached)) == pool);

	/* make sure pages array match list and count number of pages */
	count = 0;
	list_for_each_entry_safe(d_page, next, &ttm_dma->pages_list,
				 page_list) {
		ttm->pages[count] = d_page->p;
		count++;

		if (d_page->vaddr & VADDR_FLAG_UPDATED_COUNT) {
			ttm_mem_global_free_page(mem_glob, d_page->p,
						 pool->size);
			d_page->vaddr &= ~VADDR_FLAG_UPDATED_COUNT;
		}

		if (is_cached)
			ttm_dma_page_put(pool, d_page);
	}

	spin_lock_irqsave(&pool->lock, irq_flags);
	pool->npages_in_use -= count;
	if (is_cached) {
		pool->nfrees += count;
	} else {
		pool->npages_free += count;
		list_splice(&ttm_dma->pages_list, &pool->free_list);
		/*
		 * Wait to have at least NUM_PAGES_TO_ALLOC number of pages
		 * to free in order to minimize calls to set_memory_wb().
		 */
		if (pool->npages_free >= (_manager->options.max_size +
					  NUM_PAGES_TO_ALLOC))
			npages = pool->npages_free - _manager->options.max_size;
	}
	spin_unlock_irqrestore(&pool->lock, irq_flags);

	INIT_LIST_HEAD(&ttm_dma->pages_list);
	for (i = 0; i < ttm->num_pages; i++) {
		ttm->pages[i] = NULL;
		ttm_dma->dma_address[i] = 0;
	}

	/* shrink pool if necessary (only on !is_cached pools)*/
	if (npages)
		ttm_dma_page_pool_free(pool, npages, false);
	ttm->state = tt_unpopulated;
}
EXPORT_SYMBOL_GPL(ttm_dma_unpopulate);
/**
 * Callback for mm to request pool to reduce number of pages held.
 *
 * XXX: (dchinner) Deadlock warning!
 *
 * I'm getting sadder as I hear more pathetical whimpers about needing per-pool
 * shrinkers
 */
static unsigned long
ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	static unsigned start_pool;
	unsigned idx = 0;
	unsigned pool_offset;
	unsigned shrink_pages = sc->nr_to_scan;
	struct device_pools *p;
	unsigned long freed = 0;

	if (list_empty(&_manager->pools))
		return SHRINK_STOP;

	if (!mutex_trylock(&_manager->lock))
		return SHRINK_STOP;
	if (!_manager->npools)
		goto out;
	pool_offset = ++start_pool % _manager->npools;
	list_for_each_entry(p, &_manager->pools, pools) {
		unsigned nr_free;

		if (!p->dev)
			continue;
		if (shrink_pages == 0)
			break;
		/* Do it in round-robin fashion. */
		if (++idx < pool_offset)
			continue;
		nr_free = shrink_pages;
		/* OK to use static buffer since global mutex is held. */
		shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free, true);
		freed += nr_free - shrink_pages;

		pr_debug("%s: (%s:%d) Asked to shrink %d, have %d more to go\n",
			 p->pool->dev_name, p->pool->name, current->pid,
			 nr_free, shrink_pages);
	}
out:
	mutex_unlock(&_manager->lock);
	return freed;
}

static unsigned long
ttm_dma_pool_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
	struct device_pools *p;
	unsigned long count = 0;

	if (!mutex_trylock(&_manager->lock))
		return 0;
	list_for_each_entry(p, &_manager->pools, pools)
		count += p->pool->npages_free;
	mutex_unlock(&_manager->lock);
	return count;
}

static int ttm_dma_pool_mm_shrink_init(struct ttm_pool_manager *manager)
{
	manager->mm_shrink.count_objects = ttm_dma_pool_shrink_count;
	manager->mm_shrink.scan_objects = &ttm_dma_pool_shrink_scan;
	manager->mm_shrink.seeks = 1;
	return register_shrinker(&manager->mm_shrink);
}

static void ttm_dma_pool_mm_shrink_fini(struct ttm_pool_manager *manager)
{
	unregister_shrinker(&manager->mm_shrink);
}

int ttm_dma_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
{
	int ret;

	WARN_ON(_manager);

	pr_info("Initializing DMA pool allocator\n");

	_manager = kzalloc(sizeof(*_manager), GFP_KERNEL);
	if (!_manager)
		return -ENOMEM;

	mutex_init(&_manager->lock);
	INIT_LIST_HEAD(&_manager->pools);

	_manager->options.max_size = max_pages;
	_manager->options.small = SMALL_ALLOCATION;
	_manager->options.alloc_size = NUM_PAGES_TO_ALLOC;

	/* This takes care of auto-freeing the _manager */
	ret = kobject_init_and_add(&_manager->kobj, &ttm_pool_kobj_type,
				   &glob->kobj, "dma_pool");
	if (unlikely(ret != 0))
		goto error;

	ret = ttm_dma_pool_mm_shrink_init(_manager);
	if (unlikely(ret != 0))
		goto error;
	return 0;

error:
	kobject_put(&_manager->kobj);
	_manager = NULL;
	return ret;
}

void ttm_dma_page_alloc_fini(void)
{
	struct device_pools *p, *t;

	pr_info("Finalizing DMA pool allocator\n");
	ttm_dma_pool_mm_shrink_fini(_manager);

	list_for_each_entry_safe_reverse(p, t, &_manager->pools, pools) {
		dev_dbg(p->dev, "(%s:%d) Freeing.\n", p->pool->name,
			current->pid);
		WARN_ON(devres_destroy(p->dev, ttm_dma_pool_release,
				       ttm_dma_pool_match, p->pool));
		ttm_dma_free_pool(p->dev, p->pool->type);
	}
	kobject_put(&_manager->kobj);
	_manager = NULL;
}

int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data)
{
	struct device_pools *p;
	struct dma_pool *pool = NULL;

	if (!_manager) {
		seq_printf(m, "No pool allocator running.\n");
		return 0;
	}
	seq_printf(m, "         pool      refills   pages freed    inuse available     name\n");
	mutex_lock(&_manager->lock);
	list_for_each_entry(p, &_manager->pools, pools) {
		struct device *dev = p->dev;
		if (!dev)
			continue;
		pool = p->pool;
		seq_printf(m, "%13s %12ld %13ld %8d %8d %8s\n",
				pool->name, pool->nrefills,
				pool->nfrees, pool->npages_in_use,
				pool->npages_free,
				pool->dev_name);
	}
	mutex_unlock(&_manager->lock);
	return 0;
}
EXPORT_SYMBOL_GPL(ttm_dma_page_alloc_debugfs);

#endif