ttm_page_alloc_dma.c

  1. /*
  2. * Copyright 2011 (c) Oracle Corp.
  3. * Permission is hereby granted, free of charge, to any person obtaining a
  4. * copy of this software and associated documentation files (the "Software"),
  5. * to deal in the Software without restriction, including without limitation
  6. * the rights to use, copy, modify, merge, publish, distribute, sub license,
  7. * and/or sell copies of the Software, and to permit persons to whom the
  8. * Software is furnished to do so, subject to the following conditions:
  9. *
  10. * The above copyright notice and this permission notice (including the
  11. * next paragraph) shall be included in all copies or substantial portions
  12. * of the Software.
  13. *
  14. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16. * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
  17. * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  18. * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  19. * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
  20. * DEALINGS IN THE SOFTWARE.
  21. *
  22. * Author: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
  23. */
  24. /*
  25. * A simple DMA pool loosely based on dmapool.c. It has certain advantages
  26. * over the DMA pools:
  27. * - Pool collects recently freed pages for reuse (and hooks up to
  28. * the shrinker).
  29. * - Tracks pages that are currently in use
  30. * - Tracks whether the page is UC, WB or cached (and reverts to WB
  31. * when freed).
  32. */
  33. #if defined(CONFIG_SWIOTLB) || defined(CONFIG_INTEL_IOMMU)
  34. #define pr_fmt(fmt) "[TTM] " fmt
  35. #include <linux/dma-mapping.h>
  36. #include <linux/list.h>
  37. #include <linux/seq_file.h> /* for seq_printf */
  38. #include <linux/slab.h>
  39. #include <linux/spinlock.h>
  40. #include <linux/highmem.h>
  41. #include <linux/mm_types.h>
  42. #include <linux/module.h>
  43. #include <linux/mm.h>
  44. #include <linux/atomic.h>
  45. #include <linux/device.h>
  46. #include <linux/kthread.h>
  47. #include <drm/ttm/ttm_bo_driver.h>
  48. #include <drm/ttm/ttm_page_alloc.h>
  49. #if IS_ENABLED(CONFIG_AGP)
  50. #include <asm/agp.h>
  51. #endif
  52. #ifdef CONFIG_X86
  53. #include <asm/set_memory.h>
  54. #endif
  55. #define NUM_PAGES_TO_ALLOC (PAGE_SIZE/sizeof(struct page *))
  56. #define SMALL_ALLOCATION 4
  57. #define FREE_ALL_PAGES (~0U)
  58. #define VADDR_FLAG_HUGE_POOL 1UL
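/*
 * Illustrative arithmetic (assuming 4 KiB pages and 8-byte page pointers,
 * i.e. a typical 64-bit configuration): NUM_PAGES_TO_ALLOC works out to
 * 4096 / 8 = 512, so the pools grow and shrink in batches of at most
 * 512 pages (2 MiB) at a time.
 */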
  59. enum pool_type {
  60. IS_UNDEFINED = 0,
  61. IS_WC = 1 << 1,
  62. IS_UC = 1 << 2,
  63. IS_CACHED = 1 << 3,
  64. IS_DMA32 = 1 << 4,
  65. IS_HUGE = 1 << 5
  66. };
  67. /*
  68. * The pool structure. There are up to nine pools:
  69. * - generic (not restricted to DMA32):
  70. * - write combined, uncached, cached.
  71. * - dma32 (up to 2^32, so up to 4GB):
  72. * - write combined, uncached, cached.
  73. * - huge (not restricted to DMA32):
  74. * - write combined, uncached, cached.
  75. * for each 'struct device'. The 'cached' is for pages that are actively used.
  76. * The other ones can be shrunk by the shrinker API if necessary.
  77. * @pools: The 'struct device->dma_pools' link.
  78. * @type: Type of the pool
  79. * @lock: Protects the free_list from concurrent access. Must be
  80. * used with irqsave/irqrestore variants because the pool allocator may be called
  81. * from delayed work.
  82. * @free_list: Pool of pages that are free to be used. No order requirements.
  83. * @dev: The device that is associated with these pools.
  84. * @size: Size used during DMA allocation.
  85. * @npages_free: Count of available pages for re-use.
  86. * @npages_in_use: Count of pages that are in use.
  87. * @nfrees: Stats when pool is shrinking.
  88. * @nrefills: Stats when the pool is grown.
  89. * @gfp_flags: Flags to pass for alloc_page.
  90. * @name: Name of the pool.
  91. * @dev_name: Name derived from dev - similar to how dev_info works.
  92. * Used during shutdown as the dev_info during release is unavailable.
  93. */
  94. struct dma_pool {
  95. struct list_head pools; /* The 'struct device->dma_pools link */
  96. enum pool_type type;
  97. spinlock_t lock;
  98. struct list_head free_list;
  99. struct device *dev;
  100. unsigned size;
  101. unsigned npages_free;
  102. unsigned npages_in_use;
  103. unsigned long nfrees; /* Stats when shrunk. */
  104. unsigned long nrefills; /* Stats when grown. */
  105. gfp_t gfp_flags;
  106. char name[13]; /* "cached dma32" */
  107. char dev_name[64]; /* Constructed from dev */
  108. };
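/*
 * Illustrative note on name[13] above (derived from ttm_dma_pool_init()
 * below, which builds the name from the fragments "wc", "uc", "cached",
 * " dma32" and "huge"): the longest combination is "cached dma32",
 * i.e. 12 characters plus the terminating NUL - exactly 13 bytes.
 */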
  109. /*
  110. * The accounting page keeping track of the allocated page along with
  111. * the DMA address.
  112. * @page_list: The link to the 'page_list' in 'struct dma_pool'.
  113. * @vaddr: The virtual address of the page and a flag if the page belongs to a
  114. * huge pool
  115. * @dma: The bus address of the page. If the page is not allocated
  116. * via the DMA API, it will be -1.
  117. */
  118. struct dma_page {
  119. struct list_head page_list;
  120. unsigned long vaddr;
  121. struct page *p;
  122. dma_addr_t dma;
  123. };
  124. /*
  125. * Limits for the pool. They are handled without locks because the only place
  126. * where they may change is the sysfs store. They won't have an immediate
  127. * effect anyway, so forcing serialized access to them is pointless.
  128. */
  129. struct ttm_pool_opts {
  130. unsigned alloc_size;
  131. unsigned max_size;
  132. unsigned small;
  133. };
  134. /*
  135. * Contains the list of all of the 'struct device' and their corresponding
  136. * DMA pools. Guarded by _manager->lock.
  137. * @pools: The link to 'struct ttm_pool_manager->pools'
  138. * @dev: The 'struct device' associated with the 'pool'
  139. * @pool: The 'struct dma_pool' associated with the 'dev'
  140. */
  141. struct device_pools {
  142. struct list_head pools;
  143. struct device *dev;
  144. struct dma_pool *pool;
  145. };
  146. /*
  147. * struct ttm_pool_manager - Holds memory pools for fast allocation
  148. *
  149. * @lock: Lock used when adding/removing from pools
  150. * @pools: List of 'struct device' and 'struct dma_pool' tuples.
  151. * @options: Limits for the pool.
  152. * @npools: Total number of pools in existence.
  153. * @mm_shrink: The structure used by register_shrinker()/unregister_shrinker()
  154. */
  155. struct ttm_pool_manager {
  156. struct mutex lock;
  157. struct list_head pools;
  158. struct ttm_pool_opts options;
  159. unsigned npools;
  160. struct shrinker mm_shrink;
  161. struct kobject kobj;
  162. };
  163. static struct ttm_pool_manager *_manager;
  164. static struct attribute ttm_page_pool_max = {
  165. .name = "pool_max_size",
  166. .mode = S_IRUGO | S_IWUSR
  167. };
  168. static struct attribute ttm_page_pool_small = {
  169. .name = "pool_small_allocation",
  170. .mode = S_IRUGO | S_IWUSR
  171. };
  172. static struct attribute ttm_page_pool_alloc_size = {
  173. .name = "pool_allocation_size",
  174. .mode = S_IRUGO | S_IWUSR
  175. };
  176. static struct attribute *ttm_pool_attrs[] = {
  177. &ttm_page_pool_max,
  178. &ttm_page_pool_small,
  179. &ttm_page_pool_alloc_size,
  180. NULL
  181. };
  182. static void ttm_pool_kobj_release(struct kobject *kobj)
  183. {
  184. struct ttm_pool_manager *m =
  185. container_of(kobj, struct ttm_pool_manager, kobj);
  186. kfree(m);
  187. }
  188. static ssize_t ttm_pool_store(struct kobject *kobj, struct attribute *attr,
  189. const char *buffer, size_t size)
  190. {
  191. struct ttm_pool_manager *m =
  192. container_of(kobj, struct ttm_pool_manager, kobj);
  193. int chars;
  194. unsigned val;
  195. chars = sscanf(buffer, "%u", &val);
  196. if (chars == 0)
  197. return size;
  198. /* Convert kb to number of pages */
  199. val = val / (PAGE_SIZE >> 10);
  200. if (attr == &ttm_page_pool_max)
  201. m->options.max_size = val;
  202. else if (attr == &ttm_page_pool_small)
  203. m->options.small = val;
  204. else if (attr == &ttm_page_pool_alloc_size) {
  205. if (val > NUM_PAGES_TO_ALLOC*8) {
  206. pr_err("Setting allocation size to %lu is not allowed. Recommended size is %lu\n",
  207. NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
  208. NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
  209. return size;
  210. } else if (val > NUM_PAGES_TO_ALLOC) {
  211. pr_warn("Setting allocation size to larger than %lu is not recommended\n",
  212. NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
  213. }
  214. m->options.alloc_size = val;
  215. }
  216. return size;
  217. }
  218. static ssize_t ttm_pool_show(struct kobject *kobj, struct attribute *attr,
  219. char *buffer)
  220. {
  221. struct ttm_pool_manager *m =
  222. container_of(kobj, struct ttm_pool_manager, kobj);
  223. unsigned val = 0;
  224. if (attr == &ttm_page_pool_max)
  225. val = m->options.max_size;
  226. else if (attr == &ttm_page_pool_small)
  227. val = m->options.small;
  228. else if (attr == &ttm_page_pool_alloc_size)
  229. val = m->options.alloc_size;
  230. val = val * (PAGE_SIZE >> 10);
  231. return snprintf(buffer, PAGE_SIZE, "%u\n", val);
  232. }
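/*
 * Illustrative sketch (hypothetical helpers, not part of the original file):
 * the sysfs values are exchanged in KiB while the options are stored as page
 * counts, so store() divides by (PAGE_SIZE >> 10) and show() multiplies by
 * it. Assuming 4 KiB pages, writing "8192" (KiB) to pool_max_size stores
 * 2048 pages, and reading it back prints 8192 again.
 */
static inline unsigned ttm_pool_example_kb_to_pages(unsigned kb)
{
	return kb / (PAGE_SIZE >> 10);		/* e.g. 8192 KiB -> 2048 pages */
}

static inline unsigned ttm_pool_example_pages_to_kb(unsigned pages)
{
	return pages * (PAGE_SIZE >> 10);	/* e.g. 2048 pages -> 8192 KiB */
}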
  233. static const struct sysfs_ops ttm_pool_sysfs_ops = {
  234. .show = &ttm_pool_show,
  235. .store = &ttm_pool_store,
  236. };
  237. static struct kobj_type ttm_pool_kobj_type = {
  238. .release = &ttm_pool_kobj_release,
  239. .sysfs_ops = &ttm_pool_sysfs_ops,
  240. .default_attrs = ttm_pool_attrs,
  241. };
  242. #ifndef CONFIG_X86
  243. static int set_pages_array_wb(struct page **pages, int addrinarray)
  244. {
  245. #if IS_ENABLED(CONFIG_AGP)
  246. int i;
  247. for (i = 0; i < addrinarray; i++)
  248. unmap_page_from_agp(pages[i]);
  249. #endif
  250. return 0;
  251. }
  252. static int set_pages_array_wc(struct page **pages, int addrinarray)
  253. {
  254. #if IS_ENABLED(CONFIG_AGP)
  255. int i;
  256. for (i = 0; i < addrinarray; i++)
  257. map_page_into_agp(pages[i]);
  258. #endif
  259. return 0;
  260. }
  261. static int set_pages_array_uc(struct page **pages, int addrinarray)
  262. {
  263. #if IS_ENABLED(CONFIG_AGP)
  264. int i;
  265. for (i = 0; i < addrinarray; i++)
  266. map_page_into_agp(pages[i]);
  267. #endif
  268. return 0;
  269. }
  270. #endif /* for !CONFIG_X86 */
  271. static int ttm_set_pages_caching(struct dma_pool *pool,
  272. struct page **pages, unsigned cpages)
  273. {
  274. int r = 0;
  275. /* Set page caching */
  276. if (pool->type & IS_UC) {
  277. r = set_pages_array_uc(pages, cpages);
  278. if (r)
  279. pr_err("%s: Failed to set %d pages to uc!\n",
  280. pool->dev_name, cpages);
  281. }
  282. if (pool->type & IS_WC) {
  283. r = set_pages_array_wc(pages, cpages);
  284. if (r)
  285. pr_err("%s: Failed to set %d pages to wc!\n",
  286. pool->dev_name, cpages);
  287. }
  288. return r;
  289. }
  290. static void __ttm_dma_free_page(struct dma_pool *pool, struct dma_page *d_page)
  291. {
  292. dma_addr_t dma = d_page->dma;
  293. d_page->vaddr &= ~VADDR_FLAG_HUGE_POOL;
  294. dma_free_coherent(pool->dev, pool->size, (void *)d_page->vaddr, dma);
  295. kfree(d_page);
  296. d_page = NULL;
  297. }
  298. static struct dma_page *__ttm_dma_alloc_page(struct dma_pool *pool)
  299. {
  300. struct dma_page *d_page;
  301. void *vaddr;
  302. d_page = kmalloc(sizeof(struct dma_page), GFP_KERNEL);
  303. if (!d_page)
  304. return NULL;
  305. vaddr = dma_alloc_coherent(pool->dev, pool->size, &d_page->dma,
  306. pool->gfp_flags);
  307. if (vaddr) {
  308. if (is_vmalloc_addr(vaddr))
  309. d_page->p = vmalloc_to_page(vaddr);
  310. else
  311. d_page->p = virt_to_page(vaddr);
  312. d_page->vaddr = (unsigned long)vaddr;
  313. if (pool->type & IS_HUGE)
  314. d_page->vaddr |= VADDR_FLAG_HUGE_POOL;
  315. } else {
  316. kfree(d_page);
  317. d_page = NULL;
  318. }
  319. return d_page;
  320. }
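/*
 * Illustrative sketch (hypothetical helper, not used by this file): because
 * dma_alloc_coherent() returns at least page-aligned addresses, the low bit
 * of 'vaddr' is free to carry VADDR_FLAG_HUGE_POOL. Recovering the real
 * kernel virtual address therefore just masks that bit off, as
 * __ttm_dma_free_page() does above.
 */
static inline void *ttm_dma_example_page_vaddr(const struct dma_page *d_page)
{
	return (void *)(d_page->vaddr & ~VADDR_FLAG_HUGE_POOL);
}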
  321. static enum pool_type ttm_to_type(int flags, enum ttm_caching_state cstate)
  322. {
  323. enum pool_type type = IS_UNDEFINED;
  324. if (flags & TTM_PAGE_FLAG_DMA32)
  325. type |= IS_DMA32;
  326. if (cstate == tt_cached)
  327. type |= IS_CACHED;
  328. else if (cstate == tt_uncached)
  329. type |= IS_UC;
  330. else
  331. type |= IS_WC;
  332. return type;
  333. }
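/*
 * Illustrative examples of the mapping above (tt_wc being the remaining
 * ttm_caching_state value, handled by the final else branch):
 *	ttm_to_type(0, tt_cached)                      -> IS_CACHED
 *	ttm_to_type(TTM_PAGE_FLAG_DMA32, tt_uncached)  -> IS_DMA32 | IS_UC
 *	ttm_to_type(TTM_PAGE_FLAG_DMA32, tt_wc)        -> IS_DMA32 | IS_WC
 * ttm_dma_pool_init() later renders the last one as the pool name "wc dma32".
 */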
  334. static void ttm_pool_update_free_locked(struct dma_pool *pool,
  335. unsigned freed_pages)
  336. {
  337. pool->npages_free -= freed_pages;
  338. pool->nfrees += freed_pages;
  339. }
  340. /* set memory back to wb and free the pages. */
  341. static void ttm_dma_page_put(struct dma_pool *pool, struct dma_page *d_page)
  342. {
  343. struct page *page = d_page->p;
  344. unsigned i, num_pages;
  345. int ret;
  346. /* Don't set WB on WB page pool. */
  347. if (!(pool->type & IS_CACHED)) {
  348. num_pages = pool->size / PAGE_SIZE;
  349. for (i = 0; i < num_pages; ++i, ++page) {
  350. ret = set_pages_array_wb(&page, 1);
  351. if (ret) {
  352. pr_err("%s: Failed to set %d pages to wb!\n",
  353. pool->dev_name, 1);
  354. }
  355. }
  356. }
  357. list_del(&d_page->page_list);
  358. __ttm_dma_free_page(pool, d_page);
  359. }
  360. static void ttm_dma_pages_put(struct dma_pool *pool, struct list_head *d_pages,
  361. struct page *pages[], unsigned npages)
  362. {
  363. struct dma_page *d_page, *tmp;
  364. if (pool->type & IS_HUGE) {
  365. list_for_each_entry_safe(d_page, tmp, d_pages, page_list)
  366. ttm_dma_page_put(pool, d_page);
  367. return;
  368. }
  369. /* Don't set WB on WB page pool. */
  370. if (npages && !(pool->type & IS_CACHED) &&
  371. set_pages_array_wb(pages, npages))
  372. pr_err("%s: Failed to set %d pages to wb!\n",
  373. pool->dev_name, npages);
  374. list_for_each_entry_safe(d_page, tmp, d_pages, page_list) {
  375. list_del(&d_page->page_list);
  376. __ttm_dma_free_page(pool, d_page);
  377. }
  378. }
  379. /*
  380. * Free pages from pool.
  381. *
  382. * To prevent hogging the ttm_swap process we free at most NUM_PAGES_TO_ALLOC
  383. * pages in one go.
  384. *
  385. * @pool: to free the pages from
  386. * @nr_free: Number of pages to free; FREE_ALL_PAGES frees every page in the pool
  387. * @use_static: Safe to use static buffer
  388. **/
  389. static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free,
  390. bool use_static)
  391. {
  392. static struct page *static_buf[NUM_PAGES_TO_ALLOC];
  393. unsigned long irq_flags;
  394. struct dma_page *dma_p, *tmp;
  395. struct page **pages_to_free;
  396. struct list_head d_pages;
  397. unsigned freed_pages = 0,
  398. npages_to_free = nr_free;
  399. if (NUM_PAGES_TO_ALLOC < nr_free)
  400. npages_to_free = NUM_PAGES_TO_ALLOC;
  401. #if 0
  402. if (nr_free > 1) {
  403. pr_debug("%s: (%s:%d) Attempting to free %d (%d) pages\n",
  404. pool->dev_name, pool->name, current->pid,
  405. npages_to_free, nr_free);
  406. }
  407. #endif
  408. if (use_static)
  409. pages_to_free = static_buf;
  410. else
  411. pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
  412. GFP_KERNEL);
  413. if (!pages_to_free) {
  414. pr_err("%s: Failed to allocate memory for pool free operation\n",
  415. pool->dev_name);
  416. return 0;
  417. }
  418. INIT_LIST_HEAD(&d_pages);
  419. restart:
  420. spin_lock_irqsave(&pool->lock, irq_flags);
  421. /* We pick the oldest ones off the list. */
  422. list_for_each_entry_safe_reverse(dma_p, tmp, &pool->free_list,
  423. page_list) {
  424. if (freed_pages >= npages_to_free)
  425. break;
  426. /* Move the dma_page from one list to another. */
  427. list_move(&dma_p->page_list, &d_pages);
  428. pages_to_free[freed_pages++] = dma_p->p;
  429. /* We can only remove NUM_PAGES_TO_ALLOC at a time. */
  430. if (freed_pages >= NUM_PAGES_TO_ALLOC) {
  431. ttm_pool_update_free_locked(pool, freed_pages);
  432. /**
  433. * Because changing page caching is costly
  434. * we unlock the pool to prevent stalling.
  435. */
  436. spin_unlock_irqrestore(&pool->lock, irq_flags);
  437. ttm_dma_pages_put(pool, &d_pages, pages_to_free,
  438. freed_pages);
  439. INIT_LIST_HEAD(&d_pages);
  440. if (likely(nr_free != FREE_ALL_PAGES))
  441. nr_free -= freed_pages;
  442. if (NUM_PAGES_TO_ALLOC >= nr_free)
  443. npages_to_free = nr_free;
  444. else
  445. npages_to_free = NUM_PAGES_TO_ALLOC;
  446. freed_pages = 0;
  447. /* free all so restart the processing */
  448. if (nr_free)
  449. goto restart;
  450. /* Not allowed to fall through or break, because the code
  451. * following the loop expects the spinlock to be held, while
  452. * here we have already dropped it.
  453. */
  454. goto out;
  455. }
  456. }
  457. /* remove range of pages from the pool */
  458. if (freed_pages) {
  459. ttm_pool_update_free_locked(pool, freed_pages);
  460. nr_free -= freed_pages;
  461. }
  462. spin_unlock_irqrestore(&pool->lock, irq_flags);
  463. if (freed_pages)
  464. ttm_dma_pages_put(pool, &d_pages, pages_to_free, freed_pages);
  465. out:
  466. if (pages_to_free != static_buf)
  467. kfree(pages_to_free);
  468. return nr_free;
  469. }
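/*
 * Illustrative call patterns for ttm_dma_page_pool_free(), taken from the
 * call sites later in this file (sketch only):
 *
 *	Shrinker path - bounded request; the static buffer is safe because the
 *	global manager mutex is held:
 *		shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free, true);
 *
 *	Pool teardown - drop everything, also under the manager mutex:
 *		ttm_dma_page_pool_free(pool, FREE_ALL_PAGES, true);
 *
 *	Unpopulate path - free only the excess above options.max_size, without
 *	the static buffer since the mutex is not held:
 *		ttm_dma_page_pool_free(pool, npages, false);
 */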
  470. static void ttm_dma_free_pool(struct device *dev, enum pool_type type)
  471. {
  472. struct device_pools *p;
  473. struct dma_pool *pool;
  474. if (!dev)
  475. return;
  476. mutex_lock(&_manager->lock);
  477. list_for_each_entry_reverse(p, &_manager->pools, pools) {
  478. if (p->dev != dev)
  479. continue;
  480. pool = p->pool;
  481. if (pool->type != type)
  482. continue;
  483. list_del(&p->pools);
  484. kfree(p);
  485. _manager->npools--;
  486. break;
  487. }
  488. list_for_each_entry_reverse(pool, &dev->dma_pools, pools) {
  489. if (pool->type != type)
  490. continue;
  491. /* Takes a spinlock.. */
  492. /* OK to use static buffer since global mutex is held. */
  493. ttm_dma_page_pool_free(pool, FREE_ALL_PAGES, true);
  494. WARN_ON(((pool->npages_in_use + pool->npages_free) != 0));
  495. /* This code path is called after _all_ references to the
  496. * struct device have been dropped - so nobody should be
  497. * touching it. In case somebody is trying to _add_ we are
  498. * guarded by the mutex. */
  499. list_del(&pool->pools);
  500. kfree(pool);
  501. break;
  502. }
  503. mutex_unlock(&_manager->lock);
  504. }
  505. /*
  506. * This destructor is run when the 'struct device' is freed,
  507. * although the pool might already have been freed earlier.
  508. */
  509. static void ttm_dma_pool_release(struct device *dev, void *res)
  510. {
  511. struct dma_pool *pool = *(struct dma_pool **)res;
  512. if (pool)
  513. ttm_dma_free_pool(dev, pool->type);
  514. }
  515. static int ttm_dma_pool_match(struct device *dev, void *res, void *match_data)
  516. {
  517. return *(struct dma_pool **)res == match_data;
  518. }
  519. static struct dma_pool *ttm_dma_pool_init(struct device *dev, gfp_t flags,
  520. enum pool_type type)
  521. {
  522. const char *n[] = {"wc", "uc", "cached", " dma32", "huge"};
  523. enum pool_type t[] = {IS_WC, IS_UC, IS_CACHED, IS_DMA32, IS_HUGE};
  524. struct device_pools *sec_pool = NULL;
  525. struct dma_pool *pool = NULL, **ptr;
  526. unsigned i;
  527. int ret = -ENODEV;
  528. char *p;
  529. if (!dev)
  530. return NULL;
  531. ptr = devres_alloc(ttm_dma_pool_release, sizeof(*ptr), GFP_KERNEL);
  532. if (!ptr)
  533. return NULL;
  534. ret = -ENOMEM;
  535. pool = kmalloc_node(sizeof(struct dma_pool), GFP_KERNEL,
  536. dev_to_node(dev));
  537. if (!pool)
  538. goto err_mem;
  539. sec_pool = kmalloc_node(sizeof(struct device_pools), GFP_KERNEL,
  540. dev_to_node(dev));
  541. if (!sec_pool)
  542. goto err_mem;
  543. INIT_LIST_HEAD(&sec_pool->pools);
  544. sec_pool->dev = dev;
  545. sec_pool->pool = pool;
  546. INIT_LIST_HEAD(&pool->free_list);
  547. INIT_LIST_HEAD(&pool->pools);
  548. spin_lock_init(&pool->lock);
  549. pool->dev = dev;
  550. pool->npages_free = pool->npages_in_use = 0;
  551. pool->nfrees = 0;
  552. pool->gfp_flags = flags;
  553. if (type & IS_HUGE)
  554. #ifdef CONFIG_TRANSPARENT_HUGEPAGE
  555. pool->size = HPAGE_PMD_SIZE;
  556. #else
  557. BUG();
  558. #endif
  559. else
  560. pool->size = PAGE_SIZE;
  561. pool->type = type;
  562. pool->nrefills = 0;
  563. p = pool->name;
  564. for (i = 0; i < ARRAY_SIZE(t); i++) {
  565. if (type & t[i]) {
  566. p += snprintf(p, sizeof(pool->name) - (p - pool->name),
  567. "%s", n[i]);
  568. }
  569. }
  570. *p = 0;
  571. /* We copy the name for pr_* calls because by the time dma_pool_destroy
  572. * is called the kobj->name has already been deallocated. */
  573. snprintf(pool->dev_name, sizeof(pool->dev_name), "%s %s",
  574. dev_driver_string(dev), dev_name(dev));
  575. mutex_lock(&_manager->lock);
  576. /* You can get the dma_pool from either the global: */
  577. list_add(&sec_pool->pools, &_manager->pools);
  578. _manager->npools++;
  579. /* or from 'struct device': */
  580. list_add(&pool->pools, &dev->dma_pools);
  581. mutex_unlock(&_manager->lock);
  582. *ptr = pool;
  583. devres_add(dev, ptr);
  584. return pool;
  585. err_mem:
  586. devres_free(ptr);
  587. kfree(sec_pool);
  588. kfree(pool);
  589. return ERR_PTR(ret);
  590. }
  591. static struct dma_pool *ttm_dma_find_pool(struct device *dev,
  592. enum pool_type type)
  593. {
  594. struct dma_pool *pool, *tmp, *found = NULL;
  595. if (type == IS_UNDEFINED)
  596. return found;
  597. /* NB: We iterate on the 'struct dev' which has no spinlock, but
  598. * it does have a kref which we have taken. The kref is taken during
  599. * graphics driver loading - in drm_pci_init it calls either
  600. * pci_dev_get or pci_register_driver which both end up taking a kref
  601. * on 'struct device'.
  602. *
  603. * On teardown, the graphics drivers end up quiescing the TTM (put_pages)
  604. * and calling the devres destructor, ttm_dma_pool_release. Conveniently,
  605. * by that point there are no pages associated with the
  606. * driver, so this function will not be called.
  607. */
  608. list_for_each_entry_safe(pool, tmp, &dev->dma_pools, pools) {
  609. if (pool->type != type)
  610. continue;
  611. found = pool;
  612. break;
  613. }
  614. return found;
  615. }
  616. /*
  617. * Free the pages that failed to change their caching state. Pages whose
  618. * caching state has already been changed are put back into the
  619. * pool.
  620. */
  621. static void ttm_dma_handle_caching_state_failure(struct dma_pool *pool,
  622. struct list_head *d_pages,
  623. struct page **failed_pages,
  624. unsigned cpages)
  625. {
  626. struct dma_page *d_page, *tmp;
  627. struct page *p;
  628. unsigned i = 0;
  629. p = failed_pages[0];
  630. if (!p)
  631. return;
  632. /* Find the failed page. */
  633. list_for_each_entry_safe(d_page, tmp, d_pages, page_list) {
  634. if (d_page->p != p)
  635. continue;
  636. /* .. and then progress over the full list. */
  637. list_del(&d_page->page_list);
  638. __ttm_dma_free_page(pool, d_page);
  639. if (++i < cpages)
  640. p = failed_pages[i];
  641. else
  642. break;
  643. }
  644. }
  645. /*
  646. * Allocate 'count' pages, set their caching state, and put
  647. * them all on the 'd_pages' list.
  648. * We return zero for success, and negative numbers as errors.
  650. */
  651. static int ttm_dma_pool_alloc_new_pages(struct dma_pool *pool,
  652. struct list_head *d_pages,
  653. unsigned count)
  654. {
  655. struct page **caching_array;
  656. struct dma_page *dma_p;
  657. struct page *p;
  658. int r = 0;
  659. unsigned i, j, npages, cpages;
  660. unsigned max_cpages = min(count,
  661. (unsigned)(PAGE_SIZE/sizeof(struct page *)));
  662. /* allocate array for page caching change */
  663. caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL);
  664. if (!caching_array) {
  665. pr_err("%s: Unable to allocate table for new pages\n",
  666. pool->dev_name);
  667. return -ENOMEM;
  668. }
  669. if (count > 1) {
  670. pr_debug("%s: (%s:%d) Getting %d pages\n",
  671. pool->dev_name, pool->name, current->pid, count);
  672. }
  673. for (i = 0, cpages = 0; i < count; ++i) {
  674. dma_p = __ttm_dma_alloc_page(pool);
  675. if (!dma_p) {
  676. pr_err("%s: Unable to get page %u\n",
  677. pool->dev_name, i);
  678. /* store already allocated pages in the pool after
  679. * setting the caching state */
  680. if (cpages) {
  681. r = ttm_set_pages_caching(pool, caching_array,
  682. cpages);
  683. if (r)
  684. ttm_dma_handle_caching_state_failure(
  685. pool, d_pages, caching_array,
  686. cpages);
  687. }
  688. r = -ENOMEM;
  689. goto out;
  690. }
  691. p = dma_p->p;
  692. list_add(&dma_p->page_list, d_pages);
  693. #ifdef CONFIG_HIGHMEM
  694. /* gfp flags of a highmem page should never include dma32, so
  695. * we should be fine in that case
  696. */
  697. if (PageHighMem(p))
  698. continue;
  699. #endif
  700. npages = pool->size / PAGE_SIZE;
  701. for (j = 0; j < npages; ++j) {
  702. caching_array[cpages++] = p + j;
  703. if (cpages == max_cpages) {
  704. /* Note: Cannot hold the spinlock */
  705. r = ttm_set_pages_caching(pool, caching_array,
  706. cpages);
  707. if (r) {
  708. ttm_dma_handle_caching_state_failure(
  709. pool, d_pages, caching_array,
  710. cpages);
  711. goto out;
  712. }
  713. cpages = 0;
  714. }
  715. }
  716. }
  717. if (cpages) {
  718. r = ttm_set_pages_caching(pool, caching_array, cpages);
  719. if (r)
  720. ttm_dma_handle_caching_state_failure(pool, d_pages,
  721. caching_array, cpages);
  722. }
  723. out:
  724. kfree(caching_array);
  725. return r;
  726. }
  727. /*
  728. * @return count of pages still required to fulfill the request.
  729. */
  730. static int ttm_dma_page_pool_fill_locked(struct dma_pool *pool,
  731. unsigned long *irq_flags)
  732. {
  733. unsigned count = _manager->options.small;
  734. int r = pool->npages_free;
  735. if (count > pool->npages_free) {
  736. struct list_head d_pages;
  737. INIT_LIST_HEAD(&d_pages);
  738. spin_unlock_irqrestore(&pool->lock, *irq_flags);
  739. /* Returns how many more are necessary to fulfill the
  740. * request. */
  741. r = ttm_dma_pool_alloc_new_pages(pool, &d_pages, count);
  742. spin_lock_irqsave(&pool->lock, *irq_flags);
  743. if (!r) {
  744. /* Add the fresh to the end.. */
  745. list_splice(&d_pages, &pool->free_list);
  746. ++pool->nrefills;
  747. pool->npages_free += count;
  748. r = count;
  749. } else {
  750. struct dma_page *d_page;
  751. unsigned cpages = 0;
  752. pr_err("%s: Failed to fill %s pool (r:%d)!\n",
  753. pool->dev_name, pool->name, r);
  754. list_for_each_entry(d_page, &d_pages, page_list) {
  755. cpages++;
  756. }
  757. list_splice_tail(&d_pages, &pool->free_list);
  758. pool->npages_free += cpages;
  759. r = cpages;
  760. }
  761. }
  762. return r;
  763. }
  764. /*
  765. * @return count of pages still required to fulfill the request.
  766. * The populate list is actually a stack (not that it matters, as TTM
  767. * allocates one page at a time).
  768. */
  769. static int ttm_dma_pool_get_pages(struct dma_pool *pool,
  770. struct ttm_dma_tt *ttm_dma,
  771. unsigned index)
  772. {
  773. struct dma_page *d_page;
  774. struct ttm_tt *ttm = &ttm_dma->ttm;
  775. unsigned long irq_flags;
  776. int count, r = -ENOMEM;
  777. spin_lock_irqsave(&pool->lock, irq_flags);
  778. count = ttm_dma_page_pool_fill_locked(pool, &irq_flags);
  779. if (count) {
  780. d_page = list_first_entry(&pool->free_list, struct dma_page, page_list);
  781. ttm->pages[index] = d_page->p;
  782. ttm_dma->dma_address[index] = d_page->dma;
  783. list_move_tail(&d_page->page_list, &ttm_dma->pages_list);
  784. r = 0;
  785. pool->npages_in_use += 1;
  786. pool->npages_free -= 1;
  787. }
  788. spin_unlock_irqrestore(&pool->lock, irq_flags);
  789. return r;
  790. }
  791. static gfp_t ttm_dma_pool_gfp_flags(struct ttm_dma_tt *ttm_dma, bool huge)
  792. {
  793. struct ttm_tt *ttm = &ttm_dma->ttm;
  794. gfp_t gfp_flags;
  795. if (ttm->page_flags & TTM_PAGE_FLAG_DMA32)
  796. gfp_flags = GFP_USER | GFP_DMA32;
  797. else
  798. gfp_flags = GFP_HIGHUSER;
  799. if (ttm->page_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
  800. gfp_flags |= __GFP_ZERO;
  801. if (huge) {
  802. gfp_flags |= GFP_TRANSHUGE;
  803. gfp_flags &= ~__GFP_MOVABLE;
  804. gfp_flags &= ~__GFP_COMP;
  805. }
  806. return gfp_flags;
  807. }
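/*
 * Illustrative outcomes of ttm_dma_pool_gfp_flags(), derived from the
 * branches above (sketch only):
 *	DMA32 ttm, no zeroing, !huge:  GFP_USER | GFP_DMA32
 *	regular ttm, zeroed, !huge:    GFP_HIGHUSER | __GFP_ZERO
 *	huge request:                  additionally ORs in GFP_TRANSHUGE and
 *	                               clears __GFP_MOVABLE and __GFP_COMP
 */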
  808. /*
  809. * On success the pages list will hold 'count' correctly
  810. * cached pages. On failure the negative error value (-ENOMEM, etc.) is returned.
  811. */
  812. int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev)
  813. {
  814. struct ttm_tt *ttm = &ttm_dma->ttm;
  815. struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
  816. unsigned long num_pages = ttm->num_pages;
  817. struct dma_pool *pool;
  818. enum pool_type type;
  819. unsigned i;
  820. int ret;
  821. if (ttm->state != tt_unpopulated)
  822. return 0;
  823. INIT_LIST_HEAD(&ttm_dma->pages_list);
  824. i = 0;
  825. type = ttm_to_type(ttm->page_flags, ttm->caching_state);
  826. #ifdef CONFIG_TRANSPARENT_HUGEPAGE
  827. if (ttm->page_flags & TTM_PAGE_FLAG_DMA32)
  828. goto skip_huge;
  829. pool = ttm_dma_find_pool(dev, type | IS_HUGE);
  830. if (!pool) {
  831. gfp_t gfp_flags = ttm_dma_pool_gfp_flags(ttm_dma, true);
  832. pool = ttm_dma_pool_init(dev, gfp_flags, type | IS_HUGE);
  833. if (IS_ERR_OR_NULL(pool))
  834. goto skip_huge;
  835. }
  836. while (num_pages >= HPAGE_PMD_NR) {
  837. unsigned j;
  838. ret = ttm_dma_pool_get_pages(pool, ttm_dma, i);
  839. if (ret != 0)
  840. break;
  841. ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
  842. pool->size);
  843. if (unlikely(ret != 0)) {
  844. ttm_dma_unpopulate(ttm_dma, dev);
  845. return -ENOMEM;
  846. }
  847. for (j = i + 1; j < (i + HPAGE_PMD_NR); ++j) {
  848. ttm->pages[j] = ttm->pages[j - 1] + 1;
  849. ttm_dma->dma_address[j] = ttm_dma->dma_address[j - 1] +
  850. PAGE_SIZE;
  851. }
  852. i += HPAGE_PMD_NR;
  853. num_pages -= HPAGE_PMD_NR;
  854. }
  855. skip_huge:
  856. #endif
  857. pool = ttm_dma_find_pool(dev, type);
  858. if (!pool) {
  859. gfp_t gfp_flags = ttm_dma_pool_gfp_flags(ttm_dma, false);
  860. pool = ttm_dma_pool_init(dev, gfp_flags, type);
  861. if (IS_ERR_OR_NULL(pool))
  862. return -ENOMEM;
  863. }
  864. while (num_pages) {
  865. ret = ttm_dma_pool_get_pages(pool, ttm_dma, i);
  866. if (ret != 0) {
  867. ttm_dma_unpopulate(ttm_dma, dev);
  868. return -ENOMEM;
  869. }
  870. ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
  871. pool->size);
  872. if (unlikely(ret != 0)) {
  873. ttm_dma_unpopulate(ttm_dma, dev);
  874. return -ENOMEM;
  875. }
  876. ++i;
  877. --num_pages;
  878. }
  879. if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
  880. ret = ttm_tt_swapin(ttm);
  881. if (unlikely(ret != 0)) {
  882. ttm_dma_unpopulate(ttm_dma, dev);
  883. return ret;
  884. }
  885. }
  886. ttm->state = tt_unbound;
  887. return 0;
  888. }
  889. EXPORT_SYMBOL_GPL(ttm_dma_populate);
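/*
 * Hypothetical driver-side sketch (illustration only, not part of this file):
 * a TTM backend using this allocator typically forwards its populate and
 * unpopulate hooks straight to the two exported helpers. The wrapper struct,
 * its field names and the missing error handling are all assumptions.
 */
#if 0	/* illustrative sketch, not compiled */
struct example_tt {				/* hypothetical driver wrapper */
	struct ttm_dma_tt dma_tt;
	struct device *dev;			/* device used for DMA mappings */
};

static int example_tt_populate(struct ttm_tt *ttm)
{
	struct example_tt *tt = container_of(ttm, struct example_tt, dma_tt.ttm);

	return ttm_dma_populate(&tt->dma_tt, tt->dev);
}

static void example_tt_unpopulate(struct ttm_tt *ttm)
{
	struct example_tt *tt = container_of(ttm, struct example_tt, dma_tt.ttm);

	ttm_dma_unpopulate(&tt->dma_tt, tt->dev);
}
#endif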
  890. /* Put all pages in pages list to correct pool to wait for reuse */
  891. void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
  892. {
  893. struct ttm_tt *ttm = &ttm_dma->ttm;
  894. struct dma_pool *pool;
  895. struct dma_page *d_page, *next;
  896. enum pool_type type;
  897. bool is_cached = false;
  898. unsigned count, i, npages = 0;
  899. unsigned long irq_flags;
  900. type = ttm_to_type(ttm->page_flags, ttm->caching_state);
  901. #ifdef CONFIG_TRANSPARENT_HUGEPAGE
  902. pool = ttm_dma_find_pool(dev, type | IS_HUGE);
  903. if (pool) {
  904. count = 0;
  905. list_for_each_entry_safe(d_page, next, &ttm_dma->pages_list,
  906. page_list) {
  907. if (!(d_page->vaddr & VADDR_FLAG_HUGE_POOL))
  908. continue;
  909. count++;
  910. ttm_mem_global_free_page(ttm->glob->mem_glob,
  911. d_page->p, pool->size);
  912. ttm_dma_page_put(pool, d_page);
  913. }
  914. spin_lock_irqsave(&pool->lock, irq_flags);
  915. pool->npages_in_use -= count;
  916. pool->nfrees += count;
  917. spin_unlock_irqrestore(&pool->lock, irq_flags);
  918. }
  919. #endif
  920. pool = ttm_dma_find_pool(dev, type);
  921. if (!pool)
  922. return;
  923. is_cached = (ttm_dma_find_pool(pool->dev,
  924. ttm_to_type(ttm->page_flags, tt_cached)) == pool);
  925. /* Make sure the pages array matches the list and count the number of pages. */
  926. count = 0;
  927. list_for_each_entry(d_page, &ttm_dma->pages_list, page_list) {
  928. ttm->pages[count] = d_page->p;
  929. count++;
  930. }
  931. spin_lock_irqsave(&pool->lock, irq_flags);
  932. pool->npages_in_use -= count;
  933. if (is_cached) {
  934. pool->nfrees += count;
  935. } else {
  936. pool->npages_free += count;
  937. list_splice(&ttm_dma->pages_list, &pool->free_list);
  938. /*
  939. * Wait to have at least NUM_PAGES_TO_ALLOC pages
  940. * to free in order to minimize calls to set_memory_wb().
  941. */
  942. if (pool->npages_free >= (_manager->options.max_size +
  943. NUM_PAGES_TO_ALLOC))
  944. npages = pool->npages_free - _manager->options.max_size;
  945. }
  946. spin_unlock_irqrestore(&pool->lock, irq_flags);
  947. if (is_cached) {
  948. list_for_each_entry_safe(d_page, next, &ttm_dma->pages_list, page_list) {
  949. ttm_mem_global_free_page(ttm->glob->mem_glob,
  950. d_page->p, pool->size);
  951. ttm_dma_page_put(pool, d_page);
  952. }
  953. } else {
  954. for (i = 0; i < count; i++) {
  955. ttm_mem_global_free_page(ttm->glob->mem_glob,
  956. ttm->pages[i], pool->size);
  957. }
  958. }
  959. INIT_LIST_HEAD(&ttm_dma->pages_list);
  960. for (i = 0; i < ttm->num_pages; i++) {
  961. ttm->pages[i] = NULL;
  962. ttm_dma->dma_address[i] = 0;
  963. }
  964. /* Shrink the pool if necessary (only for !is_cached pools). */
  965. if (npages)
  966. ttm_dma_page_pool_free(pool, npages, false);
  967. ttm->state = tt_unpopulated;
  968. }
  969. EXPORT_SYMBOL_GPL(ttm_dma_unpopulate);
  970. /**
  971. * Callback for the mm to request that the pool reduce the number of pages held.
  972. *
  973. * XXX: (dchinner) Deadlock warning!
  974. *
  975. * I'm getting sadder as I hear more pathetic whimpers about needing per-pool
  976. * shrinkers.
  977. */
  978. static unsigned long
  979. ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
  980. {
  981. static unsigned start_pool;
  982. unsigned idx = 0;
  983. unsigned pool_offset;
  984. unsigned shrink_pages = sc->nr_to_scan;
  985. struct device_pools *p;
  986. unsigned long freed = 0;
  987. if (list_empty(&_manager->pools))
  988. return SHRINK_STOP;
  989. if (!mutex_trylock(&_manager->lock))
  990. return SHRINK_STOP;
  991. if (!_manager->npools)
  992. goto out;
  993. pool_offset = ++start_pool % _manager->npools;
  994. list_for_each_entry(p, &_manager->pools, pools) {
  995. unsigned nr_free;
  996. if (!p->dev)
  997. continue;
  998. if (shrink_pages == 0)
  999. break;
  1000. /* Do it in round-robin fashion. */
  1001. if (++idx < pool_offset)
  1002. continue;
  1003. nr_free = shrink_pages;
  1004. /* OK to use static buffer since global mutex is held. */
  1005. shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free, true);
  1006. freed += nr_free - shrink_pages;
  1007. pr_debug("%s: (%s:%d) Asked to shrink %d, have %d more to go\n",
  1008. p->pool->dev_name, p->pool->name, current->pid,
  1009. nr_free, shrink_pages);
  1010. }
  1011. out:
  1012. mutex_unlock(&_manager->lock);
  1013. return freed;
  1014. }
  1015. static unsigned long
  1016. ttm_dma_pool_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
  1017. {
  1018. struct device_pools *p;
  1019. unsigned long count = 0;
  1020. if (!mutex_trylock(&_manager->lock))
  1021. return 0;
  1022. list_for_each_entry(p, &_manager->pools, pools)
  1023. count += p->pool->npages_free;
  1024. mutex_unlock(&_manager->lock);
  1025. return count;
  1026. }
  1027. static void ttm_dma_pool_mm_shrink_init(struct ttm_pool_manager *manager)
  1028. {
  1029. manager->mm_shrink.count_objects = ttm_dma_pool_shrink_count;
  1030. manager->mm_shrink.scan_objects = &ttm_dma_pool_shrink_scan;
  1031. manager->mm_shrink.seeks = 1;
  1032. register_shrinker(&manager->mm_shrink);
  1033. }
  1034. static void ttm_dma_pool_mm_shrink_fini(struct ttm_pool_manager *manager)
  1035. {
  1036. unregister_shrinker(&manager->mm_shrink);
  1037. }
  1038. int ttm_dma_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
  1039. {
  1040. int ret = -ENOMEM;
  1041. WARN_ON(_manager);
  1042. pr_info("Initializing DMA pool allocator\n");
  1043. _manager = kzalloc(sizeof(*_manager), GFP_KERNEL);
  1044. if (!_manager)
  1045. goto err;
  1046. mutex_init(&_manager->lock);
  1047. INIT_LIST_HEAD(&_manager->pools);
  1048. _manager->options.max_size = max_pages;
  1049. _manager->options.small = SMALL_ALLOCATION;
  1050. _manager->options.alloc_size = NUM_PAGES_TO_ALLOC;
  1051. /* This takes care of auto-freeing the _manager */
  1052. ret = kobject_init_and_add(&_manager->kobj, &ttm_pool_kobj_type,
  1053. &glob->kobj, "dma_pool");
  1054. if (unlikely(ret != 0)) {
  1055. kobject_put(&_manager->kobj);
  1056. goto err;
  1057. }
  1058. ttm_dma_pool_mm_shrink_init(_manager);
  1059. return 0;
  1060. err:
  1061. return ret;
  1062. }
  1063. void ttm_dma_page_alloc_fini(void)
  1064. {
  1065. struct device_pools *p, *t;
  1066. pr_info("Finalizing DMA pool allocator\n");
  1067. ttm_dma_pool_mm_shrink_fini(_manager);
  1068. list_for_each_entry_safe_reverse(p, t, &_manager->pools, pools) {
  1069. dev_dbg(p->dev, "(%s:%d) Freeing.\n", p->pool->name,
  1070. current->pid);
  1071. WARN_ON(devres_destroy(p->dev, ttm_dma_pool_release,
  1072. ttm_dma_pool_match, p->pool));
  1073. ttm_dma_free_pool(p->dev, p->pool->type);
  1074. }
  1075. kobject_put(&_manager->kobj);
  1076. _manager = NULL;
  1077. }
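/*
 * Illustrative lifecycle sketch (hypothetical call site, not from this file):
 * the global TTM memory accounting layer is expected to initialize this
 * allocator once with its kobject parent and a page budget, and to tear it
 * down symmetrically on shutdown. 'glob' and 'max_pages' below stand in for
 * whatever the caller actually uses.
 *
 *	ret = ttm_dma_page_alloc_init(glob, max_pages);   (global init)
 *	...
 *	ttm_dma_page_alloc_fini();                        (global teardown)
 */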
  1078. int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data)
  1079. {
  1080. struct device_pools *p;
  1081. struct dma_pool *pool = NULL;
  1082. char *h[] = {"pool", "refills", "pages freed", "inuse", "available",
  1083. "name", "virt", "busaddr"};
  1084. if (!_manager) {
  1085. seq_printf(m, "No pool allocator running.\n");
  1086. return 0;
  1087. }
  1088. seq_printf(m, "%13s %12s %13s %8s %8s %8s\n",
  1089. h[0], h[1], h[2], h[3], h[4], h[5]);
  1090. mutex_lock(&_manager->lock);
  1091. list_for_each_entry(p, &_manager->pools, pools) {
  1092. struct device *dev = p->dev;
  1093. if (!dev)
  1094. continue;
  1095. pool = p->pool;
  1096. seq_printf(m, "%13s %12ld %13ld %8d %8d %8s\n",
  1097. pool->name, pool->nrefills,
  1098. pool->nfrees, pool->npages_in_use,
  1099. pool->npages_free,
  1100. pool->dev_name);
  1101. }
  1102. mutex_unlock(&_manager->lock);
  1103. return 0;
  1104. }
  1105. EXPORT_SYMBOL_GPL(ttm_dma_page_alloc_debugfs);
  1106. #endif