ttm_page_alloc_dma.c

  1. /*
  2. * Copyright 2011 (c) Oracle Corp.
  3. * Permission is hereby granted, free of charge, to any person obtaining a
  4. * copy of this software and associated documentation files (the "Software"),
  5. * to deal in the Software without restriction, including without limitation
  6. * the rights to use, copy, modify, merge, publish, distribute, sub license,
  7. * and/or sell copies of the Software, and to permit persons to whom the
  8. * Software is furnished to do so, subject to the following conditions:
  9. *
  10. * The above copyright notice and this permission notice (including the
  11. * next paragraph) shall be included in all copies or substantial portions
  12. * of the Software.
  13. *
  14. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16. * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
  17. * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  18. * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  19. * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
  20. * DEALINGS IN THE SOFTWARE.
  21. *
  22. * Author: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
  23. */
  24. /*
  25. * A simple DMA pool loosely based on dmapool.c. It has certain advantages
  26. * over the DMA pools:
  27. * - Pool collects recently freed pages for reuse (and hooks up to
  28. * the shrinker).
  29. * - Tracks pages currently in use
  30. * - Tracks whether the page is WC, UC or cached (and reverts to WB
  31. * when freed).
  32. */
  33. #if defined(CONFIG_SWIOTLB) || defined(CONFIG_INTEL_IOMMU)
  34. #define pr_fmt(fmt) "[TTM] " fmt
  35. #include <linux/dma-mapping.h>
  36. #include <linux/list.h>
  37. #include <linux/seq_file.h> /* for seq_printf */
  38. #include <linux/slab.h>
  39. #include <linux/spinlock.h>
  40. #include <linux/highmem.h>
  41. #include <linux/mm_types.h>
  42. #include <linux/module.h>
  43. #include <linux/mm.h>
  44. #include <linux/atomic.h>
  45. #include <linux/device.h>
  46. #include <linux/kthread.h>
  47. #include <drm/ttm/ttm_bo_driver.h>
  48. #include <drm/ttm/ttm_page_alloc.h>
  49. #if IS_ENABLED(CONFIG_AGP)
  50. #include <asm/agp.h>
  51. #endif
  52. #ifdef CONFIG_X86
  53. #include <asm/set_memory.h>
  54. #endif
  55. #define NUM_PAGES_TO_ALLOC (PAGE_SIZE/sizeof(struct page *))
  56. #define SMALL_ALLOCATION 4
  57. #define FREE_ALL_PAGES (~0U)
  58. #define VADDR_FLAG_HUGE_POOL 1UL
  59. #define VADDR_FLAG_UPDATED_COUNT 2UL
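/*
 * NUM_PAGES_TO_ALLOC is one page's worth of 'struct page' pointers per
 * batch: with 4 KiB pages and 8-byte pointers (a typical 64-bit
 * configuration, used here only for illustration) that is 512 pages.
 * The VADDR_FLAG_* bits are kept in the low bits of dma_page.vaddr,
 * which is safe because dma_alloc_attrs() returns at least page-aligned
 * memory, so those bits are otherwise always zero.
 */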
  60. enum pool_type {
  61. IS_UNDEFINED = 0,
  62. IS_WC = 1 << 1,
  63. IS_UC = 1 << 2,
  64. IS_CACHED = 1 << 3,
  65. IS_DMA32 = 1 << 4,
  66. IS_HUGE = 1 << 5
  67. };
  68. /*
  69. * The pool structure. There are up to nine pools:
  70. * - generic (not restricted to DMA32):
  71. * - write combined, uncached, cached.
  72. * - dma32 (up to 2^32 - so up to 4GB):
  73. * - write combined, uncached, cached.
  74. * - huge (not restricted to DMA32):
  75. * - write combined, uncached, cached.
  76. * for each 'struct device'. The 'cached' is for pages that are actively used.
  77. * The other ones can be shrunk by the shrinker API if necessary.
  78. * @pools: The 'struct device->dma_pools' link.
  79. * @type: Type of the pool
  80. * @lock: Protects the free_list from concurrent access. Must be
  81. * used with the irqsave/irqrestore variants because the pool allocator may be called
  82. * from delayed work.
  83. * @free_list: Pool of pages that are free to be used. No order requirements.
  84. * @dev: The device that is associated with these pools.
  85. * @size: Size used during DMA allocation.
  86. * @npages_free: Count of available pages for re-use.
  87. * @npages_in_use: Count of pages that are in use.
  88. * @nfrees: Stats when pool is shrinking.
  89. * @nrefills: Stats when the pool is grown.
  90. * @gfp_flags: Flags to pass for alloc_page.
  91. * @name: Name of the pool.
  92. * @dev_name: Name derived from dev - similar to how dev_info works.
  93. * Used during shutdown because dev_info is unavailable at release time.
  94. */
  95. struct dma_pool {
  96. struct list_head pools; /* The 'struct device->dma_pools' link */
  97. enum pool_type type;
  98. spinlock_t lock;
  99. struct list_head free_list;
  100. struct device *dev;
  101. unsigned size;
  102. unsigned npages_free;
  103. unsigned npages_in_use;
  104. unsigned long nfrees; /* Stats when shrunk. */
  105. unsigned long nrefills; /* Stats when grown. */
  106. gfp_t gfp_flags;
  107. char name[13]; /* "cached dma32" */
  108. char dev_name[64]; /* Constructed from dev */
  109. };
  110. /*
  111. * The accounting page keeping track of the allocated page along with
  112. * the DMA address.
  113. * @page_list: The link to the 'page_list' in 'struct dma_pool'.
  114. * @vaddr: The virtual address of the page and a flag if the page belongs to a
  115. * huge pool
  116. * @dma: The bus address of the page. If the page is not allocated
  117. * via the DMA API, it will be -1.
  118. */
  119. struct dma_page {
  120. struct list_head page_list;
  121. unsigned long vaddr;
  122. struct page *p;
  123. dma_addr_t dma;
  124. };
  125. /*
  126. * Limits for the pool. They are handled without locks because the only place
  127. * they may change is the sysfs store. They won't take immediate effect anyway,
  128. * so forcing serialized access to them is pointless.
  129. */
  130. struct ttm_pool_opts {
  131. unsigned alloc_size;
  132. unsigned max_size;
  133. unsigned small;
  134. };
  135. /*
  136. * Contains the list of all of the 'struct device' and their corresponding
  137. * DMA pools. Guarded by _manager->lock.
  138. * @pools: The link to 'struct ttm_pool_manager->pools'
  139. * @dev: The 'struct device' associated with the 'pool'
  140. * @pool: The 'struct dma_pool' associated with the 'dev'
  141. */
  142. struct device_pools {
  143. struct list_head pools;
  144. struct device *dev;
  145. struct dma_pool *pool;
  146. };
  147. /*
  148. * struct ttm_pool_manager - Holds memory pools for fast allocation
  149. *
  150. * @lock: Lock used when adding/removing from pools
  151. * @pools: List of 'struct device' and 'struct dma_pool' tuples.
  152. * @options: Limits for the pool.
  153. * @npools: Total number of pools in existence.
  154. * @mm_shrink: The structure used by register_shrinker/unregister_shrinker.
  155. */
  156. struct ttm_pool_manager {
  157. struct mutex lock;
  158. struct list_head pools;
  159. struct ttm_pool_opts options;
  160. unsigned npools;
  161. struct shrinker mm_shrink;
  162. struct kobject kobj;
  163. };
  164. static struct ttm_pool_manager *_manager;
  165. static struct attribute ttm_page_pool_max = {
  166. .name = "pool_max_size",
  167. .mode = S_IRUGO | S_IWUSR
  168. };
  169. static struct attribute ttm_page_pool_small = {
  170. .name = "pool_small_allocation",
  171. .mode = S_IRUGO | S_IWUSR
  172. };
  173. static struct attribute ttm_page_pool_alloc_size = {
  174. .name = "pool_allocation_size",
  175. .mode = S_IRUGO | S_IWUSR
  176. };
  177. static struct attribute *ttm_pool_attrs[] = {
  178. &ttm_page_pool_max,
  179. &ttm_page_pool_small,
  180. &ttm_page_pool_alloc_size,
  181. NULL
  182. };
  183. static void ttm_pool_kobj_release(struct kobject *kobj)
  184. {
  185. struct ttm_pool_manager *m =
  186. container_of(kobj, struct ttm_pool_manager, kobj);
  187. kfree(m);
  188. }
  189. static ssize_t ttm_pool_store(struct kobject *kobj, struct attribute *attr,
  190. const char *buffer, size_t size)
  191. {
  192. struct ttm_pool_manager *m =
  193. container_of(kobj, struct ttm_pool_manager, kobj);
  194. int chars;
  195. unsigned val;
  196. chars = sscanf(buffer, "%u", &val);
  197. if (chars == 0)
  198. return size;
  199. /* Convert kb to number of pages */
  200. val = val / (PAGE_SIZE >> 10);
  201. if (attr == &ttm_page_pool_max) {
  202. m->options.max_size = val;
  203. } else if (attr == &ttm_page_pool_small) {
  204. m->options.small = val;
  205. } else if (attr == &ttm_page_pool_alloc_size) {
  206. if (val > NUM_PAGES_TO_ALLOC*8) {
  207. pr_err("Setting allocation size to %lu is not allowed. Recommended size is %lu\n",
  208. NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
  209. NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
  210. return size;
  211. } else if (val > NUM_PAGES_TO_ALLOC) {
  212. pr_warn("Setting allocation size to larger than %lu is not recommended\n",
  213. NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
  214. }
  215. m->options.alloc_size = val;
  216. }
  217. return size;
  218. }
  219. static ssize_t ttm_pool_show(struct kobject *kobj, struct attribute *attr,
  220. char *buffer)
  221. {
  222. struct ttm_pool_manager *m =
  223. container_of(kobj, struct ttm_pool_manager, kobj);
  224. unsigned val = 0;
  225. if (attr == &ttm_page_pool_max)
  226. val = m->options.max_size;
  227. else if (attr == &ttm_page_pool_small)
  228. val = m->options.small;
  229. else if (attr == &ttm_page_pool_alloc_size)
  230. val = m->options.alloc_size;
  231. val = val * (PAGE_SIZE >> 10);
  232. return snprintf(buffer, PAGE_SIZE, "%u\n", val);
  233. }
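/*
 * The sysfs knobs above are expressed in KiB and converted to pages on
 * store and back on show. For example (illustrative only, assuming
 * 4 KiB pages), writing "1024" to pool_max_size stores 256 pages in
 * options.max_size, and reading the attribute reports 1024 again. The
 * attributes live in a "dma_pool" directory under the TTM memory global
 * kobject (see ttm_dma_page_alloc_init() below); the absolute sysfs
 * path depends on where that kobject is parented.
 */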
  234. static const struct sysfs_ops ttm_pool_sysfs_ops = {
  235. .show = &ttm_pool_show,
  236. .store = &ttm_pool_store,
  237. };
  238. static struct kobj_type ttm_pool_kobj_type = {
  239. .release = &ttm_pool_kobj_release,
  240. .sysfs_ops = &ttm_pool_sysfs_ops,
  241. .default_attrs = ttm_pool_attrs,
  242. };
  243. #ifndef CONFIG_X86
  244. static int set_pages_array_wb(struct page **pages, int addrinarray)
  245. {
  246. #if IS_ENABLED(CONFIG_AGP)
  247. int i;
  248. for (i = 0; i < addrinarray; i++)
  249. unmap_page_from_agp(pages[i]);
  250. #endif
  251. return 0;
  252. }
  253. static int set_pages_array_wc(struct page **pages, int addrinarray)
  254. {
  255. #if IS_ENABLED(CONFIG_AGP)
  256. int i;
  257. for (i = 0; i < addrinarray; i++)
  258. map_page_into_agp(pages[i]);
  259. #endif
  260. return 0;
  261. }
  262. static int set_pages_array_uc(struct page **pages, int addrinarray)
  263. {
  264. #if IS_ENABLED(CONFIG_AGP)
  265. int i;
  266. for (i = 0; i < addrinarray; i++)
  267. map_page_into_agp(pages[i]);
  268. #endif
  269. return 0;
  270. }
  271. #endif /* for !CONFIG_X86 */
  272. static int ttm_set_pages_caching(struct dma_pool *pool,
  273. struct page **pages, unsigned cpages)
  274. {
  275. int r = 0;
  276. /* Set page caching */
  277. if (pool->type & IS_UC) {
  278. r = set_pages_array_uc(pages, cpages);
  279. if (r)
  280. pr_err("%s: Failed to set %d pages to uc!\n",
  281. pool->dev_name, cpages);
  282. }
  283. if (pool->type & IS_WC) {
  284. r = set_pages_array_wc(pages, cpages);
  285. if (r)
  286. pr_err("%s: Failed to set %d pages to wc!\n",
  287. pool->dev_name, cpages);
  288. }
  289. return r;
  290. }
  291. static void __ttm_dma_free_page(struct dma_pool *pool, struct dma_page *d_page)
  292. {
  293. dma_addr_t dma = d_page->dma;
  294. d_page->vaddr &= ~VADDR_FLAG_HUGE_POOL;
  295. dma_free_coherent(pool->dev, pool->size, (void *)d_page->vaddr, dma);
  296. kfree(d_page);
  297. d_page = NULL;
  298. }
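/*
 * Allocate a single dma_page via dma_alloc_attrs(). Some DMA backends
 * hand back a remapped (vmalloc) address rather than a lowmem linear
 * address, hence the is_vmalloc_addr() check below. For huge pools
 * DMA_ATTR_NO_WARN is passed because a failed huge allocation is
 * expected and the caller simply falls back to the normal pool.
 */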
  299. static struct dma_page *__ttm_dma_alloc_page(struct dma_pool *pool)
  300. {
  301. struct dma_page *d_page;
  302. unsigned long attrs = 0;
  303. void *vaddr;
  304. d_page = kmalloc(sizeof(struct dma_page), GFP_KERNEL);
  305. if (!d_page)
  306. return NULL;
  307. if (pool->type & IS_HUGE)
  308. attrs = DMA_ATTR_NO_WARN;
  309. vaddr = dma_alloc_attrs(pool->dev, pool->size, &d_page->dma,
  310. pool->gfp_flags, attrs);
  311. if (vaddr) {
  312. if (is_vmalloc_addr(vaddr))
  313. d_page->p = vmalloc_to_page(vaddr);
  314. else
  315. d_page->p = virt_to_page(vaddr);
  316. d_page->vaddr = (unsigned long)vaddr;
  317. if (pool->type & IS_HUGE)
  318. d_page->vaddr |= VADDR_FLAG_HUGE_POOL;
  319. } else {
  320. kfree(d_page);
  321. d_page = NULL;
  322. }
  323. return d_page;
  324. }
  325. static enum pool_type ttm_to_type(int flags, enum ttm_caching_state cstate)
  326. {
  327. enum pool_type type = IS_UNDEFINED;
  328. if (flags & TTM_PAGE_FLAG_DMA32)
  329. type |= IS_DMA32;
  330. if (cstate == tt_cached)
  331. type |= IS_CACHED;
  332. else if (cstate == tt_uncached)
  333. type |= IS_UC;
  334. else
  335. type |= IS_WC;
  336. return type;
  337. }
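/*
 * Example (illustrative): a ttm_tt with TTM_PAGE_FLAG_DMA32 set and a
 * caching state of tt_uncached maps to IS_DMA32 | IS_UC, so its pages
 * come from the "uc dma32" pool; tt_cached without the DMA32 flag maps
 * to plain IS_CACHED ("cached").
 */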
  338. static void ttm_pool_update_free_locked(struct dma_pool *pool,
  339. unsigned freed_pages)
  340. {
  341. pool->npages_free -= freed_pages;
  342. pool->nfrees += freed_pages;
  343. }
  344. /* set memory back to wb and free the pages. */
  345. static void ttm_dma_page_put(struct dma_pool *pool, struct dma_page *d_page)
  346. {
  347. struct page *page = d_page->p;
  348. unsigned i, num_pages;
  349. /* Don't set WB on WB page pool. */
  350. if (!(pool->type & IS_CACHED)) {
  351. num_pages = pool->size / PAGE_SIZE;
  352. for (i = 0; i < num_pages; ++i, ++page) {
  353. if (set_pages_array_wb(&page, 1)) {
  354. pr_err("%s: Failed to set %d pages to wb!\n",
  355. pool->dev_name, 1);
  356. }
  357. }
  358. }
  359. list_del(&d_page->page_list);
  360. __ttm_dma_free_page(pool, d_page);
  361. }
  362. static void ttm_dma_pages_put(struct dma_pool *pool, struct list_head *d_pages,
  363. struct page *pages[], unsigned npages)
  364. {
  365. struct dma_page *d_page, *tmp;
  366. if (pool->type & IS_HUGE) {
  367. list_for_each_entry_safe(d_page, tmp, d_pages, page_list)
  368. ttm_dma_page_put(pool, d_page);
  369. return;
  370. }
  371. /* Don't set WB on WB page pool. */
  372. if (npages && !(pool->type & IS_CACHED) &&
  373. set_pages_array_wb(pages, npages))
  374. pr_err("%s: Failed to set %d pages to wb!\n",
  375. pool->dev_name, npages);
  376. list_for_each_entry_safe(d_page, tmp, d_pages, page_list) {
  377. list_del(&d_page->page_list);
  378. __ttm_dma_free_page(pool, d_page);
  379. }
  380. }
  381. /*
  382. * Free pages from pool.
  383. *
  384. * To prevent hogging the ttm_swap process we free at most NUM_PAGES_TO_ALLOC
  385. * pages in one go.
  386. *
  387. * @pool: The pool to free the pages from.
  388. * @nr_free: Number of pages to free; FREE_ALL_PAGES frees every page in the pool.
  389. * @use_static: Safe to use static buffer
  390. **/
  391. static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free,
  392. bool use_static)
  393. {
  394. static struct page *static_buf[NUM_PAGES_TO_ALLOC];
  395. unsigned long irq_flags;
  396. struct dma_page *dma_p, *tmp;
  397. struct page **pages_to_free;
  398. struct list_head d_pages;
  399. unsigned freed_pages = 0,
  400. npages_to_free = nr_free;
  401. if (NUM_PAGES_TO_ALLOC < nr_free)
  402. npages_to_free = NUM_PAGES_TO_ALLOC;
  403. #if 0
  404. if (nr_free > 1) {
  405. pr_debug("%s: (%s:%d) Attempting to free %d (%d) pages\n",
  406. pool->dev_name, pool->name, current->pid,
  407. npages_to_free, nr_free);
  408. }
  409. #endif
  410. if (use_static)
  411. pages_to_free = static_buf;
  412. else
  413. pages_to_free = kmalloc_array(npages_to_free,
  414. sizeof(struct page *),
  415. GFP_KERNEL);
  416. if (!pages_to_free) {
  417. pr_debug("%s: Failed to allocate memory for pool free operation\n",
  418. pool->dev_name);
  419. return 0;
  420. }
  421. INIT_LIST_HEAD(&d_pages);
  422. restart:
  423. spin_lock_irqsave(&pool->lock, irq_flags);
  424. /* We pick the oldest ones off the list */
  425. list_for_each_entry_safe_reverse(dma_p, tmp, &pool->free_list,
  426. page_list) {
  427. if (freed_pages >= npages_to_free)
  428. break;
  429. /* Move the dma_page from one list to another. */
  430. list_move(&dma_p->page_list, &d_pages);
  431. pages_to_free[freed_pages++] = dma_p->p;
  432. /* We can only remove NUM_PAGES_TO_ALLOC at a time. */
  433. if (freed_pages >= NUM_PAGES_TO_ALLOC) {
  434. ttm_pool_update_free_locked(pool, freed_pages);
  435. /**
  436. * Because changing page caching is costly
  437. * we unlock the pool to prevent stalling.
  438. */
  439. spin_unlock_irqrestore(&pool->lock, irq_flags);
  440. ttm_dma_pages_put(pool, &d_pages, pages_to_free,
  441. freed_pages);
  442. INIT_LIST_HEAD(&d_pages);
  443. if (likely(nr_free != FREE_ALL_PAGES))
  444. nr_free -= freed_pages;
  445. if (NUM_PAGES_TO_ALLOC >= nr_free)
  446. npages_to_free = nr_free;
  447. else
  448. npages_to_free = NUM_PAGES_TO_ALLOC;
  449. freed_pages = 0;
  450. /* if there is more to free, restart the processing */
  451. if (nr_free)
  452. goto restart;
  453. /* Not allowed to fall through or break because the
  454. * code following the loop expects the spinlock to be
  455. * held, while we have already dropped it here.
  456. */
  457. goto out;
  458. }
  459. }
  460. /* remove range of pages from the pool */
  461. if (freed_pages) {
  462. ttm_pool_update_free_locked(pool, freed_pages);
  463. nr_free -= freed_pages;
  464. }
  465. spin_unlock_irqrestore(&pool->lock, irq_flags);
  466. if (freed_pages)
  467. ttm_dma_pages_put(pool, &d_pages, pages_to_free, freed_pages);
  468. out:
  469. if (pages_to_free != static_buf)
  470. kfree(pages_to_free);
  471. return nr_free;
  472. }
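/*
 * Worked example (illustrative, assuming 4 KiB pages and 8-byte
 * pointers so NUM_PAGES_TO_ALLOC is 512): asking the function above to
 * free 2000 pages releases them in three full batches of 512 plus a
 * final batch of 464, dropping the pool lock around every
 * ttm_dma_pages_put() call so that caching changes never happen under
 * the spinlock.
 */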
  473. static void ttm_dma_free_pool(struct device *dev, enum pool_type type)
  474. {
  475. struct device_pools *p;
  476. struct dma_pool *pool;
  477. if (!dev)
  478. return;
  479. mutex_lock(&_manager->lock);
  480. list_for_each_entry_reverse(p, &_manager->pools, pools) {
  481. if (p->dev != dev)
  482. continue;
  483. pool = p->pool;
  484. if (pool->type != type)
  485. continue;
  486. list_del(&p->pools);
  487. kfree(p);
  488. _manager->npools--;
  489. break;
  490. }
  491. list_for_each_entry_reverse(pool, &dev->dma_pools, pools) {
  492. if (pool->type != type)
  493. continue;
  494. /* Takes a spinlock.. */
  495. /* OK to use static buffer since global mutex is held. */
  496. ttm_dma_page_pool_free(pool, FREE_ALL_PAGES, true);
  497. WARN_ON(((pool->npages_in_use + pool->npages_free) != 0));
  498. /* This code path is called after _all_ references to the
  499. * struct device have been dropped - so nobody should be
  500. * touching it. In case somebody is trying to _add_ a pool,
  501. * we are guarded by the mutex. */
  502. list_del(&pool->pools);
  503. kfree(pool);
  504. break;
  505. }
  506. mutex_unlock(&_manager->lock);
  507. }
  508. /*
  509. * This destructor is run when the 'struct device' is freed,
  510. * although the pool might have already been freed earlier.
  511. */
  512. static void ttm_dma_pool_release(struct device *dev, void *res)
  513. {
  514. struct dma_pool *pool = *(struct dma_pool **)res;
  515. if (pool)
  516. ttm_dma_free_pool(dev, pool->type);
  517. }
  518. static int ttm_dma_pool_match(struct device *dev, void *res, void *match_data)
  519. {
  520. return *(struct dma_pool **)res == match_data;
  521. }
  522. static struct dma_pool *ttm_dma_pool_init(struct device *dev, gfp_t flags,
  523. enum pool_type type)
  524. {
  525. const char *n[] = {"wc", "uc", "cached", " dma32", "huge"};
  526. enum pool_type t[] = {IS_WC, IS_UC, IS_CACHED, IS_DMA32, IS_HUGE};
  527. struct device_pools *sec_pool = NULL;
  528. struct dma_pool *pool = NULL, **ptr;
  529. unsigned i;
  530. int ret = -ENODEV;
  531. char *p;
  532. if (!dev)
  533. return NULL;
  534. ptr = devres_alloc(ttm_dma_pool_release, sizeof(*ptr), GFP_KERNEL);
  535. if (!ptr)
  536. return NULL;
  537. ret = -ENOMEM;
  538. pool = kmalloc_node(sizeof(struct dma_pool), GFP_KERNEL,
  539. dev_to_node(dev));
  540. if (!pool)
  541. goto err_mem;
  542. sec_pool = kmalloc_node(sizeof(struct device_pools), GFP_KERNEL,
  543. dev_to_node(dev));
  544. if (!sec_pool)
  545. goto err_mem;
  546. INIT_LIST_HEAD(&sec_pool->pools);
  547. sec_pool->dev = dev;
  548. sec_pool->pool = pool;
  549. INIT_LIST_HEAD(&pool->free_list);
  550. INIT_LIST_HEAD(&pool->pools);
  551. spin_lock_init(&pool->lock);
  552. pool->dev = dev;
  553. pool->npages_free = pool->npages_in_use = 0;
  554. pool->nfrees = 0;
  555. pool->gfp_flags = flags;
  556. if (type & IS_HUGE)
  557. #ifdef CONFIG_TRANSPARENT_HUGEPAGE
  558. pool->size = HPAGE_PMD_SIZE;
  559. #else
  560. BUG();
  561. #endif
  562. else
  563. pool->size = PAGE_SIZE;
  564. pool->type = type;
  565. pool->nrefills = 0;
  566. p = pool->name;
  567. for (i = 0; i < ARRAY_SIZE(t); i++) {
  568. if (type & t[i]) {
  569. p += snprintf(p, sizeof(pool->name) - (p - pool->name),
  570. "%s", n[i]);
  571. }
  572. }
  573. *p = 0;
  574. /* We copy the name for pr_* calls because by the time dma_pool_destroy
  575. * is called the kobj->name has already been deallocated. */
  576. snprintf(pool->dev_name, sizeof(pool->dev_name), "%s %s",
  577. dev_driver_string(dev), dev_name(dev));
  578. mutex_lock(&_manager->lock);
  579. /* You can get the dma_pool from either the global: */
  580. list_add(&sec_pool->pools, &_manager->pools);
  581. _manager->npools++;
  582. /* or from 'struct device': */
  583. list_add(&pool->pools, &dev->dma_pools);
  584. mutex_unlock(&_manager->lock);
  585. *ptr = pool;
  586. devres_add(dev, ptr);
  587. return pool;
  588. err_mem:
  589. devres_free(ptr);
  590. kfree(sec_pool);
  591. kfree(pool);
  592. return ERR_PTR(ret);
  593. }
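/*
 * The name built above is just the matching entries of n[] glued
 * together, e.g. IS_CACHED | IS_DMA32 becomes "cached dma32" - the
 * longest name actually produced, which is why pool->name is sized as
 * name[13] ("cached dma32" plus the terminating NUL).
 */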
  594. static struct dma_pool *ttm_dma_find_pool(struct device *dev,
  595. enum pool_type type)
  596. {
  597. struct dma_pool *pool, *tmp;
  598. if (type == IS_UNDEFINED)
  599. return NULL;
  600. /* NB: We iterate on the 'struct dev' which has no spinlock, but
  601. * it does have a kref which we have taken. The kref is taken during
  602. * graphic driver loading - in the drm_pci_init it calls either
  603. * pci_dev_get or pci_register_driver which both end up taking a kref
  604. * on 'struct device'.
  605. *
  606. * On teardown, the graphics drivers end up quiescing the TTM (put_pages)
  607. * and call the dev_res destructors: ttm_dma_pool_release. The nice
  608. * thing is that at that point in time there are no pages associated with the
  609. * driver, so this function will not be called.
  610. */
  611. list_for_each_entry_safe(pool, tmp, &dev->dma_pools, pools)
  612. if (pool->type == type)
  613. return pool;
  614. return NULL;
  615. }
  616. /*
  617. * Free the pages that failed to change their caching state. Pages that
  618. * have already changed their caching state are put back into the
  619. * pool.
  620. */
  621. static void ttm_dma_handle_caching_state_failure(struct dma_pool *pool,
  622. struct list_head *d_pages,
  623. struct page **failed_pages,
  624. unsigned cpages)
  625. {
  626. struct dma_page *d_page, *tmp;
  627. struct page *p;
  628. unsigned i = 0;
  629. p = failed_pages[0];
  630. if (!p)
  631. return;
  632. /* Find the failed page. */
  633. list_for_each_entry_safe(d_page, tmp, d_pages, page_list) {
  634. if (d_page->p != p)
  635. continue;
  636. /* .. and then progress over the full list. */
  637. list_del(&d_page->page_list);
  638. __ttm_dma_free_page(pool, d_page);
  639. if (++i < cpages)
  640. p = failed_pages[i];
  641. else
  642. break;
  643. }
  644. }
  645. /*
  646. * Allocate 'count' pages for the given pool, setting their caching
  647. * state as required by the pool type.
  648. * The full list of allocated pages ends up on 'd_pages'.
  649. * We return zero for success, and negative numbers as errors.
  650. */
  651. static int ttm_dma_pool_alloc_new_pages(struct dma_pool *pool,
  652. struct list_head *d_pages,
  653. unsigned count)
  654. {
  655. struct page **caching_array;
  656. struct dma_page *dma_p;
  657. struct page *p;
  658. int r = 0;
  659. unsigned i, j, npages, cpages;
  660. unsigned max_cpages = min(count,
  661. (unsigned)(PAGE_SIZE/sizeof(struct page *)));
  662. /* allocate array for page caching change */
  663. caching_array = kmalloc_array(max_cpages, sizeof(struct page *),
  664. GFP_KERNEL);
  665. if (!caching_array) {
  666. pr_debug("%s: Unable to allocate table for new pages\n",
  667. pool->dev_name);
  668. return -ENOMEM;
  669. }
  670. if (count > 1)
  671. pr_debug("%s: (%s:%d) Getting %d pages\n",
  672. pool->dev_name, pool->name, current->pid, count);
  673. for (i = 0, cpages = 0; i < count; ++i) {
  674. dma_p = __ttm_dma_alloc_page(pool);
  675. if (!dma_p) {
  676. pr_debug("%s: Unable to get page %u\n",
  677. pool->dev_name, i);
  678. /* store already allocated pages in the pool after
  679. * setting the caching state */
  680. if (cpages) {
  681. r = ttm_set_pages_caching(pool, caching_array,
  682. cpages);
  683. if (r)
  684. ttm_dma_handle_caching_state_failure(
  685. pool, d_pages, caching_array,
  686. cpages);
  687. }
  688. r = -ENOMEM;
  689. goto out;
  690. }
  691. p = dma_p->p;
  692. list_add(&dma_p->page_list, d_pages);
  693. #ifdef CONFIG_HIGHMEM
  694. /* gfp flags of a highmem page should never include dma32, so
  695. * we should be fine in that case
  696. */
  697. if (PageHighMem(p))
  698. continue;
  699. #endif
  700. npages = pool->size / PAGE_SIZE;
  701. for (j = 0; j < npages; ++j) {
  702. caching_array[cpages++] = p + j;
  703. if (cpages == max_cpages) {
  704. /* Note: Cannot hold the spinlock */
  705. r = ttm_set_pages_caching(pool, caching_array,
  706. cpages);
  707. if (r) {
  708. ttm_dma_handle_caching_state_failure(
  709. pool, d_pages, caching_array,
  710. cpages);
  711. goto out;
  712. }
  713. cpages = 0;
  714. }
  715. }
  716. }
  717. if (cpages) {
  718. r = ttm_set_pages_caching(pool, caching_array, cpages);
  719. if (r)
  720. ttm_dma_handle_caching_state_failure(pool, d_pages,
  721. caching_array, cpages);
  722. }
  723. out:
  724. kfree(caching_array);
  725. return r;
  726. }
  727. /*
  728. * @return the number of pages available in the pool's free list.
  729. */
  730. static int ttm_dma_page_pool_fill_locked(struct dma_pool *pool,
  731. unsigned long *irq_flags)
  732. {
  733. unsigned count = _manager->options.small;
  734. int r = pool->npages_free;
  735. if (count > pool->npages_free) {
  736. struct list_head d_pages;
  737. INIT_LIST_HEAD(&d_pages);
  738. spin_unlock_irqrestore(&pool->lock, *irq_flags);
  739. /* Returns zero on success, or a negative error code if not
  740. * all pages could be allocated. */
  741. r = ttm_dma_pool_alloc_new_pages(pool, &d_pages, count);
  742. spin_lock_irqsave(&pool->lock, *irq_flags);
  743. if (!r) {
  744. /* Add the fresh pages to the end. */
  745. list_splice(&d_pages, &pool->free_list);
  746. ++pool->nrefills;
  747. pool->npages_free += count;
  748. r = count;
  749. } else {
  750. struct dma_page *d_page;
  751. unsigned cpages = 0;
  752. pr_debug("%s: Failed to fill %s pool (r:%d)!\n",
  753. pool->dev_name, pool->name, r);
  754. list_for_each_entry(d_page, &d_pages, page_list) {
  755. cpages++;
  756. }
  757. list_splice_tail(&d_pages, &pool->free_list);
  758. pool->npages_free += cpages;
  759. r = cpages;
  760. }
  761. }
  762. return r;
  763. }
  764. /*
  765. * The populate list is actually a stack (not that it matters, as TTM
  766. * allocates one page at a time).
  767. * Returns the dma_page pointer on success, otherwise NULL.
  768. */
  769. static struct dma_page *ttm_dma_pool_get_pages(struct dma_pool *pool,
  770. struct ttm_dma_tt *ttm_dma,
  771. unsigned index)
  772. {
  773. struct dma_page *d_page = NULL;
  774. struct ttm_tt *ttm = &ttm_dma->ttm;
  775. unsigned long irq_flags;
  776. int count;
  777. spin_lock_irqsave(&pool->lock, irq_flags);
  778. count = ttm_dma_page_pool_fill_locked(pool, &irq_flags);
  779. if (count) {
  780. d_page = list_first_entry(&pool->free_list, struct dma_page, page_list);
  781. ttm->pages[index] = d_page->p;
  782. ttm_dma->dma_address[index] = d_page->dma;
  783. list_move_tail(&d_page->page_list, &ttm_dma->pages_list);
  784. pool->npages_in_use += 1;
  785. pool->npages_free -= 1;
  786. }
  787. spin_unlock_irqrestore(&pool->lock, irq_flags);
  788. return d_page;
  789. }
  790. static gfp_t ttm_dma_pool_gfp_flags(struct ttm_dma_tt *ttm_dma, bool huge)
  791. {
  792. struct ttm_tt *ttm = &ttm_dma->ttm;
  793. gfp_t gfp_flags;
  794. if (ttm->page_flags & TTM_PAGE_FLAG_DMA32)
  795. gfp_flags = GFP_USER | GFP_DMA32;
  796. else
  797. gfp_flags = GFP_HIGHUSER;
  798. if (ttm->page_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
  799. gfp_flags |= __GFP_ZERO;
  800. if (huge) {
  801. gfp_flags |= GFP_TRANSHUGE_LIGHT | __GFP_NORETRY |
  802. __GFP_KSWAPD_RECLAIM;
  803. gfp_flags &= ~__GFP_MOVABLE;
  804. gfp_flags &= ~__GFP_COMP;
  805. }
  806. if (ttm->page_flags & TTM_PAGE_FLAG_NO_RETRY)
  807. gfp_flags |= __GFP_RETRY_MAYFAIL;
  808. return gfp_flags;
  809. }
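/*
 * Example (illustrative): for a ttm_tt with TTM_PAGE_FLAG_ZERO_ALLOC
 * set and no DMA32 flag, the normal pool allocates with
 * GFP_HIGHUSER | __GFP_ZERO, while the huge variant additionally ORs in
 * GFP_TRANSHUGE_LIGHT | __GFP_NORETRY | __GFP_KSWAPD_RECLAIM and clears
 * __GFP_MOVABLE and __GFP_COMP.
 */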
  810. /*
  811. * On success the pages list will hold 'count' correctly
  812. * cached pages. On failure a negative error code (-ENOMEM, etc.) is returned.
  813. */
  814. int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev,
  815. struct ttm_operation_ctx *ctx)
  816. {
  817. struct ttm_tt *ttm = &ttm_dma->ttm;
  818. struct ttm_mem_global *mem_glob = ttm->bdev->glob->mem_glob;
  819. unsigned long num_pages = ttm->num_pages;
  820. struct dma_pool *pool;
  821. struct dma_page *d_page;
  822. enum pool_type type;
  823. unsigned i;
  824. int ret;
  825. if (ttm->state != tt_unpopulated)
  826. return 0;
  827. if (ttm_check_under_lowerlimit(mem_glob, num_pages, ctx))
  828. return -ENOMEM;
  829. INIT_LIST_HEAD(&ttm_dma->pages_list);
  830. i = 0;
  831. type = ttm_to_type(ttm->page_flags, ttm->caching_state);
  832. #ifdef CONFIG_TRANSPARENT_HUGEPAGE
  833. if (ttm->page_flags & TTM_PAGE_FLAG_DMA32)
  834. goto skip_huge;
  835. pool = ttm_dma_find_pool(dev, type | IS_HUGE);
  836. if (!pool) {
  837. gfp_t gfp_flags = ttm_dma_pool_gfp_flags(ttm_dma, true);
  838. pool = ttm_dma_pool_init(dev, gfp_flags, type | IS_HUGE);
  839. if (IS_ERR_OR_NULL(pool))
  840. goto skip_huge;
  841. }
  842. while (num_pages >= HPAGE_PMD_NR) {
  843. unsigned j;
  844. d_page = ttm_dma_pool_get_pages(pool, ttm_dma, i);
  845. if (!d_page)
  846. break;
  847. ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
  848. pool->size, ctx);
  849. if (unlikely(ret != 0)) {
  850. ttm_dma_unpopulate(ttm_dma, dev);
  851. return -ENOMEM;
  852. }
  853. d_page->vaddr |= VADDR_FLAG_UPDATED_COUNT;
  854. for (j = i + 1; j < (i + HPAGE_PMD_NR); ++j) {
  855. ttm->pages[j] = ttm->pages[j - 1] + 1;
  856. ttm_dma->dma_address[j] = ttm_dma->dma_address[j - 1] +
  857. PAGE_SIZE;
  858. }
  859. i += HPAGE_PMD_NR;
  860. num_pages -= HPAGE_PMD_NR;
  861. }
  862. skip_huge:
  863. #endif
  864. pool = ttm_dma_find_pool(dev, type);
  865. if (!pool) {
  866. gfp_t gfp_flags = ttm_dma_pool_gfp_flags(ttm_dma, false);
  867. pool = ttm_dma_pool_init(dev, gfp_flags, type);
  868. if (IS_ERR_OR_NULL(pool))
  869. return -ENOMEM;
  870. }
  871. while (num_pages) {
  872. d_page = ttm_dma_pool_get_pages(pool, ttm_dma, i);
  873. if (!d_page) {
  874. ttm_dma_unpopulate(ttm_dma, dev);
  875. return -ENOMEM;
  876. }
  877. ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
  878. pool->size, ctx);
  879. if (unlikely(ret != 0)) {
  880. ttm_dma_unpopulate(ttm_dma, dev);
  881. return -ENOMEM;
  882. }
  883. d_page->vaddr |= VADDR_FLAG_UPDATED_COUNT;
  884. ++i;
  885. --num_pages;
  886. }
  887. if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
  888. ret = ttm_tt_swapin(ttm);
  889. if (unlikely(ret != 0)) {
  890. ttm_dma_unpopulate(ttm_dma, dev);
  891. return ret;
  892. }
  893. }
  894. ttm->state = tt_unbound;
  895. return 0;
  896. }
  897. EXPORT_SYMBOL_GPL(ttm_dma_populate);
  898. /* Put all pages from the pages list into the correct pool to wait for reuse */
  899. void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
  900. {
  901. struct ttm_tt *ttm = &ttm_dma->ttm;
  902. struct ttm_mem_global *mem_glob = ttm->bdev->glob->mem_glob;
  903. struct dma_pool *pool;
  904. struct dma_page *d_page, *next;
  905. enum pool_type type;
  906. bool is_cached = false;
  907. unsigned count, i, npages = 0;
  908. unsigned long irq_flags;
  909. type = ttm_to_type(ttm->page_flags, ttm->caching_state);
  910. #ifdef CONFIG_TRANSPARENT_HUGEPAGE
  911. pool = ttm_dma_find_pool(dev, type | IS_HUGE);
  912. if (pool) {
  913. count = 0;
  914. list_for_each_entry_safe(d_page, next, &ttm_dma->pages_list,
  915. page_list) {
  916. if (!(d_page->vaddr & VADDR_FLAG_HUGE_POOL))
  917. continue;
  918. count++;
  919. if (d_page->vaddr & VADDR_FLAG_UPDATED_COUNT) {
  920. ttm_mem_global_free_page(mem_glob, d_page->p,
  921. pool->size);
  922. d_page->vaddr &= ~VADDR_FLAG_UPDATED_COUNT;
  923. }
  924. ttm_dma_page_put(pool, d_page);
  925. }
  926. spin_lock_irqsave(&pool->lock, irq_flags);
  927. pool->npages_in_use -= count;
  928. pool->nfrees += count;
  929. spin_unlock_irqrestore(&pool->lock, irq_flags);
  930. }
  931. #endif
  932. pool = ttm_dma_find_pool(dev, type);
  933. if (!pool)
  934. return;
  935. is_cached = (ttm_dma_find_pool(pool->dev,
  936. ttm_to_type(ttm->page_flags, tt_cached)) == pool);
  937. /* make sure the pages array matches the list and count the pages */
  938. count = 0;
  939. list_for_each_entry_safe(d_page, next, &ttm_dma->pages_list,
  940. page_list) {
  941. ttm->pages[count] = d_page->p;
  942. count++;
  943. if (d_page->vaddr & VADDR_FLAG_UPDATED_COUNT) {
  944. ttm_mem_global_free_page(mem_glob, d_page->p,
  945. pool->size);
  946. d_page->vaddr &= ~VADDR_FLAG_UPDATED_COUNT;
  947. }
  948. if (is_cached)
  949. ttm_dma_page_put(pool, d_page);
  950. }
  951. spin_lock_irqsave(&pool->lock, irq_flags);
  952. pool->npages_in_use -= count;
  953. if (is_cached) {
  954. pool->nfrees += count;
  955. } else {
  956. pool->npages_free += count;
  957. list_splice(&ttm_dma->pages_list, &pool->free_list);
  958. /*
  959. * Wait until there are at least NUM_PAGES_TO_ALLOC pages
  960. * to free in order to minimize calls to set_memory_wb().
  961. */
  962. if (pool->npages_free >= (_manager->options.max_size +
  963. NUM_PAGES_TO_ALLOC))
  964. npages = pool->npages_free - _manager->options.max_size;
  965. }
  966. spin_unlock_irqrestore(&pool->lock, irq_flags);
  967. INIT_LIST_HEAD(&ttm_dma->pages_list);
  968. for (i = 0; i < ttm->num_pages; i++) {
  969. ttm->pages[i] = NULL;
  970. ttm_dma->dma_address[i] = 0;
  971. }
  972. /* shrink pool if necessary (only on !is_cached pools) */
  973. if (npages)
  974. ttm_dma_page_pool_free(pool, npages, false);
  975. ttm->state = tt_unpopulated;
  976. }
  977. EXPORT_SYMBOL_GPL(ttm_dma_unpopulate);
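#if 0
/*
 * Illustrative only (never built): a driver's ttm_tt backend would
 * typically wrap the two helpers above in its populate/unpopulate
 * callbacks roughly like this. The function names and the way the
 * DMA-capable 'struct device' is looked up are hypothetical.
 */
static int example_tt_populate(struct ttm_tt *ttm,
			       struct ttm_operation_ctx *ctx)
{
	struct ttm_dma_tt *dma_tt = container_of(ttm, struct ttm_dma_tt, ttm);
	struct device *dev = example_get_dma_device(ttm->bdev); /* hypothetical */

	return ttm_dma_populate(dma_tt, dev, ctx);
}

static void example_tt_unpopulate(struct ttm_tt *ttm)
{
	struct ttm_dma_tt *dma_tt = container_of(ttm, struct ttm_dma_tt, ttm);
	struct device *dev = example_get_dma_device(ttm->bdev); /* hypothetical */

	ttm_dma_unpopulate(dma_tt, dev);
}
#endif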
  978. /**
  979. * Callback for mm to request the pool to reduce the number of pages held.
  980. *
  981. * XXX: (dchinner) Deadlock warning!
  982. *
  983. * I'm getting sadder as I hear more pathetical whimpers about needing per-pool
  984. * shrinkers
  985. */
  986. static unsigned long
  987. ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
  988. {
  989. static unsigned start_pool;
  990. unsigned idx = 0;
  991. unsigned pool_offset;
  992. unsigned shrink_pages = sc->nr_to_scan;
  993. struct device_pools *p;
  994. unsigned long freed = 0;
  995. if (list_empty(&_manager->pools))
  996. return SHRINK_STOP;
  997. if (!mutex_trylock(&_manager->lock))
  998. return SHRINK_STOP;
  999. if (!_manager->npools)
  1000. goto out;
  1001. pool_offset = ++start_pool % _manager->npools;
  1002. list_for_each_entry(p, &_manager->pools, pools) {
  1003. unsigned nr_free;
  1004. if (!p->dev)
  1005. continue;
  1006. if (shrink_pages == 0)
  1007. break;
  1008. /* Do it in round-robin fashion. */
  1009. if (++idx < pool_offset)
  1010. continue;
  1011. nr_free = shrink_pages;
  1012. /* OK to use static buffer since global mutex is held. */
  1013. shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free, true);
  1014. freed += nr_free - shrink_pages;
  1015. pr_debug("%s: (%s:%d) Asked to shrink %d, have %d more to go\n",
  1016. p->pool->dev_name, p->pool->name, current->pid,
  1017. nr_free, shrink_pages);
  1018. }
  1019. out:
  1020. mutex_unlock(&_manager->lock);
  1021. return freed;
  1022. }
  1023. static unsigned long
  1024. ttm_dma_pool_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
  1025. {
  1026. struct device_pools *p;
  1027. unsigned long count = 0;
  1028. if (!mutex_trylock(&_manager->lock))
  1029. return 0;
  1030. list_for_each_entry(p, &_manager->pools, pools)
  1031. count += p->pool->npages_free;
  1032. mutex_unlock(&_manager->lock);
  1033. return count;
  1034. }
  1035. static int ttm_dma_pool_mm_shrink_init(struct ttm_pool_manager *manager)
  1036. {
  1037. manager->mm_shrink.count_objects = ttm_dma_pool_shrink_count;
  1038. manager->mm_shrink.scan_objects = &ttm_dma_pool_shrink_scan;
  1039. manager->mm_shrink.seeks = 1;
  1040. return register_shrinker(&manager->mm_shrink);
  1041. }
  1042. static void ttm_dma_pool_mm_shrink_fini(struct ttm_pool_manager *manager)
  1043. {
  1044. unregister_shrinker(&manager->mm_shrink);
  1045. }
  1046. int ttm_dma_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
  1047. {
  1048. int ret;
  1049. WARN_ON(_manager);
  1050. pr_info("Initializing DMA pool allocator\n");
  1051. _manager = kzalloc(sizeof(*_manager), GFP_KERNEL);
  1052. if (!_manager)
  1053. return -ENOMEM;
  1054. mutex_init(&_manager->lock);
  1055. INIT_LIST_HEAD(&_manager->pools);
  1056. _manager->options.max_size = max_pages;
  1057. _manager->options.small = SMALL_ALLOCATION;
  1058. _manager->options.alloc_size = NUM_PAGES_TO_ALLOC;
  1059. /* This takes care of auto-freeing the _manager */
  1060. ret = kobject_init_and_add(&_manager->kobj, &ttm_pool_kobj_type,
  1061. &glob->kobj, "dma_pool");
  1062. if (unlikely(ret != 0))
  1063. goto error;
  1064. ret = ttm_dma_pool_mm_shrink_init(_manager);
  1065. if (unlikely(ret != 0))
  1066. goto error;
  1067. return 0;
  1068. error:
  1069. kobject_put(&_manager->kobj);
  1070. _manager = NULL;
  1071. return ret;
  1072. }
  1073. void ttm_dma_page_alloc_fini(void)
  1074. {
  1075. struct device_pools *p, *t;
  1076. pr_info("Finalizing DMA pool allocator\n");
  1077. ttm_dma_pool_mm_shrink_fini(_manager);
  1078. list_for_each_entry_safe_reverse(p, t, &_manager->pools, pools) {
  1079. dev_dbg(p->dev, "(%s:%d) Freeing.\n", p->pool->name,
  1080. current->pid);
  1081. WARN_ON(devres_destroy(p->dev, ttm_dma_pool_release,
  1082. ttm_dma_pool_match, p->pool));
  1083. ttm_dma_free_pool(p->dev, p->pool->type);
  1084. }
  1085. kobject_put(&_manager->kobj);
  1086. _manager = NULL;
  1087. }
  1088. int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data)
  1089. {
  1090. struct device_pools *p;
  1091. struct dma_pool *pool = NULL;
  1092. if (!_manager) {
  1093. seq_printf(m, "No pool allocator running.\n");
  1094. return 0;
  1095. }
  1096. seq_printf(m, " pool refills pages freed inuse available name\n");
  1097. mutex_lock(&_manager->lock);
  1098. list_for_each_entry(p, &_manager->pools, pools) {
  1099. struct device *dev = p->dev;
  1100. if (!dev)
  1101. continue;
  1102. pool = p->pool;
  1103. seq_printf(m, "%13s %12ld %13ld %8d %8d %8s\n",
  1104. pool->name, pool->nrefills,
  1105. pool->nfrees, pool->npages_in_use,
  1106. pool->npages_free,
  1107. pool->dev_name);
  1108. }
  1109. mutex_unlock(&_manager->lock);
  1110. return 0;
  1111. }
  1112. EXPORT_SYMBOL_GPL(ttm_dma_page_alloc_debugfs);
  1113. #endif