ttm_page_alloc_dma.c

/*
 * Copyright 2011 (c) Oracle Corp.
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Author: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
 */
/*
 * A simple DMA pool loosely based on dmapool.c. It has certain advantages
 * over the DMA pools:
 * - Pool collects recently freed pages for reuse (and hooks up to
 *   the shrinker).
 * - Tracks currently in-use pages.
 * - Tracks whether the page is UC, WB or cached (and reverts to WB
 *   when freed).
 */
#if defined(CONFIG_SWIOTLB) || defined(CONFIG_INTEL_IOMMU)
#define pr_fmt(fmt) "[TTM] " fmt

#include <linux/dma-mapping.h>
#include <linux/list.h>
#include <linux/seq_file.h> /* for seq_printf */
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/highmem.h>
#include <linux/mm_types.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/atomic.h>
#include <linux/device.h>
#include <linux/kthread.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_page_alloc.h>
#include <drm/ttm/ttm_set_memory.h>

#define NUM_PAGES_TO_ALLOC	(PAGE_SIZE/sizeof(struct page *))
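/*
 * Note: on a typical 64-bit configuration with 4 KiB pages and 8-byte
 * pointers, NUM_PAGES_TO_ALLOC evaluates to 4096 / 8 = 512, i.e. pages are
 * allocated and freed in batches of up to one page's worth of page pointers.
 */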
#define SMALL_ALLOCATION	4
#define FREE_ALL_PAGES		(~0U)
#define VADDR_FLAG_HUGE_POOL	1UL
#define VADDR_FLAG_UPDATED_COUNT	2UL
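/*
 * The VADDR_FLAG_* bits are stashed in the low bits of dma_page.vaddr; this
 * works because dma_alloc_attrs() returns at least page-aligned memory, so
 * bits 0 and 1 of the virtual address are otherwise always zero. The flags
 * must be masked off (see __ttm_dma_free_page()) before the address is used.
 */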
enum pool_type {
	IS_UNDEFINED	= 0,
	IS_WC		= 1 << 1,
	IS_UC		= 1 << 2,
	IS_CACHED	= 1 << 3,
	IS_DMA32	= 1 << 4,
	IS_HUGE		= 1 << 5
};
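/*
 * Pool types combine as a bitmask: ttm_to_type() below ORs one caching bit
 * (IS_WC, IS_UC or IS_CACHED) with the optional IS_DMA32 bit, and the huge
 * pools additionally carry IS_HUGE, e.g. (IS_DMA32 | IS_WC) names the
 * write-combined DMA32 pool.
 */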
/*
 * The pool structure. There are up to nine pools:
 *  - generic (not restricted to DMA32):
 *      - write combined, uncached, cached.
 *  - dma32 (up to 2^32 - so up to 4GB):
 *      - write combined, uncached, cached.
 *  - huge (not restricted to DMA32):
 *      - write combined, uncached, cached.
 * for each 'struct device'. The 'cached' is for pages that are actively used.
 * The other ones can be shrunk by the shrinker API if necessary.
 * @pools: The 'struct device->dma_pools' link.
 * @type: Type of the pool
 * @lock: Protects the free_list from concurrent access. Must be
 * used with irqsave/irqrestore variants because the pool allocator may be
 * called from delayed work.
 * @free_list: Pool of pages that are free to be used. No order requirements.
 * @dev: The device that is associated with these pools.
 * @size: Size used during DMA allocation.
 * @npages_free: Count of available pages for re-use.
 * @npages_in_use: Count of pages that are in use.
 * @nfrees: Stats when pool is shrinking.
 * @nrefills: Stats when the pool is grown.
 * @gfp_flags: Flags to pass for alloc_page.
 * @name: Name of the pool.
 * @dev_name: Name derived from dev - similar to how dev_info works.
 *   Used during shutdown as the dev_info during release is unavailable.
 */
struct dma_pool {
	struct list_head pools; /* The 'struct device->dma_pools' link */
	enum pool_type type;
	spinlock_t lock;
	struct list_head free_list;
	struct device *dev;
	unsigned size;
	unsigned npages_free;
	unsigned npages_in_use;
	unsigned long nfrees; /* Stats when shrunk. */
	unsigned long nrefills; /* Stats when grown. */
	gfp_t gfp_flags;
	char name[13]; /* "cached dma32" */
	char dev_name[64]; /* Constructed from dev */
};
/*
 * The accounting page keeping track of the allocated page along with
 * the DMA address.
 * @page_list: The link used to chain the page on the pool's free_list (or
 * on the 'struct ttm_dma_tt' pages_list while the page is in use).
 * @vaddr: The virtual address of the page and a flag if the page belongs to a
 * huge pool
 * @p: The 'struct page' backing this allocation.
 * @dma: The bus address of the page. If the page is not allocated
 * via the DMA API, it will be -1.
 */
struct dma_page {
	struct list_head page_list;
	unsigned long vaddr;
	struct page *p;
	dma_addr_t dma;
};
/*
 * Limits for the pool. They are handled without locks because the only place
 * where they may change is in sysfs store. They won't have an immediate
 * effect anyway, so forcing serialization to access them is pointless.
 */
struct ttm_pool_opts {
	unsigned	alloc_size;
	unsigned	max_size;
	unsigned	small;
};
/*
 * Contains the list of all of the 'struct device' and their corresponding
 * DMA pools. Guarded by _manager->lock.
 * @pools: The link to 'struct ttm_pool_manager->pools'
 * @dev: The 'struct device' associated with the 'pool'
 * @pool: The 'struct dma_pool' associated with the 'dev'
 */
struct device_pools {
	struct list_head pools;
	struct device *dev;
	struct dma_pool *pool;
};
/*
 * struct ttm_pool_manager - Holds memory pools for fast allocation
 *
 * @lock: Lock used when adding/removing from pools
 * @pools: List of 'struct device' and 'struct dma_pool' tuples.
 * @options: Limits for the pool.
 * @npools: Total number of pools in existence.
 * @mm_shrink: The structure used by [un]register_shrinker
 */
struct ttm_pool_manager {
	struct mutex		lock;
	struct list_head	pools;
	struct ttm_pool_opts	options;
	unsigned		npools;
	struct shrinker		mm_shrink;
	struct kobject		kobj;
};
static struct ttm_pool_manager *_manager;

static struct attribute ttm_page_pool_max = {
	.name = "pool_max_size",
	.mode = S_IRUGO | S_IWUSR
};

static struct attribute ttm_page_pool_small = {
	.name = "pool_small_allocation",
	.mode = S_IRUGO | S_IWUSR
};

static struct attribute ttm_page_pool_alloc_size = {
	.name = "pool_allocation_size",
	.mode = S_IRUGO | S_IWUSR
};

static struct attribute *ttm_pool_attrs[] = {
	&ttm_page_pool_max,
	&ttm_page_pool_small,
	&ttm_page_pool_alloc_size,
	NULL
};
static void ttm_pool_kobj_release(struct kobject *kobj)
{
	struct ttm_pool_manager *m =
		container_of(kobj, struct ttm_pool_manager, kobj);

	kfree(m);
}
static ssize_t ttm_pool_store(struct kobject *kobj, struct attribute *attr,
			      const char *buffer, size_t size)
{
	struct ttm_pool_manager *m =
		container_of(kobj, struct ttm_pool_manager, kobj);
	int chars;
	unsigned val;

	chars = sscanf(buffer, "%u", &val);
	if (chars == 0)
		return size;

	/* Convert kb to number of pages */
	val = val / (PAGE_SIZE >> 10);

	if (attr == &ttm_page_pool_max) {
		m->options.max_size = val;
	} else if (attr == &ttm_page_pool_small) {
		m->options.small = val;
	} else if (attr == &ttm_page_pool_alloc_size) {
		if (val > NUM_PAGES_TO_ALLOC*8) {
			pr_err("Setting allocation size to %lu is not allowed. Recommended size is %lu\n",
			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
			return size;
		} else if (val > NUM_PAGES_TO_ALLOC) {
			pr_warn("Setting allocation size to larger than %lu is not recommended\n",
				NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
		}
		m->options.alloc_size = val;
	}

	return size;
}
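/*
 * Usage sketch (the sysfs path is illustrative and depends on where the TTM
 * kobject is parented): the attributes above take values in KiB, which the
 * store handler converts to pages, e.g.
 *
 *   echo 8192 > /sys/.../dma_pool/pool_max_size
 *
 * would, with 4 KiB pages, set options.max_size to 8192 / 4 = 2048 pages.
 */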
static ssize_t ttm_pool_show(struct kobject *kobj, struct attribute *attr,
			     char *buffer)
{
	struct ttm_pool_manager *m =
		container_of(kobj, struct ttm_pool_manager, kobj);
	unsigned val = 0;

	if (attr == &ttm_page_pool_max)
		val = m->options.max_size;
	else if (attr == &ttm_page_pool_small)
		val = m->options.small;
	else if (attr == &ttm_page_pool_alloc_size)
		val = m->options.alloc_size;

	val = val * (PAGE_SIZE >> 10);

	return snprintf(buffer, PAGE_SIZE, "%u\n", val);
}

static const struct sysfs_ops ttm_pool_sysfs_ops = {
	.show = &ttm_pool_show,
	.store = &ttm_pool_store,
};

static struct kobj_type ttm_pool_kobj_type = {
	.release = &ttm_pool_kobj_release,
	.sysfs_ops = &ttm_pool_sysfs_ops,
	.default_attrs = ttm_pool_attrs,
};
static int ttm_set_pages_caching(struct dma_pool *pool,
				 struct page **pages, unsigned cpages)
{
	int r = 0;

	/* Set page caching */
	if (pool->type & IS_UC) {
		r = ttm_set_pages_array_uc(pages, cpages);
		if (r)
			pr_err("%s: Failed to set %d pages to uc!\n",
			       pool->dev_name, cpages);
	}
	if (pool->type & IS_WC) {
		r = ttm_set_pages_array_wc(pages, cpages);
		if (r)
			pr_err("%s: Failed to set %d pages to wc!\n",
			       pool->dev_name, cpages);
	}
	return r;
}
static void __ttm_dma_free_page(struct dma_pool *pool, struct dma_page *d_page)
{
	dma_addr_t dma = d_page->dma;

	d_page->vaddr &= ~VADDR_FLAG_HUGE_POOL;
	dma_free_coherent(pool->dev, pool->size, (void *)d_page->vaddr, dma);

	kfree(d_page);
	d_page = NULL;
}
static struct dma_page *__ttm_dma_alloc_page(struct dma_pool *pool)
{
	struct dma_page *d_page;
	unsigned long attrs = 0;
	void *vaddr;

	d_page = kmalloc(sizeof(struct dma_page), GFP_KERNEL);
	if (!d_page)
		return NULL;

	if (pool->type & IS_HUGE)
		attrs = DMA_ATTR_NO_WARN;

	vaddr = dma_alloc_attrs(pool->dev, pool->size, &d_page->dma,
				pool->gfp_flags, attrs);
	if (vaddr) {
		if (is_vmalloc_addr(vaddr))
			d_page->p = vmalloc_to_page(vaddr);
		else
			d_page->p = virt_to_page(vaddr);
		d_page->vaddr = (unsigned long)vaddr;
		if (pool->type & IS_HUGE)
			d_page->vaddr |= VADDR_FLAG_HUGE_POOL;
	} else {
		kfree(d_page);
		d_page = NULL;
	}
	return d_page;
}
static enum pool_type ttm_to_type(int flags, enum ttm_caching_state cstate)
{
	enum pool_type type = IS_UNDEFINED;

	if (flags & TTM_PAGE_FLAG_DMA32)
		type |= IS_DMA32;
	if (cstate == tt_cached)
		type |= IS_CACHED;
	else if (cstate == tt_uncached)
		type |= IS_UC;
	else
		type |= IS_WC;

	return type;
}
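/*
 * For example, a TTM with TTM_PAGE_FLAG_DMA32 set and caching state
 * tt_uncached maps to (IS_DMA32 | IS_UC), while a plain write-combined TTM
 * (tt_wc, no DMA32 flag) maps to IS_WC.
 */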
static void ttm_pool_update_free_locked(struct dma_pool *pool,
					unsigned freed_pages)
{
	pool->npages_free -= freed_pages;
	pool->nfrees += freed_pages;
}
/* set memory back to wb and free the pages. */
static void ttm_dma_page_put(struct dma_pool *pool, struct dma_page *d_page)
{
	struct page *page = d_page->p;
	unsigned num_pages;

	/* Don't set WB on WB page pool. */
	if (!(pool->type & IS_CACHED)) {
		num_pages = pool->size / PAGE_SIZE;
		if (ttm_set_pages_wb(page, num_pages))
			pr_err("%s: Failed to set %d pages to wb!\n",
			       pool->dev_name, num_pages);
	}

	list_del(&d_page->page_list);
	__ttm_dma_free_page(pool, d_page);
}
static void ttm_dma_pages_put(struct dma_pool *pool, struct list_head *d_pages,
			      struct page *pages[], unsigned npages)
{
	struct dma_page *d_page, *tmp;

	if (pool->type & IS_HUGE) {
		list_for_each_entry_safe(d_page, tmp, d_pages, page_list)
			ttm_dma_page_put(pool, d_page);

		return;
	}

	/* Don't set WB on WB page pool. */
	if (npages && !(pool->type & IS_CACHED) &&
	    ttm_set_pages_array_wb(pages, npages))
		pr_err("%s: Failed to set %d pages to wb!\n",
		       pool->dev_name, npages);

	list_for_each_entry_safe(d_page, tmp, d_pages, page_list) {
		list_del(&d_page->page_list);
		__ttm_dma_free_page(pool, d_page);
	}
}
/*
 * Free pages from pool.
 *
 * To prevent hogging the ttm_swap process we only free NUM_PAGES_TO_ALLOC
 * pages in one go.
 *
 * @pool: to free the pages from
 * @nr_free: the number of pages to free; FREE_ALL_PAGES frees every page in
 * the pool.
 * @use_static: safe to use the static buffer
 *
 * Returns the number of requested pages that could not be freed.
 **/
static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free,
				       bool use_static)
{
	static struct page *static_buf[NUM_PAGES_TO_ALLOC];
	unsigned long irq_flags;
	struct dma_page *dma_p, *tmp;
	struct page **pages_to_free;
	struct list_head d_pages;
	unsigned freed_pages = 0,
		 npages_to_free = nr_free;

	if (NUM_PAGES_TO_ALLOC < nr_free)
		npages_to_free = NUM_PAGES_TO_ALLOC;

	if (use_static)
		pages_to_free = static_buf;
	else
		pages_to_free = kmalloc_array(npages_to_free,
					      sizeof(struct page *),
					      GFP_KERNEL);

	if (!pages_to_free) {
		pr_debug("%s: Failed to allocate memory for pool free operation\n",
			 pool->dev_name);
		return 0;
	}
	INIT_LIST_HEAD(&d_pages);
restart:
	spin_lock_irqsave(&pool->lock, irq_flags);

	/* We're picking the oldest ones off the list */
	list_for_each_entry_safe_reverse(dma_p, tmp, &pool->free_list,
					 page_list) {
		if (freed_pages >= npages_to_free)
			break;

		/* Move the dma_page from one list to another. */
		list_move(&dma_p->page_list, &d_pages);

		pages_to_free[freed_pages++] = dma_p->p;
		/* We can only remove NUM_PAGES_TO_ALLOC at a time. */
		if (freed_pages >= NUM_PAGES_TO_ALLOC) {

			ttm_pool_update_free_locked(pool, freed_pages);
			/*
			 * Because changing page caching is costly
			 * we unlock the pool to prevent stalling.
			 */
			spin_unlock_irqrestore(&pool->lock, irq_flags);

			ttm_dma_pages_put(pool, &d_pages, pages_to_free,
					  freed_pages);

			INIT_LIST_HEAD(&d_pages);

			if (likely(nr_free != FREE_ALL_PAGES))
				nr_free -= freed_pages;

			if (NUM_PAGES_TO_ALLOC >= nr_free)
				npages_to_free = nr_free;
			else
				npages_to_free = NUM_PAGES_TO_ALLOC;

			freed_pages = 0;

			/* free all so restart the processing */
			if (nr_free)
				goto restart;

			/* Not allowed to fall through or break because
			 * following context is inside spinlock while we are
			 * outside here.
			 */
			goto out;

		}
	}

	/* remove range of pages from the pool */
	if (freed_pages) {
		ttm_pool_update_free_locked(pool, freed_pages);
		nr_free -= freed_pages;
	}

	spin_unlock_irqrestore(&pool->lock, irq_flags);

	if (freed_pages)
		ttm_dma_pages_put(pool, &d_pages, pages_to_free, freed_pages);
out:
	if (pages_to_free != static_buf)
		kfree(pages_to_free);
	return nr_free;
}
static void ttm_dma_free_pool(struct device *dev, enum pool_type type)
{
	struct device_pools *p;
	struct dma_pool *pool;

	if (!dev)
		return;

	mutex_lock(&_manager->lock);
	list_for_each_entry_reverse(p, &_manager->pools, pools) {
		if (p->dev != dev)
			continue;
		pool = p->pool;
		if (pool->type != type)
			continue;

		list_del(&p->pools);
		kfree(p);
		_manager->npools--;
		break;
	}
	list_for_each_entry_reverse(pool, &dev->dma_pools, pools) {
		if (pool->type != type)
			continue;
		/* Takes a spinlock.. */
		/* OK to use static buffer since global mutex is held. */
		ttm_dma_page_pool_free(pool, FREE_ALL_PAGES, true);
		WARN_ON(((pool->npages_in_use + pool->npages_free) != 0));
		/* This code path is called after _all_ references to the
		 * struct device have been dropped - so nobody should be
		 * touching it. In case somebody is trying to _add_ we are
		 * guarded by the mutex. */
		list_del(&pool->pools);
		kfree(pool);
		break;
	}
	mutex_unlock(&_manager->lock);
}
/*
 * This destructor is run when the 'struct device' is freed, although the
 * pool might have already been freed earlier.
 */
static void ttm_dma_pool_release(struct device *dev, void *res)
{
	struct dma_pool *pool = *(struct dma_pool **)res;

	if (pool)
		ttm_dma_free_pool(dev, pool->type);
}
static int ttm_dma_pool_match(struct device *dev, void *res, void *match_data)
{
	return *(struct dma_pool **)res == match_data;
}
static struct dma_pool *ttm_dma_pool_init(struct device *dev, gfp_t flags,
					  enum pool_type type)
{
	const char *n[] = {"wc", "uc", "cached", " dma32", "huge"};
	enum pool_type t[] = {IS_WC, IS_UC, IS_CACHED, IS_DMA32, IS_HUGE};
	struct device_pools *sec_pool = NULL;
	struct dma_pool *pool = NULL, **ptr;
	unsigned i;
	int ret = -ENODEV;
	char *p;

	if (!dev)
		return NULL;

	ptr = devres_alloc(ttm_dma_pool_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	ret = -ENOMEM;

	pool = kmalloc_node(sizeof(struct dma_pool), GFP_KERNEL,
			    dev_to_node(dev));
	if (!pool)
		goto err_mem;

	sec_pool = kmalloc_node(sizeof(struct device_pools), GFP_KERNEL,
				dev_to_node(dev));
	if (!sec_pool)
		goto err_mem;

	INIT_LIST_HEAD(&sec_pool->pools);
	sec_pool->dev = dev;
	sec_pool->pool = pool;

	INIT_LIST_HEAD(&pool->free_list);
	INIT_LIST_HEAD(&pool->pools);
	spin_lock_init(&pool->lock);
	pool->dev = dev;
	pool->npages_free = pool->npages_in_use = 0;
	pool->nfrees = 0;
	pool->gfp_flags = flags;
	if (type & IS_HUGE)
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
		pool->size = HPAGE_PMD_SIZE;
#else
		BUG();
#endif
	else
		pool->size = PAGE_SIZE;
	pool->type = type;
	pool->nrefills = 0;
	p = pool->name;
	for (i = 0; i < ARRAY_SIZE(t); i++) {
		if (type & t[i]) {
			p += snprintf(p, sizeof(pool->name) - (p - pool->name),
				      "%s", n[i]);
		}
	}
	*p = 0;
	/* We copy the name for pr_ calls b/c when dma_pool_destroy is called
	 * - the kobj->name has already been deallocated. */
	snprintf(pool->dev_name, sizeof(pool->dev_name), "%s %s",
		 dev_driver_string(dev), dev_name(dev));
	mutex_lock(&_manager->lock);
	/* You can get the dma_pool from either the global: */
	list_add(&sec_pool->pools, &_manager->pools);
	_manager->npools++;
	/* or from 'struct device': */
	list_add(&pool->pools, &dev->dma_pools);
	mutex_unlock(&_manager->lock);

	*ptr = pool;
	devres_add(dev, ptr);

	return pool;
err_mem:
	devres_free(ptr);
	kfree(sec_pool);
	kfree(pool);
	return ERR_PTR(ret);
}
static struct dma_pool *ttm_dma_find_pool(struct device *dev,
					  enum pool_type type)
{
	struct dma_pool *pool, *tmp;

	if (type == IS_UNDEFINED)
		return NULL;

	/* NB: We iterate on the 'struct dev' which has no spinlock, but
	 * it does have a kref which we have taken. The kref is taken during
	 * graphics driver loading - in the drm_pci_init it calls either
	 * pci_dev_get or pci_register_driver which both end up taking a kref
	 * on 'struct device'.
	 *
	 * On teardown, the graphics drivers end up quiescing the TTM
	 * (put_pages) and calling the dev_res destructors:
	 * ttm_dma_pool_release. The nice thing is that at that point in time
	 * there are no pages associated with the driver, so this function
	 * will not be called.
	 */
	list_for_each_entry_safe(pool, tmp, &dev->dma_pools, pools)
		if (pool->type == type)
			return pool;
	return NULL;
}
/*
 * Free the pages that failed to change their caching state. If there
 * are pages that have already changed their caching state, put them back
 * in the pool.
 */
static void ttm_dma_handle_caching_state_failure(struct dma_pool *pool,
						 struct list_head *d_pages,
						 struct page **failed_pages,
						 unsigned cpages)
{
	struct dma_page *d_page, *tmp;
	struct page *p;
	unsigned i = 0;

	p = failed_pages[0];
	if (!p)
		return;
	/* Find the failed page. */
	list_for_each_entry_safe(d_page, tmp, d_pages, page_list) {
		if (d_page->p != p)
			continue;
		/* .. and then progress over the full list. */
		list_del(&d_page->page_list);
		__ttm_dma_free_page(pool, d_page);
		if (++i < cpages)
			p = failed_pages[i];
		else
			break;
	}
}
/*
 * Allocate 'count' pages and add them to the 'd_pages' list, setting the
 * caching state of the pages according to the pool type.
 * We return zero for success, and negative numbers as errors.
 */
static int ttm_dma_pool_alloc_new_pages(struct dma_pool *pool,
					struct list_head *d_pages,
					unsigned count)
{
	struct page **caching_array;
	struct dma_page *dma_p;
	struct page *p;
	int r = 0;
	unsigned i, j, npages, cpages;
	unsigned max_cpages = min(count,
			(unsigned)(PAGE_SIZE/sizeof(struct page *)));

	/* allocate array for page caching change */
	caching_array = kmalloc_array(max_cpages, sizeof(struct page *),
				      GFP_KERNEL);

	if (!caching_array) {
		pr_debug("%s: Unable to allocate table for new pages\n",
			 pool->dev_name);
		return -ENOMEM;
	}

	if (count > 1)
		pr_debug("%s: (%s:%d) Getting %d pages\n",
			 pool->dev_name, pool->name, current->pid, count);

	for (i = 0, cpages = 0; i < count; ++i) {
		dma_p = __ttm_dma_alloc_page(pool);
		if (!dma_p) {
			pr_debug("%s: Unable to get page %u\n",
				 pool->dev_name, i);

			/* store already allocated pages in the pool after
			 * setting the caching state */
			if (cpages) {
				r = ttm_set_pages_caching(pool, caching_array,
							  cpages);
				if (r)
					ttm_dma_handle_caching_state_failure(
						pool, d_pages, caching_array,
						cpages);
			}
			r = -ENOMEM;
			goto out;
		}
		p = dma_p->p;
		list_add(&dma_p->page_list, d_pages);

#ifdef CONFIG_HIGHMEM
		/* gfp flags of highmem page should never be dma32 so we
		 * should be fine in such case
		 */
		if (PageHighMem(p))
			continue;
#endif

		npages = pool->size / PAGE_SIZE;
		for (j = 0; j < npages; ++j) {
			caching_array[cpages++] = p + j;
			if (cpages == max_cpages) {
				/* Note: Cannot hold the spinlock */
				r = ttm_set_pages_caching(pool, caching_array,
							  cpages);
				if (r) {
					ttm_dma_handle_caching_state_failure(
						pool, d_pages, caching_array,
						cpages);
					goto out;
				}
				cpages = 0;
			}
		}
	}

	if (cpages) {
		r = ttm_set_pages_caching(pool, caching_array, cpages);
		if (r)
			ttm_dma_handle_caching_state_failure(pool, d_pages,
					caching_array, cpages);
	}
out:
	kfree(caching_array);
	return r;
}
/*
 * @return the number of pages available in the pool after (re)filling, i.e.
 * how many free pages the caller can take.
 */
static int ttm_dma_page_pool_fill_locked(struct dma_pool *pool,
					 unsigned long *irq_flags)
{
	unsigned count = _manager->options.small;
	int r = pool->npages_free;

	if (count > pool->npages_free) {
		struct list_head d_pages;

		INIT_LIST_HEAD(&d_pages);

		spin_unlock_irqrestore(&pool->lock, *irq_flags);

		/* Returns zero on success, a negative error code otherwise. */
		r = ttm_dma_pool_alloc_new_pages(pool, &d_pages, count);

		spin_lock_irqsave(&pool->lock, *irq_flags);
		if (!r) {
			/* Add the fresh to the end.. */
			list_splice(&d_pages, &pool->free_list);
			++pool->nrefills;
			pool->npages_free += count;
			r = count;
		} else {
			struct dma_page *d_page;
			unsigned cpages = 0;

			pr_debug("%s: Failed to fill %s pool (r:%d)!\n",
				 pool->dev_name, pool->name, r);

			list_for_each_entry(d_page, &d_pages, page_list) {
				cpages++;
			}
			list_splice_tail(&d_pages, &pool->free_list);
			pool->npages_free += cpages;
			r = cpages;
		}
	}
	return r;
}
/*
 * The populate list is actually a stack (not that it matters, as TTM
 * allocates one page at a time).
 * Returns the dma_page pointer on success, otherwise NULL.
 */
static struct dma_page *ttm_dma_pool_get_pages(struct dma_pool *pool,
					       struct ttm_dma_tt *ttm_dma,
					       unsigned index)
{
	struct dma_page *d_page = NULL;
	struct ttm_tt *ttm = &ttm_dma->ttm;
	unsigned long irq_flags;
	int count;

	spin_lock_irqsave(&pool->lock, irq_flags);
	count = ttm_dma_page_pool_fill_locked(pool, &irq_flags);
	if (count) {
		d_page = list_first_entry(&pool->free_list, struct dma_page,
					  page_list);
		ttm->pages[index] = d_page->p;
		ttm_dma->dma_address[index] = d_page->dma;
		list_move_tail(&d_page->page_list, &ttm_dma->pages_list);
		pool->npages_in_use += 1;
		pool->npages_free -= 1;
	}
	spin_unlock_irqrestore(&pool->lock, irq_flags);
	return d_page;
}
static gfp_t ttm_dma_pool_gfp_flags(struct ttm_dma_tt *ttm_dma, bool huge)
{
	struct ttm_tt *ttm = &ttm_dma->ttm;
	gfp_t gfp_flags;

	if (ttm->page_flags & TTM_PAGE_FLAG_DMA32)
		gfp_flags = GFP_USER | GFP_DMA32;
	else
		gfp_flags = GFP_HIGHUSER;
	if (ttm->page_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
		gfp_flags |= __GFP_ZERO;

	if (huge) {
		gfp_flags |= GFP_TRANSHUGE_LIGHT | __GFP_NORETRY |
			__GFP_KSWAPD_RECLAIM;
		gfp_flags &= ~__GFP_MOVABLE;
		gfp_flags &= ~__GFP_COMP;
	}

	if (ttm->page_flags & TTM_PAGE_FLAG_NO_RETRY)
		gfp_flags |= __GFP_RETRY_MAYFAIL;

	return gfp_flags;
}
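/*
 * For example, a zeroed DMA32 allocation resolves to
 * (GFP_USER | GFP_DMA32 | __GFP_ZERO), while a huge-pool allocation starts
 * from GFP_HIGHUSER and ORs in GFP_TRANSHUGE_LIGHT | __GFP_NORETRY |
 * __GFP_KSWAPD_RECLAIM, with __GFP_MOVABLE and __GFP_COMP cleared.
 */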
/*
 * On success the pages list will hold 'count' correctly cached pages. On
 * failure a negative error code (-ENOMEM, etc.) is returned.
 */
int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev,
		     struct ttm_operation_ctx *ctx)
{
	struct ttm_tt *ttm = &ttm_dma->ttm;
	struct ttm_mem_global *mem_glob = ttm->bdev->glob->mem_glob;
	unsigned long num_pages = ttm->num_pages;
	struct dma_pool *pool;
	struct dma_page *d_page;
	enum pool_type type;
	unsigned i;
	int ret;

	if (ttm->state != tt_unpopulated)
		return 0;

	if (ttm_check_under_lowerlimit(mem_glob, num_pages, ctx))
		return -ENOMEM;

	INIT_LIST_HEAD(&ttm_dma->pages_list);
	i = 0;

	type = ttm_to_type(ttm->page_flags, ttm->caching_state);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (ttm->page_flags & TTM_PAGE_FLAG_DMA32)
		goto skip_huge;

	pool = ttm_dma_find_pool(dev, type | IS_HUGE);
	if (!pool) {
		gfp_t gfp_flags = ttm_dma_pool_gfp_flags(ttm_dma, true);

		pool = ttm_dma_pool_init(dev, gfp_flags, type | IS_HUGE);
		if (IS_ERR_OR_NULL(pool))
			goto skip_huge;
	}

	while (num_pages >= HPAGE_PMD_NR) {
		unsigned j;

		d_page = ttm_dma_pool_get_pages(pool, ttm_dma, i);
		if (!d_page)
			break;

		ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
						pool->size, ctx);
		if (unlikely(ret != 0)) {
			ttm_dma_unpopulate(ttm_dma, dev);
			return -ENOMEM;
		}

		d_page->vaddr |= VADDR_FLAG_UPDATED_COUNT;
		for (j = i + 1; j < (i + HPAGE_PMD_NR); ++j) {
			ttm->pages[j] = ttm->pages[j - 1] + 1;
			ttm_dma->dma_address[j] = ttm_dma->dma_address[j - 1] +
				PAGE_SIZE;
		}

		i += HPAGE_PMD_NR;
		num_pages -= HPAGE_PMD_NR;
	}

skip_huge:
#endif

	pool = ttm_dma_find_pool(dev, type);
	if (!pool) {
		gfp_t gfp_flags = ttm_dma_pool_gfp_flags(ttm_dma, false);

		pool = ttm_dma_pool_init(dev, gfp_flags, type);
		if (IS_ERR_OR_NULL(pool))
			return -ENOMEM;
	}

	while (num_pages) {
		d_page = ttm_dma_pool_get_pages(pool, ttm_dma, i);
		if (!d_page) {
			ttm_dma_unpopulate(ttm_dma, dev);
			return -ENOMEM;
		}

		ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
						pool->size, ctx);
		if (unlikely(ret != 0)) {
			ttm_dma_unpopulate(ttm_dma, dev);
			return -ENOMEM;
		}

		d_page->vaddr |= VADDR_FLAG_UPDATED_COUNT;
		++i;
		--num_pages;
	}

	if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
		ret = ttm_tt_swapin(ttm);
		if (unlikely(ret != 0)) {
			ttm_dma_unpopulate(ttm_dma, dev);
			return ret;
		}
	}

	ttm->state = tt_unbound;
	return 0;
}
EXPORT_SYMBOL_GPL(ttm_dma_populate);
/* Put all pages in pages list to correct pool to wait for reuse */
void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
{
	struct ttm_tt *ttm = &ttm_dma->ttm;
	struct ttm_mem_global *mem_glob = ttm->bdev->glob->mem_glob;
	struct dma_pool *pool;
	struct dma_page *d_page, *next;
	enum pool_type type;
	bool is_cached = false;
	unsigned count, i, npages = 0;
	unsigned long irq_flags;

	type = ttm_to_type(ttm->page_flags, ttm->caching_state);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	pool = ttm_dma_find_pool(dev, type | IS_HUGE);
	if (pool) {
		count = 0;
		list_for_each_entry_safe(d_page, next, &ttm_dma->pages_list,
					 page_list) {
			if (!(d_page->vaddr & VADDR_FLAG_HUGE_POOL))
				continue;

			count++;
			if (d_page->vaddr & VADDR_FLAG_UPDATED_COUNT) {
				ttm_mem_global_free_page(mem_glob, d_page->p,
							 pool->size);
				d_page->vaddr &= ~VADDR_FLAG_UPDATED_COUNT;
			}
			ttm_dma_page_put(pool, d_page);
		}

		spin_lock_irqsave(&pool->lock, irq_flags);
		pool->npages_in_use -= count;
		pool->nfrees += count;
		spin_unlock_irqrestore(&pool->lock, irq_flags);
	}
#endif

	pool = ttm_dma_find_pool(dev, type);
	if (!pool)
		return;

	is_cached = (ttm_dma_find_pool(pool->dev,
		     ttm_to_type(ttm->page_flags, tt_cached)) == pool);

	/* make sure pages array match list and count number of pages */
	count = 0;
	list_for_each_entry_safe(d_page, next, &ttm_dma->pages_list,
				 page_list) {
		ttm->pages[count] = d_page->p;
		count++;

		if (d_page->vaddr & VADDR_FLAG_UPDATED_COUNT) {
			ttm_mem_global_free_page(mem_glob, d_page->p,
						 pool->size);
			d_page->vaddr &= ~VADDR_FLAG_UPDATED_COUNT;
		}

		if (is_cached)
			ttm_dma_page_put(pool, d_page);
	}

	spin_lock_irqsave(&pool->lock, irq_flags);
	pool->npages_in_use -= count;
	if (is_cached) {
		pool->nfrees += count;
	} else {
		pool->npages_free += count;
		list_splice(&ttm_dma->pages_list, &pool->free_list);
		/*
		 * Wait to have at least NUM_PAGES_TO_ALLOC number of pages
		 * to free in order to minimize calls to set_memory_wb().
		 */
		if (pool->npages_free >= (_manager->options.max_size +
					  NUM_PAGES_TO_ALLOC))
			npages = pool->npages_free - _manager->options.max_size;
	}
	spin_unlock_irqrestore(&pool->lock, irq_flags);

	INIT_LIST_HEAD(&ttm_dma->pages_list);
	for (i = 0; i < ttm->num_pages; i++) {
		ttm->pages[i] = NULL;
		ttm_dma->dma_address[i] = 0;
	}

	/* shrink pool if necessary (only on !is_cached pools) */
	if (npages)
		ttm_dma_page_pool_free(pool, npages, false);
	ttm->state = tt_unpopulated;
}
EXPORT_SYMBOL_GPL(ttm_dma_unpopulate);
/**
 * Callback for mm to request pool to reduce number of pages held.
 *
 * XXX: (dchinner) Deadlock warning!
 *
 * I'm getting sadder as I hear more pathetical whimpers about needing per-pool
 * shrinkers
 */
static unsigned long
ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	static unsigned start_pool;
	unsigned idx = 0;
	unsigned pool_offset;
	unsigned shrink_pages = sc->nr_to_scan;
	struct device_pools *p;
	unsigned long freed = 0;

	if (list_empty(&_manager->pools))
		return SHRINK_STOP;

	if (!mutex_trylock(&_manager->lock))
		return SHRINK_STOP;
	if (!_manager->npools)
		goto out;
	pool_offset = ++start_pool % _manager->npools;
	list_for_each_entry(p, &_manager->pools, pools) {
		unsigned nr_free;

		if (!p->dev)
			continue;
		if (shrink_pages == 0)
			break;
		/* Do it in round-robin fashion. */
		if (++idx < pool_offset)
			continue;
		nr_free = shrink_pages;
		/* OK to use static buffer since global mutex is held. */
		shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free, true);
		freed += nr_free - shrink_pages;

		pr_debug("%s: (%s:%d) Asked to shrink %d, have %d more to go\n",
			 p->pool->dev_name, p->pool->name, current->pid,
			 nr_free, shrink_pages);
	}
out:
	mutex_unlock(&_manager->lock);
	return freed;
}
static unsigned long
ttm_dma_pool_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
	struct device_pools *p;
	unsigned long count = 0;

	if (!mutex_trylock(&_manager->lock))
		return 0;
	list_for_each_entry(p, &_manager->pools, pools)
		count += p->pool->npages_free;
	mutex_unlock(&_manager->lock);
	return count;
}
static int ttm_dma_pool_mm_shrink_init(struct ttm_pool_manager *manager)
{
	manager->mm_shrink.count_objects = ttm_dma_pool_shrink_count;
	manager->mm_shrink.scan_objects = ttm_dma_pool_shrink_scan;
	manager->mm_shrink.seeks = 1;
	return register_shrinker(&manager->mm_shrink);
}

static void ttm_dma_pool_mm_shrink_fini(struct ttm_pool_manager *manager)
{
	unregister_shrinker(&manager->mm_shrink);
}
int ttm_dma_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
{
	int ret;

	WARN_ON(_manager);

	pr_info("Initializing DMA pool allocator\n");

	_manager = kzalloc(sizeof(*_manager), GFP_KERNEL);
	if (!_manager)
		return -ENOMEM;

	mutex_init(&_manager->lock);
	INIT_LIST_HEAD(&_manager->pools);

	_manager->options.max_size = max_pages;
	_manager->options.small = SMALL_ALLOCATION;
	_manager->options.alloc_size = NUM_PAGES_TO_ALLOC;

	/* This takes care of auto-freeing the _manager */
	ret = kobject_init_and_add(&_manager->kobj, &ttm_pool_kobj_type,
				   &glob->kobj, "dma_pool");
	if (unlikely(ret != 0))
		goto error;

	ret = ttm_dma_pool_mm_shrink_init(_manager);
	if (unlikely(ret != 0))
		goto error;
	return 0;

error:
	kobject_put(&_manager->kobj);
	_manager = NULL;
	return ret;
}
void ttm_dma_page_alloc_fini(void)
{
	struct device_pools *p, *t;

	pr_info("Finalizing DMA pool allocator\n");
	ttm_dma_pool_mm_shrink_fini(_manager);

	list_for_each_entry_safe_reverse(p, t, &_manager->pools, pools) {
		dev_dbg(p->dev, "(%s:%d) Freeing.\n", p->pool->name,
			current->pid);
		WARN_ON(devres_destroy(p->dev, ttm_dma_pool_release,
				       ttm_dma_pool_match, p->pool));
		ttm_dma_free_pool(p->dev, p->pool->type);
	}
	kobject_put(&_manager->kobj);
	_manager = NULL;
}
int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data)
{
	struct device_pools *p;
	struct dma_pool *pool = NULL;

	if (!_manager) {
		seq_printf(m, "No pool allocator running.\n");
		return 0;
	}
	seq_printf(m, "         pool      refills   pages freed    inuse available     name\n");
	mutex_lock(&_manager->lock);
	list_for_each_entry(p, &_manager->pools, pools) {
		struct device *dev = p->dev;

		if (!dev)
			continue;
		pool = p->pool;
		seq_printf(m, "%13s %12ld %13ld %8d %8d %8s\n",
				pool->name, pool->nrefills,
				pool->nfrees, pool->npages_in_use,
				pool->npages_free,
				pool->dev_name);
	}
	mutex_unlock(&_manager->lock);
	return 0;
}
EXPORT_SYMBOL_GPL(ttm_dma_page_alloc_debugfs);
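/*
 * Illustrative debugfs output (the values and device name below are invented
 * for the example; one row is printed per pool):
 *
 *          pool      refills   pages freed    inuse available     name
 *      wc dma32            8             0      512       12 radeon 0000:01:00.0
 */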
#endif