/*
 * Copyright © 2006-2009, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 */
#include <linux/iova.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/bitops.h>
#include <linux/cpu.h>

/* The anchor node sits above the top of the usable address space */
#define IOVA_ANCHOR     ~0UL

static bool iova_rcache_insert(struct iova_domain *iovad,
                               unsigned long pfn,
                               unsigned long size);
static unsigned long iova_rcache_get(struct iova_domain *iovad,
                                     unsigned long size,
                                     unsigned long limit_pfn);
static void init_iova_rcaches(struct iova_domain *iovad);
static void free_iova_rcaches(struct iova_domain *iovad);
static void fq_destroy_all_entries(struct iova_domain *iovad);
static void fq_flush_timeout(unsigned long data);
void
init_iova_domain(struct iova_domain *iovad, unsigned long granule,
        unsigned long start_pfn)
{
        /*
         * IOVA granularity will normally be equal to the smallest
         * supported IOMMU page size; both *must* be capable of
         * representing individual CPU pages exactly.
         */
        BUG_ON((granule > PAGE_SIZE) || !is_power_of_2(granule));

        spin_lock_init(&iovad->iova_rbtree_lock);
        iovad->rbroot = RB_ROOT;
        iovad->cached_node = &iovad->anchor.node;
        iovad->cached32_node = &iovad->anchor.node;
        iovad->granule = granule;
        iovad->start_pfn = start_pfn;
        iovad->dma_32bit_pfn = 1UL << (32 - iova_shift(iovad));
        iovad->flush_cb = NULL;
        iovad->fq = NULL;
        iovad->anchor.pfn_lo = iovad->anchor.pfn_hi = IOVA_ANCHOR;
        rb_link_node(&iovad->anchor.node, NULL, &iovad->rbroot.rb_node);
        rb_insert_color(&iovad->anchor.node, &iovad->rbroot);
        init_iova_rcaches(iovad);
}
EXPORT_SYMBOL_GPL(init_iova_domain);
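
/*
 * Illustrative sketch: how a caller might bring up a domain, assuming a
 * 4 KiB IOMMU page size.  demo_iovad and DEMO_START_PFN are made-up names
 * used only for this example.
 *
 *        static struct iova_domain demo_iovad;
 *
 *        iova_cache_get();
 *        init_iova_domain(&demo_iovad, SZ_4K, DEMO_START_PFN);
 */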
static void free_iova_flush_queue(struct iova_domain *iovad)
{
        if (!iovad->fq)
                return;

        if (timer_pending(&iovad->fq_timer))
                del_timer(&iovad->fq_timer);

        fq_destroy_all_entries(iovad);

        free_percpu(iovad->fq);

        iovad->fq = NULL;
        iovad->flush_cb = NULL;
        iovad->entry_dtor = NULL;
}
int init_iova_flush_queue(struct iova_domain *iovad,
                          iova_flush_cb flush_cb, iova_entry_dtor entry_dtor)
{
        int cpu;

        atomic64_set(&iovad->fq_flush_start_cnt, 0);
        atomic64_set(&iovad->fq_flush_finish_cnt, 0);

        iovad->fq = alloc_percpu(struct iova_fq);
        if (!iovad->fq)
                return -ENOMEM;

        iovad->flush_cb = flush_cb;
        iovad->entry_dtor = entry_dtor;

        for_each_possible_cpu(cpu) {
                struct iova_fq *fq;

                fq = per_cpu_ptr(iovad->fq, cpu);
                fq->head = 0;
                fq->tail = 0;

                spin_lock_init(&fq->lock);
        }

        setup_timer(&iovad->fq_timer, fq_flush_timeout, (unsigned long)iovad);
        atomic_set(&iovad->fq_timer_on, 0);

        return 0;
}
EXPORT_SYMBOL_GPL(init_iova_flush_queue);
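
/*
 * Illustrative sketch: registering a flush queue, assuming the callback
 * signatures from linux/iova.h (iova_flush_cb takes the domain,
 * iova_entry_dtor takes the opaque per-entry cookie).  All demo_* names
 * are made up for this example.
 *
 *        static void demo_flush_all(struct iova_domain *iovad)
 *        {
 *                demo_invalidate_all_iotlbs();
 *        }
 *
 *        static void demo_entry_free(unsigned long data)
 *        {
 *                demo_put_pagetable_freelist(data);
 *        }
 *
 *        if (init_iova_flush_queue(&demo_iovad, demo_flush_all, demo_entry_free))
 *                pr_warn("falling back to strict invalidation\n");
 */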
static struct rb_node *
__get_cached_rbnode(struct iova_domain *iovad, unsigned long limit_pfn)
{
        if (limit_pfn <= iovad->dma_32bit_pfn)
                return iovad->cached32_node;

        return iovad->cached_node;
}

static void
__cached_rbnode_insert_update(struct iova_domain *iovad, struct iova *new)
{
        if (new->pfn_hi < iovad->dma_32bit_pfn)
                iovad->cached32_node = &new->node;
        else
                iovad->cached_node = &new->node;
}

static void
__cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free)
{
        struct iova *cached_iova;

        cached_iova = rb_entry(iovad->cached32_node, struct iova, node);
        if (free->pfn_hi < iovad->dma_32bit_pfn &&
            free->pfn_lo >= cached_iova->pfn_lo)
                iovad->cached32_node = rb_next(&free->node);

        cached_iova = rb_entry(iovad->cached_node, struct iova, node);
        if (free->pfn_lo >= cached_iova->pfn_lo)
                iovad->cached_node = rb_next(&free->node);
}
/* Insert the iova into domain rbtree by holding writer lock */
static void
iova_insert_rbtree(struct rb_root *root, struct iova *iova,
                   struct rb_node *start)
{
        struct rb_node **new, *parent = NULL;

        new = (start) ? &start : &(root->rb_node);
        /* Figure out where to put new node */
        while (*new) {
                struct iova *this = rb_entry(*new, struct iova, node);

                parent = *new;

                if (iova->pfn_lo < this->pfn_lo)
                        new = &((*new)->rb_left);
                else if (iova->pfn_lo > this->pfn_lo)
                        new = &((*new)->rb_right);
                else {
                        WARN_ON(1); /* this should not happen */
                        return;
                }
        }
        /* Add new node and rebalance tree. */
        rb_link_node(&iova->node, parent, new);
        rb_insert_color(&iova->node, root);
}
static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
                unsigned long size, unsigned long limit_pfn,
                struct iova *new, bool size_aligned)
{
        struct rb_node *curr, *prev;
        struct iova *curr_iova;
        unsigned long flags;
        unsigned long new_pfn;
        unsigned long align_mask = ~0UL;

        if (size_aligned)
                align_mask <<= fls_long(size - 1);

        /* Walk the tree backwards */
        spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
        curr = __get_cached_rbnode(iovad, limit_pfn);
        curr_iova = rb_entry(curr, struct iova, node);
        do {
                limit_pfn = min(limit_pfn, curr_iova->pfn_lo);
                new_pfn = (limit_pfn - size) & align_mask;
                prev = curr;
                curr = rb_prev(curr);
                curr_iova = rb_entry(curr, struct iova, node);
        } while (curr && new_pfn <= curr_iova->pfn_hi);

        if (limit_pfn < size || new_pfn < iovad->start_pfn) {
                spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
                return -ENOMEM;
        }

        /* pfn_lo will point to size aligned address if size_aligned is set */
        new->pfn_lo = new_pfn;
        new->pfn_hi = new->pfn_lo + size - 1;

        /* If we have 'prev', it's a valid place to start the insertion. */
        iova_insert_rbtree(&iovad->rbroot, new, prev);
        __cached_rbnode_insert_update(iovad, new);

        spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);

        return 0;
}
static struct kmem_cache *iova_cache;
static unsigned int iova_cache_users;
static DEFINE_MUTEX(iova_cache_mutex);

struct iova *alloc_iova_mem(void)
{
        return kmem_cache_alloc(iova_cache, GFP_ATOMIC);
}
EXPORT_SYMBOL(alloc_iova_mem);

void free_iova_mem(struct iova *iova)
{
        if (iova->pfn_lo != IOVA_ANCHOR)
                kmem_cache_free(iova_cache, iova);
}
EXPORT_SYMBOL(free_iova_mem);

int iova_cache_get(void)
{
        mutex_lock(&iova_cache_mutex);
        if (!iova_cache_users) {
                iova_cache = kmem_cache_create(
                        "iommu_iova", sizeof(struct iova), 0,
                        SLAB_HWCACHE_ALIGN, NULL);
                if (!iova_cache) {
                        mutex_unlock(&iova_cache_mutex);
                        printk(KERN_ERR "Couldn't create iova cache\n");
                        return -ENOMEM;
                }
        }

        iova_cache_users++;
        mutex_unlock(&iova_cache_mutex);

        return 0;
}
EXPORT_SYMBOL_GPL(iova_cache_get);

void iova_cache_put(void)
{
        mutex_lock(&iova_cache_mutex);
        if (WARN_ON(!iova_cache_users)) {
                mutex_unlock(&iova_cache_mutex);
                return;
        }
        iova_cache_users--;
        if (!iova_cache_users)
                kmem_cache_destroy(iova_cache);
        mutex_unlock(&iova_cache_mutex);
}
EXPORT_SYMBOL_GPL(iova_cache_put);
/**
 * alloc_iova - allocates an iova
 * @iovad: - iova domain in question
 * @size: - size of page frames to allocate
 * @limit_pfn: - max limit address
 * @size_aligned: - set if size_aligned address range is required
 * This function allocates an iova in the range iovad->start_pfn to limit_pfn,
 * searching top-down from limit_pfn to iovad->start_pfn. If the size_aligned
 * flag is set then the allocated address iova->pfn_lo will be naturally
 * aligned on roundup_power_of_two(size).
 */
struct iova *
alloc_iova(struct iova_domain *iovad, unsigned long size,
        unsigned long limit_pfn,
        bool size_aligned)
{
        struct iova *new_iova;
        int ret;

        new_iova = alloc_iova_mem();
        if (!new_iova)
                return NULL;

        ret = __alloc_and_insert_iova_range(iovad, size, limit_pfn + 1,
                        new_iova, size_aligned);

        if (ret) {
                free_iova_mem(new_iova);
                return NULL;
        }

        return new_iova;
}
EXPORT_SYMBOL_GPL(alloc_iova);
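
/*
 * Illustrative sketch: allocating a size-aligned run of 8 granules below a
 * 32-bit DMA limit and handing it back later.  demo_iovad is the made-up
 * domain from the earlier sketch.
 *
 *        unsigned long limit = DMA_BIT_MASK(32) >> iova_shift(&demo_iovad);
 *        struct iova *iova;
 *
 *        iova = alloc_iova(&demo_iovad, 8, limit, true);
 *        if (!iova)
 *                return -ENOMEM;
 *        ...
 *        __free_iova(&demo_iovad, iova);
 */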
static struct iova *
private_find_iova(struct iova_domain *iovad, unsigned long pfn)
{
        struct rb_node *node = iovad->rbroot.rb_node;

        assert_spin_locked(&iovad->iova_rbtree_lock);

        while (node) {
                struct iova *iova = rb_entry(node, struct iova, node);

                if (pfn < iova->pfn_lo)
                        node = node->rb_left;
                else if (pfn > iova->pfn_hi)
                        node = node->rb_right;
                else
                        return iova;    /* pfn falls within iova's range */
        }

        return NULL;
}

static void private_free_iova(struct iova_domain *iovad, struct iova *iova)
{
        assert_spin_locked(&iovad->iova_rbtree_lock);
        __cached_rbnode_delete_update(iovad, iova);
        rb_erase(&iova->node, &iovad->rbroot);
        free_iova_mem(iova);
}
/**
 * find_iova - finds an iova for a given pfn
 * @iovad: - iova domain in question.
 * @pfn: - page frame number
 * This function finds and returns an iova belonging to the
 * given domain which matches the given pfn.
 */
struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn)
{
        unsigned long flags;
        struct iova *iova;

        /* Take the lock so that no other thread is manipulating the rbtree */
        spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
        iova = private_find_iova(iovad, pfn);
        spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
        return iova;
}
EXPORT_SYMBOL_GPL(find_iova);

/**
 * __free_iova - frees the given iova
 * @iovad: iova domain in question.
 * @iova: iova in question.
 * Frees the given iova belonging to the given domain
 */
void
__free_iova(struct iova_domain *iovad, struct iova *iova)
{
        unsigned long flags;

        spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
        private_free_iova(iovad, iova);
        spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
}
EXPORT_SYMBOL_GPL(__free_iova);

/**
 * free_iova - finds and frees the iova for a given pfn
 * @iovad: - iova domain in question.
 * @pfn: - pfn that is allocated previously
 * This function finds an iova for a given pfn and then
 * frees the iova from that domain.
 */
void
free_iova(struct iova_domain *iovad, unsigned long pfn)
{
        struct iova *iova = find_iova(iovad, pfn);

        if (iova)
                __free_iova(iovad, iova);
}
EXPORT_SYMBOL_GPL(free_iova);
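
/*
 * Illustrative sketch: when only the pfn is known (for instance recovered
 * from a DMA address at unmap time), the lookup and the free can be
 * combined.  demo_iovad and dma_addr are assumed from the surrounding
 * driver code.
 *
 *        free_iova(&demo_iovad, dma_addr >> iova_shift(&demo_iovad));
 */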
/**
 * alloc_iova_fast - allocates an iova from rcache
 * @iovad: - iova domain in question
 * @size: - size of page frames to allocate
 * @limit_pfn: - max limit address
 * @flush_rcache: - set to flush rcache on regular allocation failure
 * This function tries to satisfy an iova allocation from the rcache,
 * and falls back to regular allocation on failure. If regular allocation
 * fails too and the flush_rcache flag is set then the rcache will be flushed.
 */
unsigned long
alloc_iova_fast(struct iova_domain *iovad, unsigned long size,
                unsigned long limit_pfn, bool flush_rcache)
{
        unsigned long iova_pfn;
        struct iova *new_iova;

        iova_pfn = iova_rcache_get(iovad, size, limit_pfn + 1);
        if (iova_pfn)
                return iova_pfn;

retry:
        new_iova = alloc_iova(iovad, size, limit_pfn, true);
        if (!new_iova) {
                unsigned int cpu;

                if (!flush_rcache)
                        return 0;

                /* Try replenishing IOVAs by flushing rcache. */
                flush_rcache = false;
                for_each_online_cpu(cpu)
                        free_cpu_cached_iovas(cpu, iovad);
                goto retry;
        }

        return new_iova->pfn_lo;
}
EXPORT_SYMBOL_GPL(alloc_iova_fast);
/**
 * free_iova_fast - free iova pfn range into rcache
 * @iovad: - iova domain in question.
 * @pfn: - pfn that is allocated previously
 * @size: - # of pages in range
 * This function frees an iova range by trying to put it into the rcache,
 * falling back to regular iova deallocation via free_iova() if this fails.
 */
void
free_iova_fast(struct iova_domain *iovad, unsigned long pfn, unsigned long size)
{
        if (iova_rcache_insert(iovad, pfn, size))
                return;

        free_iova(iovad, pfn);
}
EXPORT_SYMBOL_GPL(free_iova_fast);
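
/*
 * Illustrative sketch: the rcache-backed fast path deals in plain pfns
 * rather than struct iova pointers, so the caller keeps the pfn and the
 * page count around for the later free.  demo_iovad, limit and nrpages
 * are assumed from the surrounding driver code.
 *
 *        unsigned long pfn;
 *
 *        pfn = alloc_iova_fast(&demo_iovad, nrpages, limit, true);
 *        if (!pfn)
 *                goto out_err;
 *        ...
 *        free_iova_fast(&demo_iovad, pfn, nrpages);
 */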
#define fq_ring_for_each(i, fq) \
        for ((i) = (fq)->head; (i) != (fq)->tail; (i) = ((i) + 1) % IOVA_FQ_SIZE)

static inline bool fq_full(struct iova_fq *fq)
{
        assert_spin_locked(&fq->lock);
        return (((fq->tail + 1) % IOVA_FQ_SIZE) == fq->head);
}

static inline unsigned fq_ring_add(struct iova_fq *fq)
{
        unsigned idx = fq->tail;

        assert_spin_locked(&fq->lock);

        fq->tail = (idx + 1) % IOVA_FQ_SIZE;

        return idx;
}

static void fq_ring_free(struct iova_domain *iovad, struct iova_fq *fq)
{
        u64 counter = atomic64_read(&iovad->fq_flush_finish_cnt);
        unsigned idx;

        assert_spin_locked(&fq->lock);

        fq_ring_for_each(idx, fq) {

                if (fq->entries[idx].counter >= counter)
                        break;

                if (iovad->entry_dtor)
                        iovad->entry_dtor(fq->entries[idx].data);

                free_iova_fast(iovad,
                               fq->entries[idx].iova_pfn,
                               fq->entries[idx].pages);

                fq->head = (fq->head + 1) % IOVA_FQ_SIZE;
        }
}
static void iova_domain_flush(struct iova_domain *iovad)
{
        atomic64_inc(&iovad->fq_flush_start_cnt);
        iovad->flush_cb(iovad);
        atomic64_inc(&iovad->fq_flush_finish_cnt);
}

static void fq_destroy_all_entries(struct iova_domain *iovad)
{
        int cpu;

        /*
         * This code runs when the iova_domain is being destroyed, so don't
         * bother to free iovas, just call the entry_dtor on all remaining
         * entries.
         */
        if (!iovad->entry_dtor)
                return;

        for_each_possible_cpu(cpu) {
                struct iova_fq *fq = per_cpu_ptr(iovad->fq, cpu);
                int idx;

                fq_ring_for_each(idx, fq)
                        iovad->entry_dtor(fq->entries[idx].data);
        }
}
static void fq_flush_timeout(unsigned long data)
{
        struct iova_domain *iovad = (struct iova_domain *)data;
        int cpu;

        atomic_set(&iovad->fq_timer_on, 0);
        iova_domain_flush(iovad);

        for_each_possible_cpu(cpu) {
                unsigned long flags;
                struct iova_fq *fq;

                fq = per_cpu_ptr(iovad->fq, cpu);
                spin_lock_irqsave(&fq->lock, flags);
                fq_ring_free(iovad, fq);
                spin_unlock_irqrestore(&fq->lock, flags);
        }
}
void queue_iova(struct iova_domain *iovad,
                unsigned long pfn, unsigned long pages,
                unsigned long data)
{
        struct iova_fq *fq = get_cpu_ptr(iovad->fq);
        unsigned long flags;
        unsigned idx;

        spin_lock_irqsave(&fq->lock, flags);

        /*
         * First remove all entries from the flush queue that have already been
         * flushed out on another CPU. This makes the fq_full() check below less
         * likely to be true.
         */
        fq_ring_free(iovad, fq);

        if (fq_full(fq)) {
                iova_domain_flush(iovad);
                fq_ring_free(iovad, fq);
        }

        idx = fq_ring_add(fq);

        fq->entries[idx].iova_pfn = pfn;
        fq->entries[idx].pages    = pages;
        fq->entries[idx].data     = data;
        fq->entries[idx].counter  = atomic64_read(&iovad->fq_flush_start_cnt);

        spin_unlock_irqrestore(&fq->lock, flags);

        if (atomic_cmpxchg(&iovad->fq_timer_on, 0, 1) == 0)
                mod_timer(&iovad->fq_timer,
                          jiffies + msecs_to_jiffies(IOVA_FQ_TIMEOUT));

        put_cpu_ptr(iovad->fq);
}
EXPORT_SYMBOL_GPL(queue_iova);
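
/*
 * Illustrative sketch: instead of freeing an IOVA range as soon as a
 * mapping is torn down, a driver using the flush queue unmaps the range in
 * its page tables and then defers the IOVA free until the next IOTLB
 * flush, passing an opaque cookie for the entry_dtor.  All demo_* names
 * and 'freelist' are made up for this example.
 *
 *        freelist = demo_unmap_range(domain, pfn, nrpages);
 *        queue_iova(&demo_iovad, pfn, nrpages, (unsigned long)freelist);
 */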
/**
 * put_iova_domain - destroys the iova domain
 * @iovad: - iova domain in question.
 * All the iovas in that domain are destroyed.
 */
void put_iova_domain(struct iova_domain *iovad)
{
        struct iova *iova, *tmp;

        free_iova_flush_queue(iovad);
        free_iova_rcaches(iovad);
        rbtree_postorder_for_each_entry_safe(iova, tmp, &iovad->rbroot, node)
                free_iova_mem(iova);
}
EXPORT_SYMBOL_GPL(put_iova_domain);
static int
__is_range_overlap(struct rb_node *node,
        unsigned long pfn_lo, unsigned long pfn_hi)
{
        struct iova *iova = rb_entry(node, struct iova, node);

        if ((pfn_lo <= iova->pfn_hi) && (pfn_hi >= iova->pfn_lo))
                return 1;
        return 0;
}

static inline struct iova *
alloc_and_init_iova(unsigned long pfn_lo, unsigned long pfn_hi)
{
        struct iova *iova;

        iova = alloc_iova_mem();
        if (iova) {
                iova->pfn_lo = pfn_lo;
                iova->pfn_hi = pfn_hi;
        }

        return iova;
}

static struct iova *
__insert_new_range(struct iova_domain *iovad,
        unsigned long pfn_lo, unsigned long pfn_hi)
{
        struct iova *iova;

        iova = alloc_and_init_iova(pfn_lo, pfn_hi);
        if (iova)
                iova_insert_rbtree(&iovad->rbroot, iova, NULL);

        return iova;
}

static void
__adjust_overlap_range(struct iova *iova,
        unsigned long *pfn_lo, unsigned long *pfn_hi)
{
        if (*pfn_lo < iova->pfn_lo)
                iova->pfn_lo = *pfn_lo;
        if (*pfn_hi > iova->pfn_hi)
                *pfn_lo = iova->pfn_hi + 1;
}
/**
 * reserve_iova - reserves an iova in the given range
 * @iovad: - iova domain pointer
 * @pfn_lo: - lower page frame address
 * @pfn_hi: - higher pfn address
 * This function reserves the address range from pfn_lo to pfn_hi so
 * that this range is never handed out by alloc_iova.
 */
struct iova *
reserve_iova(struct iova_domain *iovad,
        unsigned long pfn_lo, unsigned long pfn_hi)
{
        struct rb_node *node;
        unsigned long flags;
        struct iova *iova;
        unsigned int overlap = 0;

        /* Don't allow nonsensical pfns */
        if (WARN_ON((pfn_hi | pfn_lo) > (ULLONG_MAX >> iova_shift(iovad))))
                return NULL;

        spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
        for (node = rb_first(&iovad->rbroot); node; node = rb_next(node)) {
                if (__is_range_overlap(node, pfn_lo, pfn_hi)) {
                        iova = rb_entry(node, struct iova, node);
                        __adjust_overlap_range(iova, &pfn_lo, &pfn_hi);
                        if ((pfn_lo >= iova->pfn_lo) &&
                            (pfn_hi <= iova->pfn_hi))
                                goto finish;
                        overlap = 1;
                } else if (overlap)
                        break;
        }

        /* We are here either because this is the first reserved range,
         * or because the remaining, non-overlapping part of the range
         * still needs to be inserted.
         */
        iova = __insert_new_range(iovad, pfn_lo, pfn_hi);
finish:

        spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
        return iova;
}
EXPORT_SYMBOL_GPL(reserve_iova);
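
/*
 * Illustrative sketch: carving a hardware window (for instance an MSI
 * doorbell region) out of the allocatable space so that alloc_iova() never
 * hands it out.  DEMO_MSI_BASE and DEMO_MSI_SIZE are made-up constants.
 *
 *        reserve_iova(&demo_iovad,
 *                     DEMO_MSI_BASE >> iova_shift(&demo_iovad),
 *                     (DEMO_MSI_BASE + DEMO_MSI_SIZE - 1) >> iova_shift(&demo_iovad));
 */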
/**
 * copy_reserved_iova - copies the reserved ranges between domains
 * @from: - source domain from which to copy
 * @to: - destination domain to which to copy
 * This function copies reserved iovas from one domain to
 * another.
 */
void
copy_reserved_iova(struct iova_domain *from, struct iova_domain *to)
{
        unsigned long flags;
        struct rb_node *node;

        spin_lock_irqsave(&from->iova_rbtree_lock, flags);
        for (node = rb_first(&from->rbroot); node; node = rb_next(node)) {
                struct iova *iova = rb_entry(node, struct iova, node);
                struct iova *new_iova;

                if (iova->pfn_lo == IOVA_ANCHOR)
                        continue;

                new_iova = reserve_iova(to, iova->pfn_lo, iova->pfn_hi);
                if (!new_iova)
                        printk(KERN_ERR "Reserve iova range %lx@%lx failed\n",
                                iova->pfn_lo, iova->pfn_lo);
        }
        spin_unlock_irqrestore(&from->iova_rbtree_lock, flags);
}
EXPORT_SYMBOL_GPL(copy_reserved_iova);
struct iova *
split_and_remove_iova(struct iova_domain *iovad, struct iova *iova,
                      unsigned long pfn_lo, unsigned long pfn_hi)
{
        unsigned long flags;
        struct iova *prev = NULL, *next = NULL;

        spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
        if (iova->pfn_lo < pfn_lo) {
                prev = alloc_and_init_iova(iova->pfn_lo, pfn_lo - 1);
                if (prev == NULL)
                        goto error;
        }
        if (iova->pfn_hi > pfn_hi) {
                next = alloc_and_init_iova(pfn_hi + 1, iova->pfn_hi);
                if (next == NULL)
                        goto error;
        }

        __cached_rbnode_delete_update(iovad, iova);
        rb_erase(&iova->node, &iovad->rbroot);

        if (prev) {
                iova_insert_rbtree(&iovad->rbroot, prev, NULL);
                iova->pfn_lo = pfn_lo;
        }
        if (next) {
                iova_insert_rbtree(&iovad->rbroot, next, NULL);
                iova->pfn_hi = pfn_hi;
        }
        spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);

        return iova;

error:
        spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
        if (prev)
                free_iova_mem(prev);
        return NULL;
}
/*
 * Magazine caches for IOVA ranges.  For an introduction to magazines,
 * see the USENIX 2001 paper "Magazines and Vmem: Extending the Slab
 * Allocator to Many CPUs and Arbitrary Resources" by Bonwick and Adams.
 * For simplicity, we use a static magazine size and don't implement the
 * dynamic size tuning described in the paper.
 */

#define IOVA_MAG_SIZE 128

struct iova_magazine {
        unsigned long size;
        unsigned long pfns[IOVA_MAG_SIZE];
};

struct iova_cpu_rcache {
        spinlock_t lock;
        struct iova_magazine *loaded;
        struct iova_magazine *prev;
};
static struct iova_magazine *iova_magazine_alloc(gfp_t flags)
{
        return kzalloc(sizeof(struct iova_magazine), flags);
}

static void iova_magazine_free(struct iova_magazine *mag)
{
        kfree(mag);
}

static void
iova_magazine_free_pfns(struct iova_magazine *mag, struct iova_domain *iovad)
{
        unsigned long flags;
        int i;

        if (!mag)
                return;

        spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);

        for (i = 0 ; i < mag->size; ++i) {
                struct iova *iova = private_find_iova(iovad, mag->pfns[i]);

                BUG_ON(!iova);
                private_free_iova(iovad, iova);
        }

        spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);

        mag->size = 0;
}

static bool iova_magazine_full(struct iova_magazine *mag)
{
        return (mag && mag->size == IOVA_MAG_SIZE);
}

static bool iova_magazine_empty(struct iova_magazine *mag)
{
        return (!mag || mag->size == 0);
}

static unsigned long iova_magazine_pop(struct iova_magazine *mag,
                                       unsigned long limit_pfn)
{
        int i;
        unsigned long pfn;

        BUG_ON(iova_magazine_empty(mag));

        /* Only fall back to the rbtree if we have no suitable pfns at all */
        for (i = mag->size - 1; mag->pfns[i] > limit_pfn; i--)
                if (i == 0)
                        return 0;

        /* Swap it to pop it */
        pfn = mag->pfns[i];
        mag->pfns[i] = mag->pfns[--mag->size];

        return pfn;
}

static void iova_magazine_push(struct iova_magazine *mag, unsigned long pfn)
{
        BUG_ON(iova_magazine_full(mag));

        mag->pfns[mag->size++] = pfn;
}
static void init_iova_rcaches(struct iova_domain *iovad)
{
        struct iova_cpu_rcache *cpu_rcache;
        struct iova_rcache *rcache;
        unsigned int cpu;
        int i;

        for (i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) {
                rcache = &iovad->rcaches[i];
                spin_lock_init(&rcache->lock);
                rcache->depot_size = 0;
                rcache->cpu_rcaches = __alloc_percpu(sizeof(*cpu_rcache), cache_line_size());
                if (WARN_ON(!rcache->cpu_rcaches))
                        continue;
                for_each_possible_cpu(cpu) {
                        cpu_rcache = per_cpu_ptr(rcache->cpu_rcaches, cpu);
                        spin_lock_init(&cpu_rcache->lock);
                        cpu_rcache->loaded = iova_magazine_alloc(GFP_KERNEL);
                        cpu_rcache->prev = iova_magazine_alloc(GFP_KERNEL);
                }
        }
}
/*
 * Try inserting IOVA range starting with 'iova_pfn' into 'rcache', and
 * return true on success.  Can fail if rcache is full and we can't free
 * space, and free_iova() (our only caller) will then return the IOVA
 * range to the rbtree instead.
 */
static bool __iova_rcache_insert(struct iova_domain *iovad,
                                 struct iova_rcache *rcache,
                                 unsigned long iova_pfn)
{
        struct iova_magazine *mag_to_free = NULL;
        struct iova_cpu_rcache *cpu_rcache;
        bool can_insert = false;
        unsigned long flags;

        cpu_rcache = raw_cpu_ptr(rcache->cpu_rcaches);
        spin_lock_irqsave(&cpu_rcache->lock, flags);

        if (!iova_magazine_full(cpu_rcache->loaded)) {
                can_insert = true;
        } else if (!iova_magazine_full(cpu_rcache->prev)) {
                swap(cpu_rcache->prev, cpu_rcache->loaded);
                can_insert = true;
        } else {
                struct iova_magazine *new_mag = iova_magazine_alloc(GFP_ATOMIC);

                if (new_mag) {
                        spin_lock(&rcache->lock);
                        if (rcache->depot_size < MAX_GLOBAL_MAGS) {
                                rcache->depot[rcache->depot_size++] =
                                                cpu_rcache->loaded;
                        } else {
                                mag_to_free = cpu_rcache->loaded;
                        }
                        spin_unlock(&rcache->lock);

                        cpu_rcache->loaded = new_mag;
                        can_insert = true;
                }
        }

        if (can_insert)
                iova_magazine_push(cpu_rcache->loaded, iova_pfn);

        spin_unlock_irqrestore(&cpu_rcache->lock, flags);

        if (mag_to_free) {
                iova_magazine_free_pfns(mag_to_free, iovad);
                iova_magazine_free(mag_to_free);
        }

        return can_insert;
}

static bool iova_rcache_insert(struct iova_domain *iovad, unsigned long pfn,
                               unsigned long size)
{
        unsigned int log_size = order_base_2(size);

        if (log_size >= IOVA_RANGE_CACHE_MAX_SIZE)
                return false;

        return __iova_rcache_insert(iovad, &iovad->rcaches[log_size], pfn);
}
/*
 * Caller wants to allocate a new IOVA range from 'rcache'.  If we can
 * satisfy the request, return a matching non-NULL range and remove
 * it from the 'rcache'.
 */
static unsigned long __iova_rcache_get(struct iova_rcache *rcache,
                                       unsigned long limit_pfn)
{
        struct iova_cpu_rcache *cpu_rcache;
        unsigned long iova_pfn = 0;
        bool has_pfn = false;
        unsigned long flags;

        cpu_rcache = raw_cpu_ptr(rcache->cpu_rcaches);
        spin_lock_irqsave(&cpu_rcache->lock, flags);

        if (!iova_magazine_empty(cpu_rcache->loaded)) {
                has_pfn = true;
        } else if (!iova_magazine_empty(cpu_rcache->prev)) {
                swap(cpu_rcache->prev, cpu_rcache->loaded);
                has_pfn = true;
        } else {
                spin_lock(&rcache->lock);
                if (rcache->depot_size > 0) {
                        iova_magazine_free(cpu_rcache->loaded);
                        cpu_rcache->loaded = rcache->depot[--rcache->depot_size];
                        has_pfn = true;
                }
                spin_unlock(&rcache->lock);
        }

        if (has_pfn)
                iova_pfn = iova_magazine_pop(cpu_rcache->loaded, limit_pfn);

        spin_unlock_irqrestore(&cpu_rcache->lock, flags);

        return iova_pfn;
}

/*
 * Try to satisfy IOVA allocation range from rcache.  Fail if requested
 * size is too big or the DMA limit we are given isn't satisfied by the
 * top element in the magazine.
 */
static unsigned long iova_rcache_get(struct iova_domain *iovad,
                                     unsigned long size,
                                     unsigned long limit_pfn)
{
        unsigned int log_size = order_base_2(size);

        if (log_size >= IOVA_RANGE_CACHE_MAX_SIZE)
                return 0;

        return __iova_rcache_get(&iovad->rcaches[log_size], limit_pfn - size);
}
/*
 * free rcache data structures.
 */
static void free_iova_rcaches(struct iova_domain *iovad)
{
        struct iova_rcache *rcache;
        struct iova_cpu_rcache *cpu_rcache;
        unsigned int cpu;
        int i, j;

        for (i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) {
                rcache = &iovad->rcaches[i];
                for_each_possible_cpu(cpu) {
                        cpu_rcache = per_cpu_ptr(rcache->cpu_rcaches, cpu);
                        iova_magazine_free(cpu_rcache->loaded);
                        iova_magazine_free(cpu_rcache->prev);
                }
                free_percpu(rcache->cpu_rcaches);
                for (j = 0; j < rcache->depot_size; ++j)
                        iova_magazine_free(rcache->depot[j]);
        }
}

/*
 * free all the IOVA ranges cached by a cpu (used when cpu is unplugged)
 */
void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad)
{
        struct iova_cpu_rcache *cpu_rcache;
        struct iova_rcache *rcache;
        unsigned long flags;
        int i;

        for (i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) {
                rcache = &iovad->rcaches[i];
                cpu_rcache = per_cpu_ptr(rcache->cpu_rcaches, cpu);
                spin_lock_irqsave(&cpu_rcache->lock, flags);
                iova_magazine_free_pfns(cpu_rcache->loaded, iovad);
                iova_magazine_free_pfns(cpu_rcache->prev, iovad);
                spin_unlock_irqrestore(&cpu_rcache->lock, flags);
        }
}
MODULE_AUTHOR("Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>");
MODULE_LICENSE("GPL");