iova.c

/*
 * Copyright © 2006-2009, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 */

#include <linux/iova.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/bitops.h>
#include <linux/cpu.h>

/* The anchor node sits above the top of the usable address space */
#define IOVA_ANCHOR	~0UL

static bool iova_rcache_insert(struct iova_domain *iovad,
                               unsigned long pfn,
                               unsigned long size);
static unsigned long iova_rcache_get(struct iova_domain *iovad,
                                     unsigned long size,
                                     unsigned long limit_pfn);
static void init_iova_rcaches(struct iova_domain *iovad);
static void free_iova_rcaches(struct iova_domain *iovad);
static void fq_destroy_all_entries(struct iova_domain *iovad);
static void fq_flush_timeout(struct timer_list *t);

void
init_iova_domain(struct iova_domain *iovad, unsigned long granule,
        unsigned long start_pfn)
{
        /*
         * IOVA granularity will normally be equal to the smallest
         * supported IOMMU page size; both *must* be capable of
         * representing individual CPU pages exactly.
         */
        BUG_ON((granule > PAGE_SIZE) || !is_power_of_2(granule));

        spin_lock_init(&iovad->iova_rbtree_lock);
        iovad->rbroot = RB_ROOT;
        iovad->cached_node = &iovad->anchor.node;
        iovad->cached32_node = &iovad->anchor.node;
        iovad->granule = granule;
        iovad->start_pfn = start_pfn;
        iovad->dma_32bit_pfn = 1UL << (32 - iova_shift(iovad));
        iovad->flush_cb = NULL;
        iovad->fq = NULL;
        iovad->anchor.pfn_lo = iovad->anchor.pfn_hi = IOVA_ANCHOR;
        rb_link_node(&iovad->anchor.node, NULL, &iovad->rbroot.rb_node);
        rb_insert_color(&iovad->anchor.node, &iovad->rbroot);
        init_iova_rcaches(iovad);
}
EXPORT_SYMBOL_GPL(init_iova_domain);
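
/*
 * Example (illustrative only, not part of the original file): a typical
 * caller initializes the domain with the IOMMU page-size granule and a
 * non-zero starting PFN so that IOVA 0 is never handed out. The variable
 * name "iovad" below is a hypothetical caller-side object.
 *
 *	struct iova_domain iovad;
 *
 *	(granule must be a power of two no larger than PAGE_SIZE,
 *	 per the BUG_ON above; start at PFN 1 to reserve PFN 0)
 *	init_iova_domain(&iovad, PAGE_SIZE, 1);
 */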

static void free_iova_flush_queue(struct iova_domain *iovad)
{
        if (!iovad->fq)
                return;

        if (timer_pending(&iovad->fq_timer))
                del_timer(&iovad->fq_timer);

        fq_destroy_all_entries(iovad);

        free_percpu(iovad->fq);

        iovad->fq = NULL;
        iovad->flush_cb = NULL;
        iovad->entry_dtor = NULL;
}

int init_iova_flush_queue(struct iova_domain *iovad,
                          iova_flush_cb flush_cb, iova_entry_dtor entry_dtor)
{
        int cpu;

        atomic64_set(&iovad->fq_flush_start_cnt, 0);
        atomic64_set(&iovad->fq_flush_finish_cnt, 0);

        iovad->fq = alloc_percpu(struct iova_fq);
        if (!iovad->fq)
                return -ENOMEM;

        iovad->flush_cb = flush_cb;
        iovad->entry_dtor = entry_dtor;

        for_each_possible_cpu(cpu) {
                struct iova_fq *fq;

                fq = per_cpu_ptr(iovad->fq, cpu);
                fq->head = 0;
                fq->tail = 0;

                spin_lock_init(&fq->lock);
        }

        timer_setup(&iovad->fq_timer, fq_flush_timeout, 0);
        atomic_set(&iovad->fq_timer_on, 0);

        return 0;
}
EXPORT_SYMBOL_GPL(init_iova_flush_queue);
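
/*
 * Example (illustrative only): an IOMMU driver that batches IOTLB
 * invalidations registers a flush callback when setting up its domain.
 * The callback name "my_flush_iotlb" is hypothetical.
 *
 *	static void my_flush_iotlb(struct iova_domain *iovad)
 *	{
 *		... issue a domain-wide IOTLB invalidation ...
 *	}
 *
 *	if (init_iova_flush_queue(&iovad, my_flush_iotlb, NULL))
 *		... fall back to strict (synchronous) invalidation ...
 */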

static struct rb_node *
__get_cached_rbnode(struct iova_domain *iovad, unsigned long limit_pfn)
{
        if (limit_pfn <= iovad->dma_32bit_pfn)
                return iovad->cached32_node;

        return iovad->cached_node;
}

static void
__cached_rbnode_insert_update(struct iova_domain *iovad, struct iova *new)
{
        if (new->pfn_hi < iovad->dma_32bit_pfn)
                iovad->cached32_node = &new->node;
        else
                iovad->cached_node = &new->node;
}

static void
__cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free)
{
        struct iova *cached_iova;

        cached_iova = rb_entry(iovad->cached32_node, struct iova, node);
        if (free->pfn_hi < iovad->dma_32bit_pfn &&
            free->pfn_lo >= cached_iova->pfn_lo)
                iovad->cached32_node = rb_next(&free->node);

        cached_iova = rb_entry(iovad->cached_node, struct iova, node);
        if (free->pfn_lo >= cached_iova->pfn_lo)
                iovad->cached_node = rb_next(&free->node);
}

/* Insert the iova into domain rbtree by holding writer lock */
static void
iova_insert_rbtree(struct rb_root *root, struct iova *iova,
                   struct rb_node *start)
{
        struct rb_node **new, *parent = NULL;

        new = (start) ? &start : &(root->rb_node);
        /* Figure out where to put new node */
        while (*new) {
                struct iova *this = rb_entry(*new, struct iova, node);

                parent = *new;

                if (iova->pfn_lo < this->pfn_lo)
                        new = &((*new)->rb_left);
                else if (iova->pfn_lo > this->pfn_lo)
                        new = &((*new)->rb_right);
                else {
                        WARN_ON(1); /* this should not happen */
                        return;
                }
        }
        /* Add new node and rebalance tree. */
        rb_link_node(&iova->node, parent, new);
        rb_insert_color(&iova->node, root);
}

static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
                unsigned long size, unsigned long limit_pfn,
                struct iova *new, bool size_aligned)
{
        struct rb_node *curr, *prev;
        struct iova *curr_iova;
        unsigned long flags;
        unsigned long new_pfn;
        unsigned long align_mask = ~0UL;

        if (size_aligned)
                align_mask <<= fls_long(size - 1);

        /* Walk the tree backwards */
        spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
        curr = __get_cached_rbnode(iovad, limit_pfn);
        curr_iova = rb_entry(curr, struct iova, node);
        do {
                limit_pfn = min(limit_pfn, curr_iova->pfn_lo);
                new_pfn = (limit_pfn - size) & align_mask;
                prev = curr;
                curr = rb_prev(curr);
                curr_iova = rb_entry(curr, struct iova, node);
        } while (curr && new_pfn <= curr_iova->pfn_hi);

        if (limit_pfn < size || new_pfn < iovad->start_pfn) {
                spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
                return -ENOMEM;
        }

        /* pfn_lo will point to size aligned address if size_aligned is set */
        new->pfn_lo = new_pfn;
        new->pfn_hi = new->pfn_lo + size - 1;

        /* If we have 'prev', it's a valid place to start the insertion. */
        iova_insert_rbtree(&iovad->rbroot, new, prev);
        __cached_rbnode_insert_update(iovad, new);

        spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);

        return 0;
}

static struct kmem_cache *iova_cache;
static unsigned int iova_cache_users;
static DEFINE_MUTEX(iova_cache_mutex);

struct iova *alloc_iova_mem(void)
{
        return kmem_cache_alloc(iova_cache, GFP_ATOMIC);
}
EXPORT_SYMBOL(alloc_iova_mem);

void free_iova_mem(struct iova *iova)
{
        if (iova->pfn_lo != IOVA_ANCHOR)
                kmem_cache_free(iova_cache, iova);
}
EXPORT_SYMBOL(free_iova_mem);

int iova_cache_get(void)
{
        mutex_lock(&iova_cache_mutex);
        if (!iova_cache_users) {
                iova_cache = kmem_cache_create(
                        "iommu_iova", sizeof(struct iova), 0,
                        SLAB_HWCACHE_ALIGN, NULL);
                if (!iova_cache) {
                        mutex_unlock(&iova_cache_mutex);
                        printk(KERN_ERR "Couldn't create iova cache\n");
                        return -ENOMEM;
                }
        }

        iova_cache_users++;
        mutex_unlock(&iova_cache_mutex);

        return 0;
}
EXPORT_SYMBOL_GPL(iova_cache_get);

void iova_cache_put(void)
{
        mutex_lock(&iova_cache_mutex);
        if (WARN_ON(!iova_cache_users)) {
                mutex_unlock(&iova_cache_mutex);
                return;
        }
        iova_cache_users--;
        if (!iova_cache_users)
                kmem_cache_destroy(iova_cache);
        mutex_unlock(&iova_cache_mutex);
}
EXPORT_SYMBOL_GPL(iova_cache_put);

/**
 * alloc_iova - allocates an iova
 * @iovad: - iova domain in question
 * @size: - size of page frames to allocate
 * @limit_pfn: - max limit address
 * @size_aligned: - set if size_aligned address range is required
 * This function allocates an iova in the range iovad->start_pfn to limit_pfn,
 * searching top-down from limit_pfn to iovad->start_pfn. If the size_aligned
 * flag is set then the allocated address iova->pfn_lo will be naturally
 * aligned on roundup_power_of_two(size).
 */
struct iova *
alloc_iova(struct iova_domain *iovad, unsigned long size,
        unsigned long limit_pfn,
        bool size_aligned)
{
        struct iova *new_iova;
        int ret;

        new_iova = alloc_iova_mem();
        if (!new_iova)
                return NULL;

        ret = __alloc_and_insert_iova_range(iovad, size, limit_pfn + 1,
                        new_iova, size_aligned);

        if (ret) {
                free_iova_mem(new_iova);
                return NULL;
        }

        return new_iova;
}
EXPORT_SYMBOL_GPL(alloc_iova);

static struct iova *
private_find_iova(struct iova_domain *iovad, unsigned long pfn)
{
        struct rb_node *node = iovad->rbroot.rb_node;

        assert_spin_locked(&iovad->iova_rbtree_lock);

        while (node) {
                struct iova *iova = rb_entry(node, struct iova, node);

                if (pfn < iova->pfn_lo)
                        node = node->rb_left;
                else if (pfn > iova->pfn_hi)
                        node = node->rb_right;
                else
                        return iova;    /* pfn falls within iova's range */
        }

        return NULL;
}

static void private_free_iova(struct iova_domain *iovad, struct iova *iova)
{
        assert_spin_locked(&iovad->iova_rbtree_lock);
        __cached_rbnode_delete_update(iovad, iova);
        rb_erase(&iova->node, &iovad->rbroot);
        free_iova_mem(iova);
}

/**
 * find_iova - finds an iova for a given pfn
 * @iovad: - iova domain in question.
 * @pfn: - page frame number
 * This function finds and returns an iova belonging to the
 * given domain which matches the given pfn.
 */
struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn)
{
        unsigned long flags;
        struct iova *iova;

        /* Take the lock so that no other thread is manipulating the rbtree */
        spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
        iova = private_find_iova(iovad, pfn);
        spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
        return iova;
}
EXPORT_SYMBOL_GPL(find_iova);

/**
 * __free_iova - frees the given iova
 * @iovad: iova domain in question.
 * @iova: iova in question.
 * Frees the given iova belonging to the given domain
 */
void
__free_iova(struct iova_domain *iovad, struct iova *iova)
{
        unsigned long flags;

        spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
        private_free_iova(iovad, iova);
        spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
}
EXPORT_SYMBOL_GPL(__free_iova);

/**
 * free_iova - finds and frees the iova for a given pfn
 * @iovad: - iova domain in question.
 * @pfn: - pfn that is allocated previously
 * This function finds an iova for a given pfn and then
 * frees the iova from that domain.
 */
void
free_iova(struct iova_domain *iovad, unsigned long pfn)
{
        struct iova *iova = find_iova(iovad, pfn);

        if (iova)
                __free_iova(iovad, iova);
}
EXPORT_SYMBOL_GPL(free_iova);
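
/*
 * Example (illustrative only): allocating a size-aligned 16-page range
 * below the 32-bit boundary and releasing it again. The domain object
 * "iovad" and the size/limit values are hypothetical caller choices.
 *
 *	struct iova *iova;
 *
 *	iova = alloc_iova(&iovad, 16,
 *			  DMA_BIT_MASK(32) >> iova_shift(&iovad), true);
 *	if (iova) {
 *		... map iova->pfn_lo .. iova->pfn_hi in the IOMMU ...
 *		__free_iova(&iovad, iova);
 *	}
 */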

/**
 * alloc_iova_fast - allocates an iova from rcache
 * @iovad: - iova domain in question
 * @size: - size of page frames to allocate
 * @limit_pfn: - max limit address
 * @flush_rcache: - set to flush rcache on regular allocation failure
 * This function tries to satisfy an iova allocation from the rcache,
 * and falls back to regular allocation on failure. If regular allocation
 * fails too and the flush_rcache flag is set then the rcache will be flushed.
 */
unsigned long
alloc_iova_fast(struct iova_domain *iovad, unsigned long size,
                unsigned long limit_pfn, bool flush_rcache)
{
        unsigned long iova_pfn;
        struct iova *new_iova;

        iova_pfn = iova_rcache_get(iovad, size, limit_pfn + 1);
        if (iova_pfn)
                return iova_pfn;

retry:
        new_iova = alloc_iova(iovad, size, limit_pfn, true);
        if (!new_iova) {
                unsigned int cpu;

                if (!flush_rcache)
                        return 0;

                /* Try replenishing IOVAs by flushing rcache. */
                flush_rcache = false;
                for_each_online_cpu(cpu)
                        free_cpu_cached_iovas(cpu, iovad);
                goto retry;
        }

        return new_iova->pfn_lo;
}
EXPORT_SYMBOL_GPL(alloc_iova_fast);

/**
 * free_iova_fast - free iova pfn range into rcache
 * @iovad: - iova domain in question.
 * @pfn: - pfn that is allocated previously
 * @size: - # of pages in range
 * This function frees an iova range by trying to put it into the rcache,
 * falling back to regular iova deallocation via free_iova() if this fails.
 */
void
free_iova_fast(struct iova_domain *iovad, unsigned long pfn, unsigned long size)
{
        if (iova_rcache_insert(iovad, pfn, size))
                return;

        free_iova(iovad, pfn);
}
EXPORT_SYMBOL_GPL(free_iova_fast);
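
/*
 * Example (illustrative only): the fast-path pair as it might be used on
 * the DMA map/unmap hot path. Sizes are in IOVA pages; the values and the
 * "iovad" object are hypothetical.
 *
 *	unsigned long pfn, npages = 4;
 *	unsigned long limit = DMA_BIT_MASK(32) >> iova_shift(&iovad);
 *
 *	pfn = alloc_iova_fast(&iovad, npages, limit, true);
 *	if (!pfn)
 *		... handle allocation failure ...
 *	...
 *	free_iova_fast(&iovad, pfn, npages);
 */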

#define fq_ring_for_each(i, fq) \
        for ((i) = (fq)->head; (i) != (fq)->tail; (i) = ((i) + 1) % IOVA_FQ_SIZE)

static inline bool fq_full(struct iova_fq *fq)
{
        assert_spin_locked(&fq->lock);
        return (((fq->tail + 1) % IOVA_FQ_SIZE) == fq->head);
}

static inline unsigned fq_ring_add(struct iova_fq *fq)
{
        unsigned idx = fq->tail;

        assert_spin_locked(&fq->lock);

        fq->tail = (idx + 1) % IOVA_FQ_SIZE;

        return idx;
}

static void fq_ring_free(struct iova_domain *iovad, struct iova_fq *fq)
{
        u64 counter = atomic64_read(&iovad->fq_flush_finish_cnt);
        unsigned idx;

        assert_spin_locked(&fq->lock);

        fq_ring_for_each(idx, fq) {

                if (fq->entries[idx].counter >= counter)
                        break;

                if (iovad->entry_dtor)
                        iovad->entry_dtor(fq->entries[idx].data);

                free_iova_fast(iovad,
                               fq->entries[idx].iova_pfn,
                               fq->entries[idx].pages);

                fq->head = (fq->head + 1) % IOVA_FQ_SIZE;
        }
}

static void iova_domain_flush(struct iova_domain *iovad)
{
        atomic64_inc(&iovad->fq_flush_start_cnt);
        iovad->flush_cb(iovad);
        atomic64_inc(&iovad->fq_flush_finish_cnt);
}

static void fq_destroy_all_entries(struct iova_domain *iovad)
{
        int cpu;

        /*
         * This code runs when the iova_domain is being destroyed, so don't
         * bother to free iovas, just call the entry_dtor on all remaining
         * entries.
         */
        if (!iovad->entry_dtor)
                return;

        for_each_possible_cpu(cpu) {
                struct iova_fq *fq = per_cpu_ptr(iovad->fq, cpu);
                int idx;

                fq_ring_for_each(idx, fq)
                        iovad->entry_dtor(fq->entries[idx].data);
        }
}

static void fq_flush_timeout(struct timer_list *t)
{
        struct iova_domain *iovad = from_timer(iovad, t, fq_timer);
        int cpu;

        atomic_set(&iovad->fq_timer_on, 0);
        iova_domain_flush(iovad);

        for_each_possible_cpu(cpu) {
                unsigned long flags;
                struct iova_fq *fq;

                fq = per_cpu_ptr(iovad->fq, cpu);
                spin_lock_irqsave(&fq->lock, flags);
                fq_ring_free(iovad, fq);
                spin_unlock_irqrestore(&fq->lock, flags);
        }
}

void queue_iova(struct iova_domain *iovad,
                unsigned long pfn, unsigned long pages,
                unsigned long data)
{
        struct iova_fq *fq = raw_cpu_ptr(iovad->fq);
        unsigned long flags;
        unsigned idx;

        spin_lock_irqsave(&fq->lock, flags);

        /*
         * First remove all entries from the flush queue that have already been
         * flushed out on another CPU. This makes the fq_full() check below less
         * likely to be true.
         */
        fq_ring_free(iovad, fq);

        if (fq_full(fq)) {
                iova_domain_flush(iovad);
                fq_ring_free(iovad, fq);
        }

        idx = fq_ring_add(fq);

        fq->entries[idx].iova_pfn = pfn;
        fq->entries[idx].pages    = pages;
        fq->entries[idx].data     = data;
        fq->entries[idx].counter  = atomic64_read(&iovad->fq_flush_start_cnt);

        spin_unlock_irqrestore(&fq->lock, flags);

        if (atomic_cmpxchg(&iovad->fq_timer_on, 0, 1) == 0)
                mod_timer(&iovad->fq_timer,
                          jiffies + msecs_to_jiffies(IOVA_FQ_TIMEOUT));
}
EXPORT_SYMBOL_GPL(queue_iova);
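
/*
 * Example (illustrative only): instead of freeing an IOVA range
 * synchronously after unmapping, a caller using a flush queue defers the
 * free until the next IOTLB flush. The last argument is opaque data
 * passed to the entry_dtor; 0 here because this sketch registered no
 * destructor with init_iova_flush_queue().
 *
 *	... unmap pfn .. pfn + npages - 1 from the IOMMU page tables ...
 *	queue_iova(&iovad, pfn, npages, 0);
 */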

/**
 * put_iova_domain - destroys the iova domain
 * @iovad: - iova domain in question.
 * All the iova's in that domain are destroyed.
 */
void put_iova_domain(struct iova_domain *iovad)
{
        struct iova *iova, *tmp;

        free_iova_flush_queue(iovad);
        free_iova_rcaches(iovad);
        rbtree_postorder_for_each_entry_safe(iova, tmp, &iovad->rbroot, node)
                free_iova_mem(iova);
}
EXPORT_SYMBOL_GPL(put_iova_domain);

static int
__is_range_overlap(struct rb_node *node,
        unsigned long pfn_lo, unsigned long pfn_hi)
{
        struct iova *iova = rb_entry(node, struct iova, node);

        if ((pfn_lo <= iova->pfn_hi) && (pfn_hi >= iova->pfn_lo))
                return 1;
        return 0;
}

static inline struct iova *
alloc_and_init_iova(unsigned long pfn_lo, unsigned long pfn_hi)
{
        struct iova *iova;

        iova = alloc_iova_mem();
        if (iova) {
                iova->pfn_lo = pfn_lo;
                iova->pfn_hi = pfn_hi;
        }

        return iova;
}

static struct iova *
__insert_new_range(struct iova_domain *iovad,
        unsigned long pfn_lo, unsigned long pfn_hi)
{
        struct iova *iova;

        iova = alloc_and_init_iova(pfn_lo, pfn_hi);
        if (iova)
                iova_insert_rbtree(&iovad->rbroot, iova, NULL);

        return iova;
}

static void
__adjust_overlap_range(struct iova *iova,
        unsigned long *pfn_lo, unsigned long *pfn_hi)
{
        if (*pfn_lo < iova->pfn_lo)
                iova->pfn_lo = *pfn_lo;
        if (*pfn_hi > iova->pfn_hi)
                *pfn_lo = iova->pfn_hi + 1;
}

/**
 * reserve_iova - reserves an iova in the given range
 * @iovad: - iova domain pointer
 * @pfn_lo: - lower page frame address
 * @pfn_hi: - higher pfn address
 * This function reserves the address range from pfn_lo to pfn_hi so
 * that this address is not dished out as part of alloc_iova.
 */
struct iova *
reserve_iova(struct iova_domain *iovad,
        unsigned long pfn_lo, unsigned long pfn_hi)
{
        struct rb_node *node;
        unsigned long flags;
        struct iova *iova;
        unsigned int overlap = 0;

        /* Don't allow nonsensical pfns */
        if (WARN_ON((pfn_hi | pfn_lo) > (ULLONG_MAX >> iova_shift(iovad))))
                return NULL;

        spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
        for (node = rb_first(&iovad->rbroot); node; node = rb_next(node)) {
                if (__is_range_overlap(node, pfn_lo, pfn_hi)) {
                        iova = rb_entry(node, struct iova, node);
                        __adjust_overlap_range(iova, &pfn_lo, &pfn_hi);
                        if ((pfn_lo >= iova->pfn_lo) &&
                                (pfn_hi <= iova->pfn_hi))
                                goto finish;
                        overlap = 1;
                } else if (overlap)
                        break;
        }

        /* We are here either because this is the first reserved node
         * or we need to insert the remaining non-overlapping address range
         */
        iova = __insert_new_range(iovad, pfn_lo, pfn_hi);
finish:

        spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
        return iova;
}
EXPORT_SYMBOL_GPL(reserve_iova);
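
/*
 * Example (illustrative only): carving an MMIO window out of a domain so
 * alloc_iova() never hands it to a device. The x86 MSI doorbell range is
 * used here purely as an illustration; real callers derive the window
 * from their platform.
 *
 *	unsigned long lo = 0xfee00000UL >> iova_shift(&iovad);
 *	unsigned long hi = 0xfeefffffUL >> iova_shift(&iovad);
 *
 *	if (!reserve_iova(&iovad, lo, hi))
 *		... reservation failed, treat as fatal for this domain ...
 */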

/**
 * copy_reserved_iova - copies the reserved ranges between domains
 * @from: - source domain from where to copy
 * @to: - destination domain where to copy
 * This function copies reserved iova's from one domain to
 * other.
 */
void
copy_reserved_iova(struct iova_domain *from, struct iova_domain *to)
{
        unsigned long flags;
        struct rb_node *node;

        spin_lock_irqsave(&from->iova_rbtree_lock, flags);
        for (node = rb_first(&from->rbroot); node; node = rb_next(node)) {
                struct iova *iova = rb_entry(node, struct iova, node);
                struct iova *new_iova;

                if (iova->pfn_lo == IOVA_ANCHOR)
                        continue;

                new_iova = reserve_iova(to, iova->pfn_lo, iova->pfn_hi);
                if (!new_iova)
                        printk(KERN_ERR "Reserve iova range %lx-%lx failed\n",
                                iova->pfn_lo, iova->pfn_hi);
        }
        spin_unlock_irqrestore(&from->iova_rbtree_lock, flags);
}
EXPORT_SYMBOL_GPL(copy_reserved_iova);

struct iova *
split_and_remove_iova(struct iova_domain *iovad, struct iova *iova,
                      unsigned long pfn_lo, unsigned long pfn_hi)
{
        unsigned long flags;
        struct iova *prev = NULL, *next = NULL;

        spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
        if (iova->pfn_lo < pfn_lo) {
                prev = alloc_and_init_iova(iova->pfn_lo, pfn_lo - 1);
                if (prev == NULL)
                        goto error;
        }
        if (iova->pfn_hi > pfn_hi) {
                next = alloc_and_init_iova(pfn_hi + 1, iova->pfn_hi);
                if (next == NULL)
                        goto error;
        }

        __cached_rbnode_delete_update(iovad, iova);
        rb_erase(&iova->node, &iovad->rbroot);

        if (prev) {
                iova_insert_rbtree(&iovad->rbroot, prev, NULL);
                iova->pfn_lo = pfn_lo;
        }
        if (next) {
                iova_insert_rbtree(&iovad->rbroot, next, NULL);
                iova->pfn_hi = pfn_hi;
        }
        spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);

        return iova;

error:
        spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
        if (prev)
                free_iova_mem(prev);
        return NULL;
}
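
/*
 * Worked example (illustrative only): given an iova node spanning PFNs
 * 100..199, split_and_remove_iova(iovad, iova, 120, 149) erases the
 * original node from the rbtree, inserts two new flanking nodes for
 * 100..119 and 150..199, and returns the now-standalone middle range
 * 120..149 to the caller.
 */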

/*
 * Magazine caches for IOVA ranges.  For an introduction to magazines,
 * see the USENIX 2001 paper "Magazines and Vmem: Extending the Slab
 * Allocator to Many CPUs and Arbitrary Resources" by Bonwick and Adams.
 * For simplicity, we use a static magazine size and don't implement the
 * dynamic size tuning described in the paper.
 */

#define IOVA_MAG_SIZE 128

struct iova_magazine {
        unsigned long size;
        unsigned long pfns[IOVA_MAG_SIZE];
};

struct iova_cpu_rcache {
        spinlock_t lock;
        struct iova_magazine *loaded;
        struct iova_magazine *prev;
};
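
/*
 * Back-of-the-envelope footprint (illustrative, assuming 64-bit longs and
 * no padding): one magazine is sizeof(long) + IOVA_MAG_SIZE * sizeof(long)
 * = 8 + 128 * 8 = 1032 bytes, i.e. about 1KiB. Each CPU holds two
 * magazines ("loaded" and "prev") per size class, so it can cache up to
 * 2 * IOVA_MAG_SIZE = 256 ranges of a given size before a full magazine
 * is spilled to the per-domain depot in __iova_rcache_insert() below.
 */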

static struct iova_magazine *iova_magazine_alloc(gfp_t flags)
{
        return kzalloc(sizeof(struct iova_magazine), flags);
}

static void iova_magazine_free(struct iova_magazine *mag)
{
        kfree(mag);
}

static void
iova_magazine_free_pfns(struct iova_magazine *mag, struct iova_domain *iovad)
{
        unsigned long flags;
        int i;

        if (!mag)
                return;

        spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);

        for (i = 0 ; i < mag->size; ++i) {
                struct iova *iova = private_find_iova(iovad, mag->pfns[i]);

                BUG_ON(!iova);
                private_free_iova(iovad, iova);
        }

        spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);

        mag->size = 0;
}

static bool iova_magazine_full(struct iova_magazine *mag)
{
        return (mag && mag->size == IOVA_MAG_SIZE);
}

static bool iova_magazine_empty(struct iova_magazine *mag)
{
        return (!mag || mag->size == 0);
}

static unsigned long iova_magazine_pop(struct iova_magazine *mag,
                                       unsigned long limit_pfn)
{
        int i;
        unsigned long pfn;

        BUG_ON(iova_magazine_empty(mag));

        /* Only fall back to the rbtree if we have no suitable pfns at all */
        for (i = mag->size - 1; mag->pfns[i] > limit_pfn; i--)
                if (i == 0)
                        return 0;

        /* Swap it to pop it */
        pfn = mag->pfns[i];
        mag->pfns[i] = mag->pfns[--mag->size];

        return pfn;
}

static void iova_magazine_push(struct iova_magazine *mag, unsigned long pfn)
{
        BUG_ON(iova_magazine_full(mag));

        mag->pfns[mag->size++] = pfn;
}

static void init_iova_rcaches(struct iova_domain *iovad)
{
        struct iova_cpu_rcache *cpu_rcache;
        struct iova_rcache *rcache;
        unsigned int cpu;
        int i;

        for (i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) {
                rcache = &iovad->rcaches[i];
                spin_lock_init(&rcache->lock);
                rcache->depot_size = 0;
                rcache->cpu_rcaches = __alloc_percpu(sizeof(*cpu_rcache),
                                                     cache_line_size());
                if (WARN_ON(!rcache->cpu_rcaches))
                        continue;
                for_each_possible_cpu(cpu) {
                        cpu_rcache = per_cpu_ptr(rcache->cpu_rcaches, cpu);
                        spin_lock_init(&cpu_rcache->lock);
                        cpu_rcache->loaded = iova_magazine_alloc(GFP_KERNEL);
                        cpu_rcache->prev = iova_magazine_alloc(GFP_KERNEL);
                }
        }
}

/*
 * Try inserting IOVA range starting with 'iova_pfn' into 'rcache', and
 * return true on success.  Can fail if rcache is full and we can't free
 * space, and free_iova_fast() (our only caller) will then return the IOVA
 * range to the rbtree instead.
 */
static bool __iova_rcache_insert(struct iova_domain *iovad,
                                 struct iova_rcache *rcache,
                                 unsigned long iova_pfn)
{
        struct iova_magazine *mag_to_free = NULL;
        struct iova_cpu_rcache *cpu_rcache;
        bool can_insert = false;
        unsigned long flags;

        cpu_rcache = raw_cpu_ptr(rcache->cpu_rcaches);
        spin_lock_irqsave(&cpu_rcache->lock, flags);

        if (!iova_magazine_full(cpu_rcache->loaded)) {
                can_insert = true;
        } else if (!iova_magazine_full(cpu_rcache->prev)) {
                swap(cpu_rcache->prev, cpu_rcache->loaded);
                can_insert = true;
        } else {
                struct iova_magazine *new_mag = iova_magazine_alloc(GFP_ATOMIC);

                if (new_mag) {
                        spin_lock(&rcache->lock);
                        if (rcache->depot_size < MAX_GLOBAL_MAGS) {
                                rcache->depot[rcache->depot_size++] =
                                                cpu_rcache->loaded;
                        } else {
                                mag_to_free = cpu_rcache->loaded;
                        }
                        spin_unlock(&rcache->lock);

                        cpu_rcache->loaded = new_mag;
                        can_insert = true;
                }
        }

        if (can_insert)
                iova_magazine_push(cpu_rcache->loaded, iova_pfn);

        spin_unlock_irqrestore(&cpu_rcache->lock, flags);

        if (mag_to_free) {
                iova_magazine_free_pfns(mag_to_free, iovad);
                iova_magazine_free(mag_to_free);
        }

        return can_insert;
}

static bool iova_rcache_insert(struct iova_domain *iovad, unsigned long pfn,
                               unsigned long size)
{
        unsigned int log_size = order_base_2(size);

        if (log_size >= IOVA_RANGE_CACHE_MAX_SIZE)
                return false;

        return __iova_rcache_insert(iovad, &iovad->rcaches[log_size], pfn);
}

/*
 * Caller wants to allocate a new IOVA range from 'rcache'.  If we can
 * satisfy the request, return a matching non-NULL range and remove
 * it from the 'rcache'.
 */
static unsigned long __iova_rcache_get(struct iova_rcache *rcache,
                                       unsigned long limit_pfn)
{
        struct iova_cpu_rcache *cpu_rcache;
        unsigned long iova_pfn = 0;
        bool has_pfn = false;
        unsigned long flags;

        cpu_rcache = raw_cpu_ptr(rcache->cpu_rcaches);
        spin_lock_irqsave(&cpu_rcache->lock, flags);

        if (!iova_magazine_empty(cpu_rcache->loaded)) {
                has_pfn = true;
        } else if (!iova_magazine_empty(cpu_rcache->prev)) {
                swap(cpu_rcache->prev, cpu_rcache->loaded);
                has_pfn = true;
        } else {
                spin_lock(&rcache->lock);
                if (rcache->depot_size > 0) {
                        iova_magazine_free(cpu_rcache->loaded);
                        cpu_rcache->loaded = rcache->depot[--rcache->depot_size];
                        has_pfn = true;
                }
                spin_unlock(&rcache->lock);
        }

        if (has_pfn)
                iova_pfn = iova_magazine_pop(cpu_rcache->loaded, limit_pfn);

        spin_unlock_irqrestore(&cpu_rcache->lock, flags);

        return iova_pfn;
}

/*
 * Try to satisfy IOVA allocation range from rcache.  Fail if requested
 * size is too big or the DMA limit we are given isn't satisfied by the
 * top element in the magazine.
 */
static unsigned long iova_rcache_get(struct iova_domain *iovad,
                                     unsigned long size,
                                     unsigned long limit_pfn)
{
        unsigned int log_size = order_base_2(size);

        if (log_size >= IOVA_RANGE_CACHE_MAX_SIZE)
                return 0;

        return __iova_rcache_get(&iovad->rcaches[log_size], limit_pfn - size);
}

/*
 * free rcache data structures.
 */
static void free_iova_rcaches(struct iova_domain *iovad)
{
        struct iova_rcache *rcache;
        struct iova_cpu_rcache *cpu_rcache;
        unsigned int cpu;
        int i, j;

        for (i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) {
                rcache = &iovad->rcaches[i];
                for_each_possible_cpu(cpu) {
                        cpu_rcache = per_cpu_ptr(rcache->cpu_rcaches, cpu);
                        iova_magazine_free(cpu_rcache->loaded);
                        iova_magazine_free(cpu_rcache->prev);
                }
                free_percpu(rcache->cpu_rcaches);
                for (j = 0; j < rcache->depot_size; ++j)
                        iova_magazine_free(rcache->depot[j]);
        }
}

/*
 * free all the IOVA ranges cached by a cpu (used when cpu is unplugged)
 */
void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad)
{
        struct iova_cpu_rcache *cpu_rcache;
        struct iova_rcache *rcache;
        unsigned long flags;
        int i;

        for (i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) {
                rcache = &iovad->rcaches[i];
                cpu_rcache = per_cpu_ptr(rcache->cpu_rcaches, cpu);
                spin_lock_irqsave(&cpu_rcache->lock, flags);
                iova_magazine_free_pfns(cpu_rcache->loaded, iovad);
                iova_magazine_free_pfns(cpu_rcache->prev, iovad);
                spin_unlock_irqrestore(&cpu_rcache->lock, flags);
        }
}

MODULE_AUTHOR("Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>");
MODULE_LICENSE("GPL");