intel-svm.c

/*
 * Copyright © 2015 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * Authors: David Woodhouse <dwmw2@infradead.org>
 */

#include <linux/intel-iommu.h>
#include <linux/mmu_notifier.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>
#include <linux/intel-svm.h>
#include <linux/rculist.h>
#include <linux/pci.h>
#include <linux/pci-ats.h>
#include <linux/dmar.h>
#include <linux/interrupt.h>

static irqreturn_t prq_event_thread(int irq, void *d);

struct pasid_entry {
	u64 val;
};

struct pasid_state_entry {
	u64 val;
};
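/*
 * Allocate the per-IOMMU PASID table (and, if the hardware supports
 * deferred invalidation, the matching PASID-state table) as physically
 * contiguous pages, and initialise the IDR used to hand out PASIDs.
 */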
int intel_svm_alloc_pasid_tables(struct intel_iommu *iommu)
{
	struct page *pages;
	int order;

	/* Start at 2 because it's defined as 2^(1+PSS) */
	iommu->pasid_max = 2 << ecap_pss(iommu->ecap);

	/* Eventually I'm promised we will get a multi-level PASID table
	 * and it won't have to be physically contiguous. Until then,
	 * limit the size because 8MiB contiguous allocations can be hard
	 * to come by. The limit of 0x20000, which is 1MiB for each of
	 * the PASID and PASID-state tables, is somewhat arbitrary. */
	if (iommu->pasid_max > 0x20000)
		iommu->pasid_max = 0x20000;

	order = get_order(sizeof(struct pasid_entry) * iommu->pasid_max);
	pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
	if (!pages) {
		pr_warn("IOMMU: %s: Failed to allocate PASID table\n",
			iommu->name);
		return -ENOMEM;
	}
	iommu->pasid_table = page_address(pages);
	pr_info("%s: Allocated order %d PASID table.\n", iommu->name, order);

	if (ecap_dis(iommu->ecap)) {
		/* Just making it explicit... */
		BUILD_BUG_ON(sizeof(struct pasid_entry) != sizeof(struct pasid_state_entry));
		pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
		if (pages)
			iommu->pasid_state_table = page_address(pages);
		else
			pr_warn("IOMMU: %s: Failed to allocate PASID state table\n",
				iommu->name);
	}

	idr_init(&iommu->pasid_idr);

	return 0;
}
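/* Free the PASID and PASID-state tables and tear down the PASID IDR. */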
int intel_svm_free_pasid_tables(struct intel_iommu *iommu)
{
	int order = get_order(sizeof(struct pasid_entry) * iommu->pasid_max);

	if (iommu->pasid_table) {
		free_pages((unsigned long)iommu->pasid_table, order);
		iommu->pasid_table = NULL;
	}
	if (iommu->pasid_state_table) {
		free_pages((unsigned long)iommu->pasid_state_table, order);
		iommu->pasid_state_table = NULL;
	}
	idr_destroy(&iommu->pasid_idr);
	return 0;
}

#define PRQ_ORDER 0
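/*
 * Allocate the page request queue, wire up its interrupt handler, and
 * program the queue head/tail/address registers so the hardware can
 * start posting page requests.
 */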
int intel_svm_enable_prq(struct intel_iommu *iommu)
{
	struct page *pages;
	int irq, ret;

	pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, PRQ_ORDER);
	if (!pages) {
		pr_warn("IOMMU: %s: Failed to allocate page request queue\n",
			iommu->name);
		return -ENOMEM;
	}
	iommu->prq = page_address(pages);

	irq = dmar_alloc_hwirq(DMAR_UNITS_SUPPORTED + iommu->seq_id, iommu->node, iommu);
	if (irq <= 0) {
		pr_err("IOMMU: %s: Failed to create IRQ vector for page request queue\n",
		       iommu->name);
		ret = -EINVAL;
	err:
		free_pages((unsigned long)iommu->prq, PRQ_ORDER);
		iommu->prq = NULL;
		return ret;
	}
	iommu->pr_irq = irq;

	snprintf(iommu->prq_name, sizeof(iommu->prq_name), "dmar%d-prq", iommu->seq_id);

	ret = request_threaded_irq(irq, NULL, prq_event_thread, IRQF_ONESHOT,
				   iommu->prq_name, iommu);
	if (ret) {
		pr_err("IOMMU: %s: Failed to request IRQ for page request queue\n",
		       iommu->name);
		dmar_free_hwirq(irq);
		goto err;
	}
	dmar_writeq(iommu->reg + DMAR_PQH_REG, 0ULL);
	dmar_writeq(iommu->reg + DMAR_PQT_REG, 0ULL);
	dmar_writeq(iommu->reg + DMAR_PQA_REG, virt_to_phys(iommu->prq) | PRQ_ORDER);

	return 0;
}
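/* Disable the page request queue and release its IRQ and pages. */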
int intel_svm_finish_prq(struct intel_iommu *iommu)
{
	dmar_writeq(iommu->reg + DMAR_PQH_REG, 0ULL);
	dmar_writeq(iommu->reg + DMAR_PQT_REG, 0ULL);
	dmar_writeq(iommu->reg + DMAR_PQA_REG, 0ULL);

	free_irq(iommu->pr_irq, iommu);
	dmar_free_hwirq(iommu->pr_irq);
	iommu->pr_irq = 0;

	free_pages((unsigned long)iommu->prq, PRQ_ORDER);
	iommu->prq = NULL;

	return 0;
}
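/*
 * Flush the IOTLB for a given PASID and device, and also the device's
 * ATS device-IOTLB where one is present. A 'pages' value of -1 means a
 * full flush of the PASID.
 */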
static void intel_flush_svm_range_dev(struct intel_svm *svm, struct intel_svm_dev *sdev,
				      unsigned long address, unsigned long pages, int ih, int gl)
{
	struct qi_desc desc;

	if (pages == -1) {
		/* For global kernel pages we have to flush them in *all* PASIDs
		 * because that's the only option the hardware gives us. Despite
		 * the fact that they are actually only accessible through one. */
		if (gl)
			desc.low = QI_EIOTLB_PASID(svm->pasid) | QI_EIOTLB_DID(sdev->did) |
				QI_EIOTLB_GRAN(QI_GRAN_ALL_ALL) | QI_EIOTLB_TYPE;
		else
			desc.low = QI_EIOTLB_PASID(svm->pasid) | QI_EIOTLB_DID(sdev->did) |
				QI_EIOTLB_GRAN(QI_GRAN_NONG_PASID) | QI_EIOTLB_TYPE;
		desc.high = 0;
	} else {
		int mask = ilog2(__roundup_pow_of_two(pages));

		desc.low = QI_EIOTLB_PASID(svm->pasid) | QI_EIOTLB_DID(sdev->did) |
			QI_EIOTLB_GRAN(QI_GRAN_PSI_PASID) | QI_EIOTLB_TYPE;
		desc.high = QI_EIOTLB_ADDR(address) | QI_EIOTLB_GL(gl) |
			QI_EIOTLB_IH(ih) | QI_EIOTLB_AM(mask);
	}
	qi_submit_sync(&desc, svm->iommu);

	if (sdev->dev_iotlb) {
		desc.low = QI_DEV_EIOTLB_PASID(svm->pasid) | QI_DEV_EIOTLB_SID(sdev->sid) |
			QI_DEV_EIOTLB_QDEP(sdev->qdep) | QI_DEIOTLB_TYPE;
		if (pages == -1) {
			desc.high = QI_DEV_EIOTLB_ADDR(-1ULL >> 1) | QI_DEV_EIOTLB_SIZE;
		} else if (pages > 1) {
			/* The least significant zero bit indicates the size. So,
			 * for example, an "address" value of 0x12345f000 will
			 * flush from 0x123440000 to 0x12347ffff (256KiB). */
			unsigned long last = address + ((unsigned long)(pages - 1) << VTD_PAGE_SHIFT);
			unsigned long mask = __rounddown_pow_of_two(address ^ last);

			desc.high = QI_DEV_EIOTLB_ADDR((address & ~mask) | (mask - 1)) | QI_DEV_EIOTLB_SIZE;
		} else {
			desc.high = QI_DEV_EIOTLB_ADDR(address);
		}
		qi_submit_sync(&desc, svm->iommu);
	}
}
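/*
 * Flush a range for every device bound to this PASID. If deferred
 * invalidation is available and the PASID-state entry is still zero,
 * just record a pending flush (bit 63) instead of issuing invalidations.
 */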
static void intel_flush_svm_range(struct intel_svm *svm, unsigned long address,
				  unsigned long pages, int ih, int gl)
{
	struct intel_svm_dev *sdev;

	/* Try deferred invalidate if available */
	if (svm->iommu->pasid_state_table &&
	    !cmpxchg64(&svm->iommu->pasid_state_table[svm->pasid].val, 0, 1ULL << 63))
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(sdev, &svm->devs, list)
		intel_flush_svm_range_dev(svm, sdev, address, pages, ih, gl);
	rcu_read_unlock();
}
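/* MMU notifier callbacks: keep the IOTLB coherent with the CPU page tables. */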
static void intel_change_pte(struct mmu_notifier *mn, struct mm_struct *mm,
			     unsigned long address, pte_t pte)
{
	struct intel_svm *svm = container_of(mn, struct intel_svm, notifier);

	intel_flush_svm_range(svm, address, 1, 1, 0);
}

static void intel_invalidate_page(struct mmu_notifier *mn, struct mm_struct *mm,
				  unsigned long address)
{
	struct intel_svm *svm = container_of(mn, struct intel_svm, notifier);

	intel_flush_svm_range(svm, address, 1, 1, 0);
}

/* Pages have been freed at this point */
static void intel_invalidate_range(struct mmu_notifier *mn,
				   struct mm_struct *mm,
				   unsigned long start, unsigned long end)
{
	struct intel_svm *svm = container_of(mn, struct intel_svm, notifier);

	intel_flush_svm_range(svm, start,
			      (end - start + PAGE_SIZE - 1) >> VTD_PAGE_SHIFT, 0, 0);
}
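/* Invalidate the PASID cache entry for one PASID on a given device's IOMMU. */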
static void intel_flush_pasid_dev(struct intel_svm *svm, struct intel_svm_dev *sdev, int pasid)
{
	struct qi_desc desc;

	desc.high = 0;
	desc.low = QI_PC_TYPE | QI_PC_DID(sdev->did) | QI_PC_PASID_SEL | QI_PC_PASID(pasid);

	qi_submit_sync(&desc, svm->iommu);
}

static void intel_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
	struct intel_svm *svm = container_of(mn, struct intel_svm, notifier);
	struct intel_svm_dev *sdev;

	/* This might end up being called from exit_mmap(), *before* the page
	 * tables are cleared. And __mmu_notifier_release() will delete us from
	 * the list of notifiers so that our invalidate_range() callback doesn't
	 * get called when the page tables are cleared. So we need to protect
	 * against hardware accessing those page tables.
	 *
	 * We do it by clearing the entry in the PASID table and then flushing
	 * the IOTLB and the PASID table caches. This might upset hardware;
	 * perhaps we'll want to point the PASID to a dummy PGD (like the zero
	 * page) so that we end up taking a fault that the hardware really
	 * *has* to handle gracefully without affecting other processes.
	 */
	svm->iommu->pasid_table[svm->pasid].val = 0;
	wmb();

	rcu_read_lock();
	list_for_each_entry_rcu(sdev, &svm->devs, list) {
		intel_flush_pasid_dev(svm, sdev, svm->pasid);
		intel_flush_svm_range_dev(svm, sdev, 0, -1, 0, !svm->mm);
	}
	rcu_read_unlock();
}

static const struct mmu_notifier_ops intel_mmuops = {
	.release = intel_mm_release,
	.change_pte = intel_change_pte,
	.invalidate_page = intel_invalidate_page,
	.invalidate_range = intel_invalidate_range,
};

static DEFINE_MUTEX(pasid_mutex);
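/*
 * Bind the current process's mm (or init_mm, for SVM_FLAG_SUPERVISOR_MODE)
 * to a PASID on the given device. Unless SVM_FLAG_PRIVATE_PASID is set, an
 * existing PASID for the same mm is reused and reference-counted. If
 * 'pasid' is NULL, this is only an enabling check/preparation for the
 * device and no PASID is assigned.
 */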
int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_ops *ops)
{
	struct intel_iommu *iommu = intel_svm_device_to_iommu(dev);
	struct intel_svm_dev *sdev;
	struct intel_svm *svm = NULL;
	struct mm_struct *mm = NULL;
	int pasid_max;
	int ret;

	if (WARN_ON(!iommu))
		return -EINVAL;

	if (dev_is_pci(dev)) {
		pasid_max = pci_max_pasids(to_pci_dev(dev));
		if (pasid_max < 0)
			return -EINVAL;
	} else
		pasid_max = 1 << 20;

	if ((flags & SVM_FLAG_SUPERVISOR_MODE)) {
		if (!ecap_srs(iommu->ecap))
			return -EINVAL;
	} else if (pasid) {
		mm = get_task_mm(current);
		BUG_ON(!mm);
	}

	mutex_lock(&pasid_mutex);
	if (pasid && !(flags & SVM_FLAG_PRIVATE_PASID)) {
		int i;

		idr_for_each_entry(&iommu->pasid_idr, svm, i) {
			if (svm->mm != mm ||
			    (svm->flags & SVM_FLAG_PRIVATE_PASID))
				continue;

			if (svm->pasid >= pasid_max) {
				dev_warn(dev,
					 "Limited PASID width. Cannot use existing PASID %d\n",
					 svm->pasid);
				ret = -ENOSPC;
				goto out;
			}

			list_for_each_entry(sdev, &svm->devs, list) {
				if (dev == sdev->dev) {
					if (sdev->ops != ops) {
						ret = -EBUSY;
						goto out;
					}
					sdev->users++;
					goto success;
				}
			}

			break;
		}
	}

	sdev = kzalloc(sizeof(*sdev), GFP_KERNEL);
	if (!sdev) {
		ret = -ENOMEM;
		goto out;
	}
	sdev->dev = dev;

	ret = intel_iommu_enable_pasid(iommu, sdev);
	if (ret || !pasid) {
		/* If they don't actually want to assign a PASID, this is
		 * just an enabling check/preparation. */
		kfree(sdev);
		goto out;
	}
	/* Finish the setup now we know we're keeping it */
	sdev->users = 1;
	sdev->ops = ops;
	init_rcu_head(&sdev->rcu);

	if (!svm) {
		svm = kzalloc(sizeof(*svm), GFP_KERNEL);
		if (!svm) {
			ret = -ENOMEM;
			kfree(sdev);
			goto out;
		}
		svm->iommu = iommu;

		if (pasid_max > iommu->pasid_max)
			pasid_max = iommu->pasid_max;

		/* Do not use PASID 0 in caching mode (virtualised IOMMU) */
		ret = idr_alloc(&iommu->pasid_idr, svm,
				!!cap_caching_mode(iommu->cap),
				pasid_max - 1, GFP_KERNEL);
		if (ret < 0) {
			kfree(svm);
			goto out;
		}
		svm->pasid = ret;
		svm->notifier.ops = &intel_mmuops;
		svm->mm = mm;
		svm->flags = flags;
		INIT_LIST_HEAD_RCU(&svm->devs);
		ret = -ENOMEM;
		if (mm) {
			ret = mmu_notifier_register(&svm->notifier, mm);
			if (ret) {
				idr_remove(&svm->iommu->pasid_idr, svm->pasid);
				kfree(svm);
				kfree(sdev);
				goto out;
			}
			iommu->pasid_table[svm->pasid].val = (u64)__pa(mm->pgd) | 1;
		} else
			iommu->pasid_table[svm->pasid].val = (u64)__pa(init_mm.pgd) | 1 | (1ULL << 11);
		wmb();
		/* In caching mode, we still have to flush with PASID 0 when
		 * a PASID table entry becomes present. Not entirely clear
		 * *why* that would be the case — surely we could just issue
		 * a flush with the PASID value that we've changed? The PASID
		 * is the index into the table, after all. It's not like domain
		 * IDs in the case of the equivalent context-entry change in
		 * caching mode. And for that matter it's not entirely clear why
		 * a VMM would be in the business of caching the PASID table
		 * anyway. Surely that can be left entirely to the guest? */
		if (cap_caching_mode(iommu->cap))
			intel_flush_pasid_dev(svm, sdev, 0);
	}
	list_add_rcu(&sdev->list, &svm->devs);

 success:
	*pasid = svm->pasid;
	ret = 0;
 out:
	mutex_unlock(&pasid_mutex);
	if (mm)
		mmput(mm);
	return ret;
}
EXPORT_SYMBOL_GPL(intel_svm_bind_mm);
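/*
 * Minimal usage sketch (illustrative only, not part of this file; 'dev'
 * is a hypothetical SVM-capable PCI device already known to the IOMMU):
 *
 *	int pasid, ret;
 *
 *	ret = intel_svm_bind_mm(dev, &pasid, 0, NULL);
 *	if (ret)
 *		return ret;
 *	... program 'pasid' into the device's work descriptors ...
 *	intel_svm_unbind_mm(dev, pasid);
 */

/*
 * Drop one reference to the binding of 'pasid' on this device. On the
 * last unbind of the last device, free the PASID and unregister the MMU
 * notifier. No page faults may be outstanding for the PASID.
 */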
int intel_svm_unbind_mm(struct device *dev, int pasid)
{
	struct intel_svm_dev *sdev;
	struct intel_iommu *iommu;
	struct intel_svm *svm;
	int ret = -EINVAL;

	mutex_lock(&pasid_mutex);
	iommu = intel_svm_device_to_iommu(dev);
	if (!iommu || !iommu->pasid_table)
		goto out;

	svm = idr_find(&iommu->pasid_idr, pasid);
	if (!svm)
		goto out;

	list_for_each_entry(sdev, &svm->devs, list) {
		if (dev == sdev->dev) {
			ret = 0;
			sdev->users--;
			if (!sdev->users) {
				list_del_rcu(&sdev->list);
				/* Flush the PASID cache and IOTLB for this device.
				 * Note that we do depend on the hardware *not* using
				 * the PASID any more. Just as we depend on other
				 * devices never using PASIDs that they have no right
				 * to use. We have a *shared* PASID table, because it's
				 * large and has to be physically contiguous. So it's
				 * hard to be as defensive as we might like. */
				intel_flush_pasid_dev(svm, sdev, svm->pasid);
				intel_flush_svm_range_dev(svm, sdev, 0, -1, 0, !svm->mm);
				kfree_rcu(sdev, rcu);

				if (list_empty(&svm->devs)) {
					idr_remove(&svm->iommu->pasid_idr, svm->pasid);
					if (svm->mm)
						mmu_notifier_unregister(&svm->notifier, svm->mm);
					/* We mandate that no page faults may be outstanding
					 * for the PASID when intel_svm_unbind_mm() is called.
					 * If that is not obeyed, subtle errors will happen.
					 * Let's make them less subtle... */
					memset(svm, 0x6b, sizeof(*svm));
					kfree(svm);
				}
			}
			break;
		}
	}
 out:
	mutex_unlock(&pasid_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(intel_svm_unbind_mm);
/* Page request queue descriptor */
struct page_req_dsc {
	u64 srr:1;
	u64 bof:1;
	u64 pasid_present:1;
	u64 lpig:1;
	u64 pasid:20;
	u64 bus:8;
	u64 private:23;
	u64 prg_index:9;
	u64 rd_req:1;
	u64 wr_req:1;
	u64 exe_req:1;
	u64 priv_req:1;
	u64 devfn:8;
	u64 addr:52;
};

#define PRQ_RING_MASK ((0x1000 << PRQ_ORDER) - 0x10)
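/* Check that the requested access rights are permitted by the VMA. */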
static bool access_error(struct vm_area_struct *vma, struct page_req_dsc *req)
{
	unsigned long requested = 0;

	if (req->exe_req)
		requested |= VM_EXEC;

	if (req->rd_req)
		requested |= VM_READ;

	if (req->wr_req)
		requested |= VM_WRITE;

	return (requested & ~vma->vm_flags) != 0;
}
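/*
 * Threaded IRQ handler for the page request queue: walk the descriptors
 * between head and tail, service each fault via handle_mm_fault(), invoke
 * any registered per-device fault callback, and send the required page
 * group/stream response back to the hardware.
 */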
static irqreturn_t prq_event_thread(int irq, void *d)
{
	struct intel_iommu *iommu = d;
	struct intel_svm *svm = NULL;
	int head, tail, handled = 0;

	/* Clear PPR bit before reading head/tail registers, to
	 * ensure that we get a new interrupt if needed. */
	writel(DMA_PRS_PPR, iommu->reg + DMAR_PRS_REG);

	tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK;
	head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK;
	while (head != tail) {
		struct intel_svm_dev *sdev;
		struct vm_area_struct *vma;
		struct page_req_dsc *req;
		struct qi_desc resp;
		int ret, result;
		u64 address;

		handled = 1;

		req = &iommu->prq[head / sizeof(*req)];

		result = QI_RESP_FAILURE;
		address = (u64)req->addr << VTD_PAGE_SHIFT;
		if (!req->pasid_present) {
			pr_err("%s: Page request without PASID: %08llx %08llx\n",
			       iommu->name, ((unsigned long long *)req)[0],
			       ((unsigned long long *)req)[1]);
			goto bad_req;
		}

		if (!svm || svm->pasid != req->pasid) {
			rcu_read_lock();
			svm = idr_find(&iommu->pasid_idr, req->pasid);
			/* It *can't* go away, because the driver is not permitted
			 * to unbind the mm while any page faults are outstanding.
			 * So we only need RCU to protect the internal idr code. */
			rcu_read_unlock();

			if (!svm) {
				pr_err("%s: Page request for invalid PASID %d: %08llx %08llx\n",
				       iommu->name, req->pasid, ((unsigned long long *)req)[0],
				       ((unsigned long long *)req)[1]);
				goto no_pasid;
			}
		}

		result = QI_RESP_INVALID;
		/* Since we're using init_mm.pgd directly, we should never take
		 * any faults on kernel addresses. */
		if (!svm->mm)
			goto bad_req;
		/* If the mm is already defunct, don't handle faults. */
		if (!mmget_not_zero(svm->mm))
			goto bad_req;
		down_read(&svm->mm->mmap_sem);
		vma = find_extend_vma(svm->mm, address);
		if (!vma || address < vma->vm_start)
			goto invalid;

		if (access_error(vma, req))
			goto invalid;

		ret = handle_mm_fault(vma, address,
				      req->wr_req ? FAULT_FLAG_WRITE : 0);
		if (ret & VM_FAULT_ERROR)
			goto invalid;

		result = QI_RESP_SUCCESS;
	invalid:
		up_read(&svm->mm->mmap_sem);
		mmput(svm->mm);
	bad_req:
		/* Accounting for major/minor faults? */
		rcu_read_lock();
		list_for_each_entry_rcu(sdev, &svm->devs, list) {
			if (sdev->sid == PCI_DEVID(req->bus, req->devfn))
				break;
		}
		/* Other devices can go away, but the drivers are not permitted
		 * to unbind while any page faults might be in flight. So it's
		 * OK to drop the 'lock' here now we have it. */
		rcu_read_unlock();

		if (WARN_ON(&sdev->list == &svm->devs))
			sdev = NULL;

		if (sdev && sdev->ops && sdev->ops->fault_cb) {
			int rwxp = (req->rd_req << 3) | (req->wr_req << 2) |
				(req->exe_req << 1) | (req->priv_req);
			sdev->ops->fault_cb(sdev->dev, req->pasid, req->addr, req->private, rwxp, result);
		}
		/* We get here in the error case where the PASID lookup failed,
		   and these can be NULL. Do not use them below this point! */
		sdev = NULL;
		svm = NULL;
	no_pasid:
		if (req->lpig) {
			/* Page Group Response */
			resp.low = QI_PGRP_PASID(req->pasid) |
				QI_PGRP_DID((req->bus << 8) | req->devfn) |
				QI_PGRP_PASID_P(req->pasid_present) |
				QI_PGRP_RESP_TYPE;
			resp.high = QI_PGRP_IDX(req->prg_index) |
				QI_PGRP_PRIV(req->private) | QI_PGRP_RESP_CODE(result);
			qi_submit_sync(&resp, iommu);
		} else if (req->srr) {
			/* Page Stream Response */
			resp.low = QI_PSTRM_IDX(req->prg_index) |
				QI_PSTRM_PRIV(req->private) | QI_PSTRM_BUS(req->bus) |
				QI_PSTRM_PASID(req->pasid) | QI_PSTRM_RESP_TYPE;
			resp.high = QI_PSTRM_ADDR(address) | QI_PSTRM_DEVFN(req->devfn) |
				QI_PSTRM_RESP_CODE(result);
			qi_submit_sync(&resp, iommu);
		}

		head = (head + sizeof(*req)) & PRQ_RING_MASK;
	}

	dmar_writeq(iommu->reg + DMAR_PQH_REG, tail);

	return IRQ_RETVAL(handled);
}