intel-svm.c

/*
 * Copyright © 2015 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * Authors: David Woodhouse <dwmw2@infradead.org>
 */

#include <linux/intel-iommu.h>
#include <linux/mmu_notifier.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>
#include <linux/intel-svm.h>
#include <linux/rculist.h>
#include <linux/pci.h>
#include <linux/pci-ats.h>
#include <linux/dmar.h>
#include <linux/interrupt.h>
#include <asm/page.h>

static irqreturn_t prq_event_thread(int irq, void *d);

struct pasid_entry {
	u64 val;
};

struct pasid_state_entry {
	u64 val;
};
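/*
 * A worked example of the sizing in intel_svm_alloc_pasid_tables() below
 * (illustrative only; the numbers assume the 8-byte pasid_entry defined
 * above): an ecap PSS value of 19 means 2 << 19 = 2^20 PASIDs, i.e. an
 * 8MiB physically contiguous PASID table. The 0x20000 cap limits that to
 * 0x20000 * 8 = 1MiB for the PASID table, plus another 1MiB for the
 * PASID-state table when the hardware supports deferred invalidation.
 */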
int intel_svm_alloc_pasid_tables(struct intel_iommu *iommu)
{
	struct page *pages;
	int order;

	/* Start at 2 because it's defined as 2^(1+PSS) */
	iommu->pasid_max = 2 << ecap_pss(iommu->ecap);

	/* Eventually I'm promised we will get a multi-level PASID table
	 * and it won't have to be physically contiguous. Until then,
	 * limit the size because 8MiB contiguous allocations can be hard
	 * to come by. The limit of 0x20000, which is 1MiB for each of
	 * the PASID and PASID-state tables, is somewhat arbitrary. */
	if (iommu->pasid_max > 0x20000)
		iommu->pasid_max = 0x20000;

	order = get_order(sizeof(struct pasid_entry) * iommu->pasid_max);
	pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
	if (!pages) {
		pr_warn("IOMMU: %s: Failed to allocate PASID table\n",
			iommu->name);
		return -ENOMEM;
	}
	iommu->pasid_table = page_address(pages);
	pr_info("%s: Allocated order %d PASID table.\n", iommu->name, order);

	if (ecap_dis(iommu->ecap)) {
		/* Just making it explicit... */
		BUILD_BUG_ON(sizeof(struct pasid_entry) != sizeof(struct pasid_state_entry));
		pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
		if (pages)
			iommu->pasid_state_table = page_address(pages);
		else
			pr_warn("IOMMU: %s: Failed to allocate PASID state table\n",
				iommu->name);
	}

	idr_init(&iommu->pasid_idr);

	return 0;
}
int intel_svm_free_pasid_tables(struct intel_iommu *iommu)
{
	int order = get_order(sizeof(struct pasid_entry) * iommu->pasid_max);

	if (iommu->pasid_table) {
		free_pages((unsigned long)iommu->pasid_table, order);
		iommu->pasid_table = NULL;
	}
	if (iommu->pasid_state_table) {
		free_pages((unsigned long)iommu->pasid_state_table, order);
		iommu->pasid_state_table = NULL;
	}
	idr_destroy(&iommu->pasid_idr);
	return 0;
}
#define PRQ_ORDER 0

int intel_svm_enable_prq(struct intel_iommu *iommu)
{
	struct page *pages;
	int irq, ret;

	pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, PRQ_ORDER);
	if (!pages) {
		pr_warn("IOMMU: %s: Failed to allocate page request queue\n",
			iommu->name);
		return -ENOMEM;
	}
	iommu->prq = page_address(pages);

	irq = dmar_alloc_hwirq(DMAR_UNITS_SUPPORTED + iommu->seq_id, iommu->node, iommu);
	if (irq <= 0) {
		pr_err("IOMMU: %s: Failed to create IRQ vector for page request queue\n",
		       iommu->name);
		ret = -EINVAL;
	err:
		free_pages((unsigned long)iommu->prq, PRQ_ORDER);
		iommu->prq = NULL;
		return ret;
	}
	iommu->pr_irq = irq;

	snprintf(iommu->prq_name, sizeof(iommu->prq_name), "dmar%d-prq", iommu->seq_id);

	ret = request_threaded_irq(irq, NULL, prq_event_thread, IRQF_ONESHOT,
				   iommu->prq_name, iommu);
	if (ret) {
		pr_err("IOMMU: %s: Failed to request IRQ for page request queue\n",
		       iommu->name);
		dmar_free_hwirq(irq);
		goto err;
	}
	dmar_writeq(iommu->reg + DMAR_PQH_REG, 0ULL);
	dmar_writeq(iommu->reg + DMAR_PQT_REG, 0ULL);
	dmar_writeq(iommu->reg + DMAR_PQA_REG, virt_to_phys(iommu->prq) | PRQ_ORDER);

	return 0;
}

int intel_svm_finish_prq(struct intel_iommu *iommu)
{
	dmar_writeq(iommu->reg + DMAR_PQH_REG, 0ULL);
	dmar_writeq(iommu->reg + DMAR_PQT_REG, 0ULL);
	dmar_writeq(iommu->reg + DMAR_PQA_REG, 0ULL);

	free_irq(iommu->pr_irq, iommu);
	dmar_free_hwirq(iommu->pr_irq);
	iommu->pr_irq = 0;

	free_pages((unsigned long)iommu->prq, PRQ_ORDER);
	iommu->prq = NULL;

	return 0;
}
static void intel_flush_svm_range_dev(struct intel_svm *svm, struct intel_svm_dev *sdev,
				      unsigned long address, unsigned long pages, int ih, int gl)
{
	struct qi_desc desc;

	if (pages == -1) {
		/* For global kernel pages we have to flush them in *all* PASIDs
		 * because that's the only option the hardware gives us. Despite
		 * the fact that they are actually only accessible through one. */
		if (gl)
			desc.low = QI_EIOTLB_PASID(svm->pasid) | QI_EIOTLB_DID(sdev->did) |
				QI_EIOTLB_GRAN(QI_GRAN_ALL_ALL) | QI_EIOTLB_TYPE;
		else
			desc.low = QI_EIOTLB_PASID(svm->pasid) | QI_EIOTLB_DID(sdev->did) |
				QI_EIOTLB_GRAN(QI_GRAN_NONG_PASID) | QI_EIOTLB_TYPE;
		desc.high = 0;
	} else {
		int mask = ilog2(__roundup_pow_of_two(pages));

		desc.low = QI_EIOTLB_PASID(svm->pasid) | QI_EIOTLB_DID(sdev->did) |
			QI_EIOTLB_GRAN(QI_GRAN_PSI_PASID) | QI_EIOTLB_TYPE;
		desc.high = QI_EIOTLB_ADDR(address) | QI_EIOTLB_GL(gl) |
			QI_EIOTLB_IH(ih) | QI_EIOTLB_AM(mask);
	}
	qi_submit_sync(&desc, svm->iommu);

	if (sdev->dev_iotlb) {
		desc.low = QI_DEV_EIOTLB_PASID(svm->pasid) | QI_DEV_EIOTLB_SID(sdev->sid) |
			QI_DEV_EIOTLB_QDEP(sdev->qdep) | QI_DEIOTLB_TYPE;
		if (pages == -1) {
			desc.high = QI_DEV_EIOTLB_ADDR(-1ULL >> 1) | QI_DEV_EIOTLB_SIZE;
		} else if (pages > 1) {
			/* The least significant zero bit indicates the size. So,
			 * for example, an "address" value of 0x12345f000 will
			 * flush from 0x123440000 to 0x12347ffff (256KiB). */
			unsigned long last = address + ((unsigned long)(pages - 1) << VTD_PAGE_SHIFT);
			unsigned long mask = __rounddown_pow_of_two(address ^ last);

			desc.high = QI_DEV_EIOTLB_ADDR((address & ~mask) | (mask - 1)) | QI_DEV_EIOTLB_SIZE;
		} else {
			desc.high = QI_DEV_EIOTLB_ADDR(address);
		}
		qi_submit_sync(&desc, svm->iommu);
	}
}
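/*
 * Summary of the flush above (descriptive comment only, based on the code
 * in this file): the first descriptor invalidates the IOMMU's extended
 * IOTLB for the PASID (globally, per-PASID, or by address range), and the
 * second, issued only when the endpoint has a device-IOTLB (sdev->dev_iotlb,
 * i.e. ATS is in use), tells the device itself to drop its cached
 * translations for the same range.
 */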
static void intel_flush_svm_range(struct intel_svm *svm, unsigned long address,
				  unsigned long pages, int ih, int gl)
{
	struct intel_svm_dev *sdev;

	/* Try deferred invalidate if available */
	if (svm->iommu->pasid_state_table &&
	    !cmpxchg64(&svm->iommu->pasid_state_table[svm->pasid].val, 0, 1ULL << 63))
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(sdev, &svm->devs, list)
		intel_flush_svm_range_dev(svm, sdev, address, pages, ih, gl);
	rcu_read_unlock();
}

static void intel_change_pte(struct mmu_notifier *mn, struct mm_struct *mm,
			     unsigned long address, pte_t pte)
{
	struct intel_svm *svm = container_of(mn, struct intel_svm, notifier);

	intel_flush_svm_range(svm, address, 1, 1, 0);
}

static void intel_invalidate_page(struct mmu_notifier *mn, struct mm_struct *mm,
				  unsigned long address)
{
	struct intel_svm *svm = container_of(mn, struct intel_svm, notifier);

	intel_flush_svm_range(svm, address, 1, 1, 0);
}

/* Pages have been freed at this point */
static void intel_invalidate_range(struct mmu_notifier *mn,
				   struct mm_struct *mm,
				   unsigned long start, unsigned long end)
{
	struct intel_svm *svm = container_of(mn, struct intel_svm, notifier);

	intel_flush_svm_range(svm, start,
			      (end - start + PAGE_SIZE - 1) >> VTD_PAGE_SHIFT, 0, 0);
}
static void intel_flush_pasid_dev(struct intel_svm *svm, struct intel_svm_dev *sdev, int pasid)
{
	struct qi_desc desc;

	desc.high = 0;
	desc.low = QI_PC_TYPE | QI_PC_DID(sdev->did) | QI_PC_PASID_SEL | QI_PC_PASID(pasid);

	qi_submit_sync(&desc, svm->iommu);
}

static void intel_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
	struct intel_svm *svm = container_of(mn, struct intel_svm, notifier);
	struct intel_svm_dev *sdev;

	/* This might end up being called from exit_mmap(), *before* the page
	 * tables are cleared. And __mmu_notifier_release() will delete us from
	 * the list of notifiers so that our invalidate_range() callback doesn't
	 * get called when the page tables are cleared. So we need to protect
	 * against hardware accessing those page tables.
	 *
	 * We do it by clearing the entry in the PASID table and then flushing
	 * the IOTLB and the PASID table caches. This might upset hardware;
	 * perhaps we'll want to point the PASID to a dummy PGD (like the zero
	 * page) so that we end up taking a fault that the hardware really
	 * *has* to handle gracefully without affecting other processes.
	 */
	svm->iommu->pasid_table[svm->pasid].val = 0;
	wmb();

	rcu_read_lock();
	list_for_each_entry_rcu(sdev, &svm->devs, list) {
		intel_flush_pasid_dev(svm, sdev, svm->pasid);
		intel_flush_svm_range_dev(svm, sdev, 0, -1, 0, !svm->mm);
	}
	rcu_read_unlock();
}

static const struct mmu_notifier_ops intel_mmuops = {
	.release = intel_mm_release,
	.change_pte = intel_change_pte,
	.invalidate_page = intel_invalidate_page,
	.invalidate_range = intel_invalidate_range,
};
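/*
 * Roughly how the notifier keeps the IOMMU coherent (descriptive comment
 * only): change_pte, invalidate_page and invalidate_range flush the
 * affected range from the IOTLB (and any device-IOTLBs) whenever the CPU
 * page tables change, while release tears down the PASID table entry when
 * the mm exits so that in-flight DMA can no longer walk the dying page
 * tables.
 */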
static DEFINE_MUTEX(pasid_mutex);

int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_ops *ops)
{
	struct intel_iommu *iommu = intel_svm_device_to_iommu(dev);
	struct intel_svm_dev *sdev;
	struct intel_svm *svm = NULL;
	struct mm_struct *mm = NULL;
	int pasid_max;
	int ret;

	if (WARN_ON(!iommu))
		return -EINVAL;

	if (dev_is_pci(dev)) {
		pasid_max = pci_max_pasids(to_pci_dev(dev));
		if (pasid_max < 0)
			return -EINVAL;
	} else
		pasid_max = 1 << 20;

	if ((flags & SVM_FLAG_SUPERVISOR_MODE)) {
		if (!ecap_srs(iommu->ecap))
			return -EINVAL;
	} else if (pasid) {
		mm = get_task_mm(current);
		BUG_ON(!mm);
	}

	mutex_lock(&pasid_mutex);
	if (pasid && !(flags & SVM_FLAG_PRIVATE_PASID)) {
		int i;

		idr_for_each_entry(&iommu->pasid_idr, svm, i) {
			if (svm->mm != mm ||
			    (svm->flags & SVM_FLAG_PRIVATE_PASID))
				continue;

			if (svm->pasid >= pasid_max) {
				dev_warn(dev,
					 "Limited PASID width. Cannot use existing PASID %d\n",
					 svm->pasid);
				ret = -ENOSPC;
				goto out;
			}

			list_for_each_entry(sdev, &svm->devs, list) {
				if (dev == sdev->dev) {
					if (sdev->ops != ops) {
						ret = -EBUSY;
						goto out;
					}
					sdev->users++;
					goto success;
				}
			}

			break;
		}
	}

	sdev = kzalloc(sizeof(*sdev), GFP_KERNEL);
	if (!sdev) {
		ret = -ENOMEM;
		goto out;
	}
	sdev->dev = dev;

	ret = intel_iommu_enable_pasid(iommu, sdev);
	if (ret || !pasid) {
		/* If they don't actually want to assign a PASID, this is
		 * just an enabling check/preparation. */
		kfree(sdev);
		goto out;
	}
	/* Finish the setup now we know we're keeping it */
	sdev->users = 1;
	sdev->ops = ops;
	init_rcu_head(&sdev->rcu);

	if (!svm) {
		svm = kzalloc(sizeof(*svm), GFP_KERNEL);
		if (!svm) {
			ret = -ENOMEM;
			kfree(sdev);
			goto out;
		}
		svm->iommu = iommu;

		if (pasid_max > iommu->pasid_max)
			pasid_max = iommu->pasid_max;

		/* Do not use PASID 0 in caching mode (virtualised IOMMU) */
		ret = idr_alloc(&iommu->pasid_idr, svm,
				!!cap_caching_mode(iommu->cap),
				pasid_max - 1, GFP_KERNEL);
		if (ret < 0) {
			kfree(svm);
			goto out;
		}
		svm->pasid = ret;
		svm->notifier.ops = &intel_mmuops;
		svm->mm = mm;
		svm->flags = flags;
		INIT_LIST_HEAD_RCU(&svm->devs);
		ret = -ENOMEM;
		if (mm) {
			ret = mmu_notifier_register(&svm->notifier, mm);
			if (ret) {
				idr_remove(&svm->iommu->pasid_idr, svm->pasid);
				kfree(svm);
				kfree(sdev);
				goto out;
			}
			iommu->pasid_table[svm->pasid].val = (u64)__pa(mm->pgd) | 1;
		} else
			iommu->pasid_table[svm->pasid].val = (u64)__pa(init_mm.pgd) | 1 | (1ULL << 11);
		wmb();
		/* In caching mode, we still have to flush with PASID 0 when
		 * a PASID table entry becomes present. Not entirely clear
		 * *why* that would be the case; surely we could just issue
		 * a flush with the PASID value that we've changed? The PASID
		 * is the index into the table, after all. It's not like domain
		 * IDs in the case of the equivalent context-entry change in
		 * caching mode. And for that matter it's not entirely clear why
		 * a VMM would be in the business of caching the PASID table
		 * anyway. Surely that can be left entirely to the guest? */
		if (cap_caching_mode(iommu->cap))
			intel_flush_pasid_dev(svm, sdev, 0);
	}
	list_add_rcu(&sdev->list, &svm->devs);

 success:
	*pasid = svm->pasid;
	ret = 0;
 out:
	mutex_unlock(&pasid_mutex);
	if (mm)
		mmput(mm);
	return ret;
}
EXPORT_SYMBOL_GPL(intel_svm_bind_mm);
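/*
 * Illustrative usage only (not part of this file's API documentation; the
 * names my_dev and my_fault_ops are hypothetical placeholders for a
 * driver's own device and svm_dev_ops). A device driver that wants to
 * share a process address space would call, from process context:
 *
 *	int pasid;
 *	int ret = intel_svm_bind_mm(my_dev, &pasid, 0, &my_fault_ops);
 *	if (ret)
 *		return ret;
 *	(program 'pasid' into the device, issue PASID-tagged DMA, then...)
 *	intel_svm_unbind_mm(my_dev, pasid);
 *
 * Passing a NULL 'pasid' pointer only performs the enabling
 * check/preparation, as noted in intel_svm_bind_mm() above.
 */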
int intel_svm_unbind_mm(struct device *dev, int pasid)
{
	struct intel_svm_dev *sdev;
	struct intel_iommu *iommu;
	struct intel_svm *svm;
	int ret = -EINVAL;

	mutex_lock(&pasid_mutex);
	iommu = intel_svm_device_to_iommu(dev);
	if (!iommu || !iommu->pasid_table)
		goto out;

	svm = idr_find(&iommu->pasid_idr, pasid);
	if (!svm)
		goto out;

	list_for_each_entry(sdev, &svm->devs, list) {
		if (dev == sdev->dev) {
			ret = 0;
			sdev->users--;
			if (!sdev->users) {
				list_del_rcu(&sdev->list);
				/* Flush the PASID cache and IOTLB for this device.
				 * Note that we do depend on the hardware *not* using
				 * the PASID any more. Just as we depend on other
				 * devices never using PASIDs that they have no right
				 * to use. We have a *shared* PASID table, because it's
				 * large and has to be physically contiguous. So it's
				 * hard to be as defensive as we might like. */
				intel_flush_pasid_dev(svm, sdev, svm->pasid);
				intel_flush_svm_range_dev(svm, sdev, 0, -1, 0, !svm->mm);
				kfree_rcu(sdev, rcu);

				if (list_empty(&svm->devs)) {
					idr_remove(&svm->iommu->pasid_idr, svm->pasid);
					if (svm->mm)
						mmu_notifier_unregister(&svm->notifier, svm->mm);
					/* We mandate that no page faults may be outstanding
					 * for the PASID when intel_svm_unbind_mm() is called.
					 * If that is not obeyed, subtle errors will happen.
					 * Let's make them less subtle... */
					memset(svm, 0x6b, sizeof(*svm));
					kfree(svm);
				}
			}
			break;
		}
	}
 out:
	mutex_unlock(&pasid_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(intel_svm_unbind_mm);

int intel_svm_is_pasid_valid(struct device *dev, int pasid)
{
	struct intel_iommu *iommu;
	struct intel_svm *svm;
	int ret = -EINVAL;

	mutex_lock(&pasid_mutex);
	iommu = intel_svm_device_to_iommu(dev);
	if (!iommu || !iommu->pasid_table)
		goto out;

	svm = idr_find(&iommu->pasid_idr, pasid);
	if (!svm)
		goto out;

	/* init_mm is used in this case */
	if (!svm->mm)
		ret = 1;
	else if (atomic_read(&svm->mm->mm_users) > 0)
		ret = 1;
	else
		ret = 0;

 out:
	mutex_unlock(&pasid_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(intel_svm_is_pasid_valid);
/* Page request queue descriptor */
struct page_req_dsc {
	u64 srr:1;
	u64 bof:1;
	u64 pasid_present:1;
	u64 lpig:1;
	u64 pasid:20;
	u64 bus:8;
	u64 private:23;
	u64 prg_index:9;
	u64 rd_req:1;
	u64 wr_req:1;
	u64 exe_req:1;
	u64 priv_req:1;
	u64 devfn:8;
	u64 addr:52;
};

#define PRQ_RING_MASK ((0x1000 << PRQ_ORDER) - 0x10)
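/*
 * Ring geometry, spelled out (derived from the definitions above): a
 * page_req_dsc is two u64s, i.e. 0x10 bytes, and PRQ_ORDER 0 gives one
 * 4KiB (0x1000-byte) queue page, so the ring holds 256 descriptors.
 * PRQ_RING_MASK is then 0xff0; the head/tail byte offsets are always
 * multiples of 0x10, so ANDing with it simply wraps them modulo the ring
 * size, e.g. (0xff0 + 0x10) & 0xff0 == 0.
 */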
static bool access_error(struct vm_area_struct *vma, struct page_req_dsc *req)
{
	unsigned long requested = 0;

	if (req->exe_req)
		requested |= VM_EXEC;

	if (req->rd_req)
		requested |= VM_READ;

	if (req->wr_req)
		requested |= VM_WRITE;

	return (requested & ~vma->vm_flags) != 0;
}

static bool is_canonical_address(u64 addr)
{
	int shift = 64 - (__VIRTUAL_MASK_SHIFT + 1);
	long saddr = (long) addr;

	return (((saddr << shift) >> shift) == saddr);
}
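/*
 * For illustration (assuming x86-64 with 4-level paging, where
 * __VIRTUAL_MASK_SHIFT is 47): shift is 16, so the arithmetic shift pair
 * above sign-extends bit 47 through bits 63:48, and the address is
 * canonical only if that round-trip leaves it unchanged.
 */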
static irqreturn_t prq_event_thread(int irq, void *d)
{
	struct intel_iommu *iommu = d;
	struct intel_svm *svm = NULL;
	int head, tail, handled = 0;

	/* Clear PPR bit before reading head/tail registers, to
	 * ensure that we get a new interrupt if needed. */
	writel(DMA_PRS_PPR, iommu->reg + DMAR_PRS_REG);

	tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK;
	head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK;
	while (head != tail) {
		struct intel_svm_dev *sdev;
		struct vm_area_struct *vma;
		struct page_req_dsc *req;
		struct qi_desc resp;
		int ret, result;
		u64 address;

		handled = 1;

		req = &iommu->prq[head / sizeof(*req)];

		result = QI_RESP_FAILURE;
		address = (u64)req->addr << VTD_PAGE_SHIFT;
		if (!req->pasid_present) {
			pr_err("%s: Page request without PASID: %08llx %08llx\n",
			       iommu->name, ((unsigned long long *)req)[0],
			       ((unsigned long long *)req)[1]);
			goto bad_req;
		}

		if (!svm || svm->pasid != req->pasid) {
			rcu_read_lock();
			svm = idr_find(&iommu->pasid_idr, req->pasid);
			/* It *can't* go away, because the driver is not permitted
			 * to unbind the mm while any page faults are outstanding.
			 * So we only need RCU to protect the internal idr code. */
			rcu_read_unlock();

			if (!svm) {
				pr_err("%s: Page request for invalid PASID %d: %08llx %08llx\n",
				       iommu->name, req->pasid, ((unsigned long long *)req)[0],
				       ((unsigned long long *)req)[1]);
				goto no_pasid;
			}
		}

		result = QI_RESP_INVALID;
		/* Since we're using init_mm.pgd directly, we should never take
		 * any faults on kernel addresses. */
		if (!svm->mm)
			goto bad_req;

		/* If the mm is already defunct, don't handle faults. */
		if (!mmget_not_zero(svm->mm))
			goto bad_req;

		/* If address is not canonical, return invalid response */
		if (!is_canonical_address(address))
			goto bad_req;

		down_read(&svm->mm->mmap_sem);
		vma = find_extend_vma(svm->mm, address);
		if (!vma || address < vma->vm_start)
			goto invalid;

		if (access_error(vma, req))
			goto invalid;

		ret = handle_mm_fault(vma, address,
				      req->wr_req ? FAULT_FLAG_WRITE : 0);
		if (ret & VM_FAULT_ERROR)
			goto invalid;

		result = QI_RESP_SUCCESS;
	invalid:
		up_read(&svm->mm->mmap_sem);
		mmput(svm->mm);
	bad_req:
		/* Accounting for major/minor faults? */
		rcu_read_lock();
		list_for_each_entry_rcu(sdev, &svm->devs, list) {
			if (sdev->sid == PCI_DEVID(req->bus, req->devfn))
				break;
		}
		/* Other devices can go away, but the drivers are not permitted
		 * to unbind while any page faults might be in flight. So it's
		 * OK to drop the 'lock' here now we have it. */
		rcu_read_unlock();

		if (WARN_ON(&sdev->list == &svm->devs))
			sdev = NULL;

		if (sdev && sdev->ops && sdev->ops->fault_cb) {
			int rwxp = (req->rd_req << 3) | (req->wr_req << 2) |
				(req->exe_req << 1) | (req->priv_req);
			sdev->ops->fault_cb(sdev->dev, req->pasid, req->addr, req->private, rwxp, result);
		}
		/* We get here in the error case where the PASID lookup failed,
		 * and these can be NULL. Do not use them below this point! */
		sdev = NULL;
		svm = NULL;
	no_pasid:
		if (req->lpig) {
			/* Page Group Response */
			resp.low = QI_PGRP_PASID(req->pasid) |
				QI_PGRP_DID((req->bus << 8) | req->devfn) |
				QI_PGRP_PASID_P(req->pasid_present) |
				QI_PGRP_RESP_TYPE;
			resp.high = QI_PGRP_IDX(req->prg_index) |
				QI_PGRP_PRIV(req->private) | QI_PGRP_RESP_CODE(result);
			qi_submit_sync(&resp, iommu);
		} else if (req->srr) {
			/* Page Stream Response */
			resp.low = QI_PSTRM_IDX(req->prg_index) |
				QI_PSTRM_PRIV(req->private) | QI_PSTRM_BUS(req->bus) |
				QI_PSTRM_PASID(req->pasid) | QI_PSTRM_RESP_TYPE;
			resp.high = QI_PSTRM_ADDR(address) | QI_PSTRM_DEVFN(req->devfn) |
				QI_PSTRM_RESP_CODE(result);
			qi_submit_sync(&resp, iommu);
		}

		head = (head + sizeof(*req)) & PRQ_RING_MASK;
	}

	dmar_writeq(iommu->reg + DMAR_PQH_REG, tail);

	return IRQ_RETVAL(handled);
}