/*
 * Copyright © 2015 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * Authors: David Woodhouse <dwmw2@infradead.org>
 */

#include <linux/intel-iommu.h>
#include <linux/mmu_notifier.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>
#include <linux/intel-svm.h>
#include <linux/rculist.h>
#include <linux/pci.h>
#include <linux/pci-ats.h>
#include <linux/dmar.h>
#include <linux/interrupt.h>
#include <asm/page.h>

#include "intel-pasid.h"

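/*
 * Bits in a first-level PASID table entry as used below: Present,
 * first-level paging uses 5-level page tables, and Supervisor Request
 * Enable (privileged requests permitted).
 */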
#define PASID_ENTRY_P		BIT_ULL(0)
#define PASID_ENTRY_FLPM_5LP	BIT_ULL(9)
#define PASID_ENTRY_SRE		BIT_ULL(11)

static irqreturn_t prq_event_thread(int irq, void *d);

struct pasid_entry {
	u64 val;
};

struct pasid_state_entry {
	u64 val;
};

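/*
 * Allocate the physically contiguous per-IOMMU PASID table (and, when
 * the hardware supports deferred invalidation, a matching PASID-state
 * table), after checking that the IOMMU supports the page-table
 * features (1GiB pages, 5-level paging) the CPU may use.
 */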
int intel_svm_alloc_pasid_tables(struct intel_iommu *iommu)
{
	struct page *pages;
	int order;

	if (cpu_feature_enabled(X86_FEATURE_GBPAGES) &&
			!cap_fl1gp_support(iommu->cap))
		return -EINVAL;

	if (cpu_feature_enabled(X86_FEATURE_LA57) &&
			!cap_5lp_support(iommu->cap))
		return -EINVAL;

	/* Start at 2 because it's defined as 2^(1+PSS) */
	iommu->pasid_max = 2 << ecap_pss(iommu->ecap);

	/* Eventually I'm promised we will get a multi-level PASID table
	 * and it won't have to be physically contiguous. Until then,
	 * limit the size because 8MiB contiguous allocations can be hard
	 * to come by. The limit of 0x20000, which is 1MiB for each of
	 * the PASID and PASID-state tables, is somewhat arbitrary. */
	if (iommu->pasid_max > 0x20000)
		iommu->pasid_max = 0x20000;

	order = get_order(sizeof(struct pasid_entry) * iommu->pasid_max);
	pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
	if (!pages) {
		pr_warn("IOMMU: %s: Failed to allocate PASID table\n",
			iommu->name);
		return -ENOMEM;
	}
	iommu->pasid_table = page_address(pages);
	pr_info("%s: Allocated order %d PASID table.\n", iommu->name, order);

	if (ecap_dis(iommu->ecap)) {
		/* Just making it explicit... */
		BUILD_BUG_ON(sizeof(struct pasid_entry) != sizeof(struct pasid_state_entry));
		pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
		if (pages)
			iommu->pasid_state_table = page_address(pages);
		else
			pr_warn("IOMMU: %s: Failed to allocate PASID state table\n",
				iommu->name);
	}

	return 0;
}

int intel_svm_free_pasid_tables(struct intel_iommu *iommu)
{
	int order = get_order(sizeof(struct pasid_entry) * iommu->pasid_max);

	if (iommu->pasid_table) {
		free_pages((unsigned long)iommu->pasid_table, order);
		iommu->pasid_table = NULL;
	}

	if (iommu->pasid_state_table) {
		free_pages((unsigned long)iommu->pasid_state_table, order);
		iommu->pasid_state_table = NULL;
	}

	return 0;
}

#define PRQ_ORDER 0

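/*
 * Set up the page request queue: allocate the queue pages, request a
 * threaded interrupt for page request events, and program the queue
 * head, tail and address registers.
 */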
int intel_svm_enable_prq(struct intel_iommu *iommu)
{
	struct page *pages;
	int irq, ret;

	pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, PRQ_ORDER);
	if (!pages) {
		pr_warn("IOMMU: %s: Failed to allocate page request queue\n",
			iommu->name);
		return -ENOMEM;
	}
	iommu->prq = page_address(pages);

	irq = dmar_alloc_hwirq(DMAR_UNITS_SUPPORTED + iommu->seq_id, iommu->node, iommu);
	if (irq <= 0) {
		pr_err("IOMMU: %s: Failed to create IRQ vector for page request queue\n",
		       iommu->name);
		ret = -EINVAL;
	err:
		free_pages((unsigned long)iommu->prq, PRQ_ORDER);
		iommu->prq = NULL;
		return ret;
	}
	iommu->pr_irq = irq;

	snprintf(iommu->prq_name, sizeof(iommu->prq_name), "dmar%d-prq", iommu->seq_id);

	ret = request_threaded_irq(irq, NULL, prq_event_thread, IRQF_ONESHOT,
				   iommu->prq_name, iommu);
	if (ret) {
		pr_err("IOMMU: %s: Failed to request IRQ for page request queue\n",
		       iommu->name);
		dmar_free_hwirq(irq);
		iommu->pr_irq = 0;
		goto err;
	}

	dmar_writeq(iommu->reg + DMAR_PQH_REG, 0ULL);
	dmar_writeq(iommu->reg + DMAR_PQT_REG, 0ULL);
	dmar_writeq(iommu->reg + DMAR_PQA_REG, virt_to_phys(iommu->prq) | PRQ_ORDER);

	return 0;
}

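/* Tear down the page request queue: disable it in hardware, then free
 * the interrupt and the queue pages. */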
int intel_svm_finish_prq(struct intel_iommu *iommu)
{
	dmar_writeq(iommu->reg + DMAR_PQH_REG, 0ULL);
	dmar_writeq(iommu->reg + DMAR_PQT_REG, 0ULL);
	dmar_writeq(iommu->reg + DMAR_PQA_REG, 0ULL);

	if (iommu->pr_irq) {
		free_irq(iommu->pr_irq, iommu);
		dmar_free_hwirq(iommu->pr_irq);
		iommu->pr_irq = 0;
	}

	free_pages((unsigned long)iommu->prq, PRQ_ORDER);
	iommu->prq = NULL;

	return 0;
}

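/*
 * Invalidate the extended IOTLB (and the device's IOTLB when ATS is in
 * use) for one device bound to this PASID. A 'pages' value of -1 means
 * flush the entire address space; 'ih' is the invalidation hint and
 * 'gl' selects global mappings.
 */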
static void intel_flush_svm_range_dev(struct intel_svm *svm, struct intel_svm_dev *sdev,
				      unsigned long address, unsigned long pages, int ih, int gl)
{
	struct qi_desc desc;

	if (pages == -1) {
		/* For global kernel pages we have to flush them in *all* PASIDs
		 * because that's the only option the hardware gives us. Despite
		 * the fact that they are actually only accessible through one. */
		if (gl)
			desc.low = QI_EIOTLB_PASID(svm->pasid) | QI_EIOTLB_DID(sdev->did) |
				QI_EIOTLB_GRAN(QI_GRAN_ALL_ALL) | QI_EIOTLB_TYPE;
		else
			desc.low = QI_EIOTLB_PASID(svm->pasid) | QI_EIOTLB_DID(sdev->did) |
				QI_EIOTLB_GRAN(QI_GRAN_NONG_PASID) | QI_EIOTLB_TYPE;
		desc.high = 0;
	} else {
		int mask = ilog2(__roundup_pow_of_two(pages));

		desc.low = QI_EIOTLB_PASID(svm->pasid) | QI_EIOTLB_DID(sdev->did) |
			QI_EIOTLB_GRAN(QI_GRAN_PSI_PASID) | QI_EIOTLB_TYPE;
		desc.high = QI_EIOTLB_ADDR(address) | QI_EIOTLB_GL(gl) |
			QI_EIOTLB_IH(ih) | QI_EIOTLB_AM(mask);
	}
	qi_submit_sync(&desc, svm->iommu);

	if (sdev->dev_iotlb) {
		desc.low = QI_DEV_EIOTLB_PASID(svm->pasid) | QI_DEV_EIOTLB_SID(sdev->sid) |
			QI_DEV_EIOTLB_QDEP(sdev->qdep) | QI_DEIOTLB_TYPE;
		if (pages == -1) {
			desc.high = QI_DEV_EIOTLB_ADDR(-1ULL >> 1) | QI_DEV_EIOTLB_SIZE;
		} else if (pages > 1) {
			/* The least significant zero bit indicates the size. So,
			 * for example, an "address" value of 0x12345f000 will
			 * flush from 0x123440000 to 0x12347ffff (256KiB). */
			unsigned long last = address + ((unsigned long)(pages - 1) << VTD_PAGE_SHIFT);
			unsigned long mask = __rounddown_pow_of_two(address ^ last);

			desc.high = QI_DEV_EIOTLB_ADDR((address & ~mask) | (mask - 1)) | QI_DEV_EIOTLB_SIZE;
		} else {
			desc.high = QI_DEV_EIOTLB_ADDR(address);
		}
		qi_submit_sync(&desc, svm->iommu);
	}
}

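/*
 * Flush a range for every device bound to this SVM. If a PASID-state
 * table is present and the entry for this PASID was clear, just set the
 * deferred-invalidate bit and skip the explicit flushes.
 */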
static void intel_flush_svm_range(struct intel_svm *svm, unsigned long address,
				  unsigned long pages, int ih, int gl)
{
	struct intel_svm_dev *sdev;

	/* Try deferred invalidate if available */
	if (svm->iommu->pasid_state_table &&
	    !cmpxchg64(&svm->iommu->pasid_state_table[svm->pasid].val, 0, 1ULL << 63))
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(sdev, &svm->devs, list)
		intel_flush_svm_range_dev(svm, sdev, address, pages, ih, gl);
	rcu_read_unlock();
}

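/*
 * MMU notifier callbacks: keep the IOTLB in sync with the CPU page
 * tables by flushing whenever mappings for the bound mm change.
 */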
static void intel_change_pte(struct mmu_notifier *mn, struct mm_struct *mm,
			     unsigned long address, pte_t pte)
{
	struct intel_svm *svm = container_of(mn, struct intel_svm, notifier);

	intel_flush_svm_range(svm, address, 1, 1, 0);
}

/* Pages have been freed at this point */
static void intel_invalidate_range(struct mmu_notifier *mn,
				   struct mm_struct *mm,
				   unsigned long start, unsigned long end)
{
	struct intel_svm *svm = container_of(mn, struct intel_svm, notifier);

	intel_flush_svm_range(svm, start,
			      (end - start + PAGE_SIZE - 1) >> VTD_PAGE_SHIFT, 0, 0);
}

static void intel_flush_pasid_dev(struct intel_svm *svm, struct intel_svm_dev *sdev, int pasid)
{
	struct qi_desc desc;

	desc.high = 0;
	desc.low = QI_PC_TYPE | QI_PC_DID(sdev->did) | QI_PC_PASID_SEL | QI_PC_PASID(pasid);

	qi_submit_sync(&desc, svm->iommu);
}

static void intel_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
	struct intel_svm *svm = container_of(mn, struct intel_svm, notifier);
	struct intel_svm_dev *sdev;

	/* This might end up being called from exit_mmap(), *before* the page
	 * tables are cleared. And __mmu_notifier_release() will delete us from
	 * the list of notifiers so that our invalidate_range() callback doesn't
	 * get called when the page tables are cleared. So we need to protect
	 * against hardware accessing those page tables.
	 *
	 * We do it by clearing the entry in the PASID table and then flushing
	 * the IOTLB and the PASID table caches. This might upset hardware;
	 * perhaps we'll want to point the PASID to a dummy PGD (like the zero
	 * page) so that we end up taking a fault that the hardware really
	 * *has* to handle gracefully without affecting other processes.
	 */
	svm->iommu->pasid_table[svm->pasid].val = 0;
	wmb();

	rcu_read_lock();
	list_for_each_entry_rcu(sdev, &svm->devs, list) {
		intel_flush_pasid_dev(svm, sdev, svm->pasid);
		intel_flush_svm_range_dev(svm, sdev, 0, -1, 0, !svm->mm);
	}
	rcu_read_unlock();
}

static const struct mmu_notifier_ops intel_mmuops = {
	.flags = MMU_INVALIDATE_DOES_NOT_BLOCK,
	.release = intel_mm_release,
	.change_pte = intel_change_pte,
	.invalidate_range = intel_invalidate_range,
};

static DEFINE_MUTEX(pasid_mutex);
static LIST_HEAD(global_svm_list);

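/*
 * Bind a process address space (or, with SVM_FLAG_SUPERVISOR_MODE,
 * init_mm) to a PASID on @dev so the device can operate on virtual
 * addresses directly. Illustrative caller sketch (hypothetical driver
 * code, not from this file):
 *
 *	int pasid;
 *
 *	if (!intel_svm_bind_mm(dev, &pasid, 0, NULL)) {
 *		... program the device to tag its DMA with 'pasid' ...
 *		intel_svm_unbind_mm(dev, pasid);
 *	}
 */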
int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_ops *ops)
{
	struct intel_iommu *iommu = intel_svm_device_to_iommu(dev);
	struct intel_svm_dev *sdev;
	struct intel_svm *svm = NULL;
	struct mm_struct *mm = NULL;
	u64 pasid_entry_val;
	int pasid_max;
	int ret;

	if (!iommu || !iommu->pasid_table)
		return -EINVAL;

	if (dev_is_pci(dev)) {
		pasid_max = pci_max_pasids(to_pci_dev(dev));
		if (pasid_max < 0)
			return -EINVAL;
	} else
		pasid_max = 1 << 20;

	if (flags & SVM_FLAG_SUPERVISOR_MODE) {
		if (!ecap_srs(iommu->ecap))
			return -EINVAL;
	} else if (pasid) {
		mm = get_task_mm(current);
		BUG_ON(!mm);
	}

	mutex_lock(&pasid_mutex);
	if (pasid && !(flags & SVM_FLAG_PRIVATE_PASID)) {
		struct intel_svm *t;

		list_for_each_entry(t, &global_svm_list, list) {
			if (t->mm != mm || (t->flags & SVM_FLAG_PRIVATE_PASID))
				continue;

			svm = t;
			if (svm->pasid >= pasid_max) {
				dev_warn(dev,
					 "Limited PASID width. Cannot use existing PASID %d\n",
					 svm->pasid);
				ret = -ENOSPC;
				goto out;
			}

			list_for_each_entry(sdev, &svm->devs, list) {
				if (dev == sdev->dev) {
					if (sdev->ops != ops) {
						ret = -EBUSY;
						goto out;
					}
					sdev->users++;
					goto success;
				}
			}

			break;
		}
	}

	sdev = kzalloc(sizeof(*sdev), GFP_KERNEL);
	if (!sdev) {
		ret = -ENOMEM;
		goto out;
	}
	sdev->dev = dev;

	ret = intel_iommu_enable_pasid(iommu, sdev);
	if (ret || !pasid) {
		/* If they don't actually want to assign a PASID, this is
		 * just an enabling check/preparation. */
		kfree(sdev);
		goto out;
	}
	/* Finish the setup now we know we're keeping it */
	sdev->users = 1;
	sdev->ops = ops;
	init_rcu_head(&sdev->rcu);

	if (!svm) {
		svm = kzalloc(sizeof(*svm), GFP_KERNEL);
		if (!svm) {
			ret = -ENOMEM;
			kfree(sdev);
			goto out;
		}
		svm->iommu = iommu;

		if (pasid_max > iommu->pasid_max)
			pasid_max = iommu->pasid_max;

		/* Do not use PASID 0 in caching mode (virtualised IOMMU) */
		ret = intel_pasid_alloc_id(svm,
					   !!cap_caching_mode(iommu->cap),
					   pasid_max - 1, GFP_KERNEL);
		if (ret < 0) {
			kfree(svm);
			kfree(sdev);
			goto out;
		}
		svm->pasid = ret;
		svm->notifier.ops = &intel_mmuops;
		svm->mm = mm;
		svm->flags = flags;
		INIT_LIST_HEAD_RCU(&svm->devs);
		INIT_LIST_HEAD(&svm->list);
		ret = -ENOMEM;
		if (mm) {
			ret = mmu_notifier_register(&svm->notifier, mm);
			if (ret) {
				intel_pasid_free_id(svm->pasid);
				kfree(svm);
				kfree(sdev);
				goto out;
			}
			pasid_entry_val = (u64)__pa(mm->pgd) | PASID_ENTRY_P;
		} else
			pasid_entry_val = (u64)__pa(init_mm.pgd) |
					  PASID_ENTRY_P | PASID_ENTRY_SRE;
		if (cpu_feature_enabled(X86_FEATURE_LA57))
			pasid_entry_val |= PASID_ENTRY_FLPM_5LP;

		iommu->pasid_table[svm->pasid].val = pasid_entry_val;

		wmb();

		/*
		 * Flush PASID cache when a PASID table entry becomes
		 * present.
		 */
		if (cap_caching_mode(iommu->cap))
			intel_flush_pasid_dev(svm, sdev, svm->pasid);

		list_add_tail(&svm->list, &global_svm_list);
	}
	list_add_rcu(&sdev->list, &svm->devs);

 success:
	*pasid = svm->pasid;
	ret = 0;
 out:
	mutex_unlock(&pasid_mutex);
	if (mm)
		mmput(mm);

	return ret;
}
EXPORT_SYMBOL_GPL(intel_svm_bind_mm);

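/*
 * Drop one device's reference to @pasid. When the last device is gone
 * the PASID table entry is cleared, the caches are flushed and the
 * PASID is freed. No page faults may still be outstanding for the
 * PASID when this is called.
 */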
int intel_svm_unbind_mm(struct device *dev, int pasid)
{
	struct intel_svm_dev *sdev;
	struct intel_iommu *iommu;
	struct intel_svm *svm;
	int ret = -EINVAL;

	mutex_lock(&pasid_mutex);
	iommu = intel_svm_device_to_iommu(dev);
	if (!iommu || !iommu->pasid_table)
		goto out;

	svm = intel_pasid_lookup_id(pasid);
	if (!svm)
		goto out;

	list_for_each_entry(sdev, &svm->devs, list) {
		if (dev == sdev->dev) {
			ret = 0;
			sdev->users--;
			if (!sdev->users) {
				list_del_rcu(&sdev->list);
				/* Flush the PASID cache and IOTLB for this device.
				 * Note that we do depend on the hardware *not* using
				 * the PASID any more. Just as we depend on other
				 * devices never using PASIDs that they have no right
				 * to use. We have a *shared* PASID table, because it's
				 * large and has to be physically contiguous. So it's
				 * hard to be as defensive as we might like. */
				intel_flush_pasid_dev(svm, sdev, svm->pasid);
				intel_flush_svm_range_dev(svm, sdev, 0, -1, 0, !svm->mm);
				kfree_rcu(sdev, rcu);

				if (list_empty(&svm->devs)) {
					svm->iommu->pasid_table[svm->pasid].val = 0;
					wmb();

					intel_pasid_free_id(svm->pasid);
					if (svm->mm)
						mmu_notifier_unregister(&svm->notifier, svm->mm);

					list_del(&svm->list);

					/* We mandate that no page faults may be outstanding
					 * for the PASID when intel_svm_unbind_mm() is called.
					 * If that is not obeyed, subtle errors will happen.
					 * Let's make them less subtle... */
					memset(svm, 0x6b, sizeof(*svm));
					kfree(svm);
				}
			}
			break;
		}
	}
 out:
	mutex_unlock(&pasid_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(intel_svm_unbind_mm);

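/*
 * Report whether @pasid still refers to a live address space: returns 1
 * if it does, 0 if the mm has already exited, or a negative error if
 * the lookup fails.
 */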
int intel_svm_is_pasid_valid(struct device *dev, int pasid)
{
	struct intel_iommu *iommu;
	struct intel_svm *svm;
	int ret = -EINVAL;

	mutex_lock(&pasid_mutex);
	iommu = intel_svm_device_to_iommu(dev);
	if (!iommu || !iommu->pasid_table)
		goto out;

	svm = intel_pasid_lookup_id(pasid);
	if (!svm)
		goto out;

	/* init_mm is used in this case */
	if (!svm->mm)
		ret = 1;
	else if (atomic_read(&svm->mm->mm_users) > 0)
		ret = 1;
	else
		ret = 0;

 out:
	mutex_unlock(&pasid_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(intel_svm_is_pasid_valid);

/* Page request queue descriptor */
struct page_req_dsc {
	u64 srr:1;
	u64 bof:1;
	u64 pasid_present:1;
	u64 lpig:1;
	u64 pasid:20;
	u64 bus:8;
	u64 private:23;
	u64 prg_index:9;
	u64 rd_req:1;
	u64 wr_req:1;
	u64 exe_req:1;
	u64 priv_req:1;
	u64 devfn:8;
	u64 addr:52;
};

#define PRQ_RING_MASK ((0x1000 << PRQ_ORDER) - 0x10)

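/* Check the requested access against the permissions of the VMA it hit. */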
static bool access_error(struct vm_area_struct *vma, struct page_req_dsc *req)
{
	unsigned long requested = 0;

	if (req->exe_req)
		requested |= VM_EXEC;

	if (req->rd_req)
		requested |= VM_READ;

	if (req->wr_req)
		requested |= VM_WRITE;

	return (requested & ~vma->vm_flags) != 0;
}

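/*
 * A virtual address is canonical when the bits above the implemented
 * width are a sign extension of the topmost implemented bit; shifting
 * up and arithmetically back down checks exactly that.
 */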
static bool is_canonical_address(u64 addr)
{
	int shift = 64 - (__VIRTUAL_MASK_SHIFT + 1);
	long saddr = (long) addr;

	return (((saddr << shift) >> shift) == saddr);
}

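/*
 * Threaded handler for page request queue interrupts: walk the queue
 * from head to tail, try to resolve each recoverable fault with
 * handle_mm_fault() on the bound mm, invoke the device's fault callback
 * if one was registered, and send the hardware a page group or page
 * stream response carrying the result.
 */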
static irqreturn_t prq_event_thread(int irq, void *d)
{
	struct intel_iommu *iommu = d;
	struct intel_svm *svm = NULL;
	int head, tail, handled = 0;

	/* Clear PPR bit before reading head/tail registers, to
	 * ensure that we get a new interrupt if needed. */
	writel(DMA_PRS_PPR, iommu->reg + DMAR_PRS_REG);

	tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK;
	head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK;
	while (head != tail) {
		struct intel_svm_dev *sdev;
		struct vm_area_struct *vma;
		struct page_req_dsc *req;
		struct qi_desc resp;
		int ret, result;
		u64 address;

		handled = 1;

		req = &iommu->prq[head / sizeof(*req)];

		result = QI_RESP_FAILURE;
		address = (u64)req->addr << VTD_PAGE_SHIFT;
		if (!req->pasid_present) {
			pr_err("%s: Page request without PASID: %08llx %08llx\n",
			       iommu->name, ((unsigned long long *)req)[0],
			       ((unsigned long long *)req)[1]);
			goto bad_req;
		}

		if (!svm || svm->pasid != req->pasid) {
			rcu_read_lock();
			svm = intel_pasid_lookup_id(req->pasid);
			/* It *can't* go away, because the driver is not permitted
			 * to unbind the mm while any page faults are outstanding.
			 * So we only need RCU to protect the internal idr code. */
			rcu_read_unlock();

			if (!svm) {
				pr_err("%s: Page request for invalid PASID %d: %08llx %08llx\n",
				       iommu->name, req->pasid, ((unsigned long long *)req)[0],
				       ((unsigned long long *)req)[1]);
				goto no_pasid;
			}
		}

		result = QI_RESP_INVALID;
		/* Since we're using init_mm.pgd directly, we should never take
		 * any faults on kernel addresses. */
		if (!svm->mm)
			goto bad_req;
		/* If the mm is already defunct, don't handle faults. */
		if (!mmget_not_zero(svm->mm))
			goto bad_req;

		/* If address is not canonical, return invalid response */
		if (!is_canonical_address(address))
			goto bad_req;

		down_read(&svm->mm->mmap_sem);
		vma = find_extend_vma(svm->mm, address);
		if (!vma || address < vma->vm_start)
			goto invalid;

		if (access_error(vma, req))
			goto invalid;

		ret = handle_mm_fault(vma, address,
				      req->wr_req ? FAULT_FLAG_WRITE : 0);
		if (ret & VM_FAULT_ERROR)
			goto invalid;

		result = QI_RESP_SUCCESS;
	invalid:
		up_read(&svm->mm->mmap_sem);
		mmput(svm->mm);
	bad_req:
		/* Accounting for major/minor faults? */
		rcu_read_lock();
		list_for_each_entry_rcu(sdev, &svm->devs, list) {
			if (sdev->sid == PCI_DEVID(req->bus, req->devfn))
				break;
		}
		/* Other devices can go away, but the drivers are not permitted
		 * to unbind while any page faults might be in flight. So it's
		 * OK to drop the 'lock' here now we have it. */
		rcu_read_unlock();

		if (WARN_ON(&sdev->list == &svm->devs))
			sdev = NULL;

		if (sdev && sdev->ops && sdev->ops->fault_cb) {
			int rwxp = (req->rd_req << 3) | (req->wr_req << 2) |
				(req->exe_req << 1) | (req->priv_req);
			sdev->ops->fault_cb(sdev->dev, req->pasid, req->addr, req->private, rwxp, result);
		}
		/* We get here in the error case where the PASID lookup failed,
		   and these can be NULL. Do not use them below this point! */
		sdev = NULL;
		svm = NULL;
	no_pasid:
		if (req->lpig) {
			/* Page Group Response */
			resp.low = QI_PGRP_PASID(req->pasid) |
				QI_PGRP_DID((req->bus << 8) | req->devfn) |
				QI_PGRP_PASID_P(req->pasid_present) |
				QI_PGRP_RESP_TYPE;
			resp.high = QI_PGRP_IDX(req->prg_index) |
				QI_PGRP_PRIV(req->private) | QI_PGRP_RESP_CODE(result);
			qi_submit_sync(&resp, iommu);
		} else if (req->srr) {
			/* Page Stream Response */
			resp.low = QI_PSTRM_IDX(req->prg_index) |
				QI_PSTRM_PRIV(req->private) | QI_PSTRM_BUS(req->bus) |
				QI_PSTRM_PASID(req->pasid) | QI_PSTRM_RESP_TYPE;
			resp.high = QI_PSTRM_ADDR(address) | QI_PSTRM_DEVFN(req->devfn) |
				QI_PSTRM_RESP_CODE(result);
			qi_submit_sync(&resp, iommu);
		}

		head = (head + sizeof(*req)) & PRQ_RING_MASK;
	}

	dmar_writeq(iommu->reg + DMAR_PQH_REG, tail);

	return IRQ_RETVAL(handled);
}