amd_iommu_v2.c
/*
 * Copyright (C) 2010-2012 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <jroedel@suse.de>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/mmu_notifier.h>
#include <linux/amd-iommu.h>
#include <linux/mm_types.h>
#include <linux/profile.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/iommu.h>
#include <linux/wait.h>
#include <linux/pci.h>
#include <linux/gfp.h>

#include "amd_iommu_types.h"
#include "amd_iommu_proto.h"

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Joerg Roedel <jroedel@suse.de>");

#define MAX_DEVICES		0x10000
#define PRI_QUEUE_SIZE		512

struct pri_queue {
	atomic_t inflight;
	bool finish;
	int status;
};
struct pasid_state {
	struct list_head list;			/* For global state-list */
	atomic_t count;				/* Reference count */
	unsigned mmu_notifier_count;		/* Counting nested mmu_notifier
						   calls */
	struct mm_struct *mm;			/* mm_struct for the faults */
	struct mmu_notifier mn;			/* mmu_notifier handle */
	struct pri_queue pri[PRI_QUEUE_SIZE];	/* PRI tag states */
	struct device_state *device_state;	/* Link to our device_state */
	int pasid;				/* PASID index */
	bool invalid;				/* Used during setup and
						   teardown of the pasid */
	spinlock_t lock;			/* Protect pri_queues and
						   mmu_notifier_count */
	wait_queue_head_t wq;			/* To wait for count == 0 */
};
struct device_state {
	struct list_head list;
	u16 devid;
	atomic_t count;
	struct pci_dev *pdev;
	struct pasid_state **states;
	struct iommu_domain *domain;
	int pasid_levels;
	int max_pasids;
	amd_iommu_invalid_ppr_cb inv_ppr_cb;
	amd_iommu_invalidate_ctx inv_ctx_cb;
	spinlock_t lock;
	wait_queue_head_t wq;
};

struct fault {
	struct work_struct work;
	struct device_state *dev_state;
	struct pasid_state *state;
	struct mm_struct *mm;
	u64 address;
	u16 devid;
	u16 pasid;
	u16 tag;
	u16 finish;
	u16 flags;
};
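
/*
 * Lifetime model, as implemented below: one device_state exists per bound
 * PCI device and one pasid_state per bound PASID. Both are reference
 * counted with an atomic_t plus a wait queue, so the teardown paths can
 * wait for the count to reach zero before freeing. Page Request Interface
 * (PRI) faults arrive through the PPR notifier and are serviced from a
 * workqueue; see ppr_notifier() and do_fault() further down.
 */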
static LIST_HEAD(state_list);
static spinlock_t state_lock;

static struct workqueue_struct *iommu_wq;

static void free_pasid_states(struct device_state *dev_state);
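
/*
 * The 16-bit device id packs the PCI bus number into the high byte and
 * devfn into the low byte; this matches the requester ID the IOMMU sees
 * on the bus.
 */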
static u16 device_id(struct pci_dev *pdev)
{
	u16 devid;

	devid = pdev->bus->number;
	devid = (devid << 8) | pdev->devfn;

	return devid;
}

static struct device_state *__get_device_state(u16 devid)
{
	struct device_state *dev_state;

	list_for_each_entry(dev_state, &state_list, list) {
		if (dev_state->devid == devid)
			return dev_state;
	}

	return NULL;
}

static struct device_state *get_device_state(u16 devid)
{
	struct device_state *dev_state;
	unsigned long flags;

	spin_lock_irqsave(&state_lock, flags);
	dev_state = __get_device_state(devid);
	if (dev_state != NULL)
		atomic_inc(&dev_state->count);
	spin_unlock_irqrestore(&state_lock, flags);

	return dev_state;
}

static void free_device_state(struct device_state *dev_state)
{
	struct iommu_group *group;

	/*
	 * First detach device from domain - No more PRI requests will arrive
	 * from that device after it is unbound from the IOMMUv2 domain.
	 */
	group = iommu_group_get(&dev_state->pdev->dev);
	if (WARN_ON(!group))
		return;

	iommu_detach_group(dev_state->domain, group);

	iommu_group_put(group);

	/* Everything is down now, free the IOMMUv2 domain */
	iommu_domain_free(dev_state->domain);

	/* Finally get rid of the device-state */
	kfree(dev_state);
}

static void put_device_state(struct device_state *dev_state)
{
	if (atomic_dec_and_test(&dev_state->count))
		wake_up(&dev_state->wq);
}
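
/*
 * PASID states are kept in a small radix-style table: each level is one
 * zeroed page holding 512 pointers, and each level consumes 9 bits of
 * the PASID (512 == 2^9, and 512 pointers of 8 bytes fill a 4K page).
 * pasid_levels is the number of non-leaf levels above the leaf page.
 */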
/* Must be called under dev_state->lock */
static struct pasid_state **__get_pasid_state_ptr(struct device_state *dev_state,
						  int pasid, bool alloc)
{
	struct pasid_state **root, **ptr;
	int level, index;

	level = dev_state->pasid_levels;
	root = dev_state->states;

	while (true) {

		index = (pasid >> (9 * level)) & 0x1ff;
		ptr = &root[index];

		if (level == 0)
			break;

		if (*ptr == NULL) {
			if (!alloc)
				return NULL;

			*ptr = (void *)get_zeroed_page(GFP_ATOMIC);
			if (*ptr == NULL)
				return NULL;
		}

		root = (struct pasid_state **)*ptr;
		level -= 1;
	}

	return ptr;
}

static int set_pasid_state(struct device_state *dev_state,
			   struct pasid_state *pasid_state,
			   int pasid)
{
	struct pasid_state **ptr;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&dev_state->lock, flags);
	ptr = __get_pasid_state_ptr(dev_state, pasid, true);

	ret = -ENOMEM;
	if (ptr == NULL)
		goto out_unlock;

	ret = -ENOMEM;
	if (*ptr != NULL)
		goto out_unlock;

	*ptr = pasid_state;

	ret = 0;

out_unlock:
	spin_unlock_irqrestore(&dev_state->lock, flags);

	return ret;
}

static void clear_pasid_state(struct device_state *dev_state, int pasid)
{
	struct pasid_state **ptr;
	unsigned long flags;

	spin_lock_irqsave(&dev_state->lock, flags);
	ptr = __get_pasid_state_ptr(dev_state, pasid, true);

	if (ptr == NULL)
		goto out_unlock;

	*ptr = NULL;

out_unlock:
	spin_unlock_irqrestore(&dev_state->lock, flags);
}

static struct pasid_state *get_pasid_state(struct device_state *dev_state,
					   int pasid)
{
	struct pasid_state **ptr, *ret = NULL;
	unsigned long flags;

	spin_lock_irqsave(&dev_state->lock, flags);
	ptr = __get_pasid_state_ptr(dev_state, pasid, false);

	if (ptr == NULL)
		goto out_unlock;

	ret = *ptr;
	if (ret)
		atomic_inc(&ret->count);

out_unlock:
	spin_unlock_irqrestore(&dev_state->lock, flags);

	return ret;
}

static void free_pasid_state(struct pasid_state *pasid_state)
{
	kfree(pasid_state);
}

static void put_pasid_state(struct pasid_state *pasid_state)
{
	if (atomic_dec_and_test(&pasid_state->count))
		wake_up(&pasid_state->wq);
}

static void put_pasid_state_wait(struct pasid_state *pasid_state)
{
	atomic_dec(&pasid_state->count);
	wait_event(pasid_state->wq, !atomic_read(&pasid_state->count));
	free_pasid_state(pasid_state);
}
static void unbind_pasid(struct pasid_state *pasid_state)
{
	struct iommu_domain *domain;

	domain = pasid_state->device_state->domain;

	/*
	 * Mark pasid_state as invalid; no more faults will be added to the
	 * work queue after this is visible everywhere.
	 */
	pasid_state->invalid = true;

	/* Make sure this is visible */
	smp_wmb();

	/* After this the device/pasid can't access the mm anymore */
	amd_iommu_domain_clear_gcr3(domain, pasid_state->pasid);

	/* Make sure no more pending faults are in the queue */
	flush_workqueue(iommu_wq);
}
static void free_pasid_states_level1(struct pasid_state **tbl)
{
	int i;

	for (i = 0; i < 512; ++i) {
		if (tbl[i] == NULL)
			continue;

		free_page((unsigned long)tbl[i]);
	}
}

static void free_pasid_states_level2(struct pasid_state **tbl)
{
	struct pasid_state **ptr;
	int i;

	for (i = 0; i < 512; ++i) {
		if (tbl[i] == NULL)
			continue;

		ptr = (struct pasid_state **)tbl[i];
		free_pasid_states_level1(ptr);
	}
}

static void free_pasid_states(struct device_state *dev_state)
{
	struct pasid_state *pasid_state;
	int i;

	for (i = 0; i < dev_state->max_pasids; ++i) {
		pasid_state = get_pasid_state(dev_state, i);
		if (pasid_state == NULL)
			continue;

		put_pasid_state(pasid_state);

		/*
		 * This will call the mn_release function and
		 * unbind the PASID
		 */
		mmu_notifier_unregister(&pasid_state->mn, pasid_state->mm);

		put_pasid_state_wait(pasid_state); /* Reference taken in
						      amd_iommu_bind_pasid */

		/* Drop reference taken in amd_iommu_bind_pasid */
		put_device_state(dev_state);
	}

	if (dev_state->pasid_levels == 2)
		free_pasid_states_level2(dev_state->states);
	else if (dev_state->pasid_levels == 1)
		free_pasid_states_level1(dev_state->states);
	else
		BUG_ON(dev_state->pasid_levels != 0);

	free_page((unsigned long)dev_state->states);
}

static struct pasid_state *mn_to_state(struct mmu_notifier *mn)
{
	return container_of(mn, struct pasid_state, mn);
}
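
/*
 * The mmu_notifier callbacks below mirror CPU-side changes of the bound
 * mm into the IOMMU: whenever the kernel unmaps or changes pages of the
 * process address space, the matching IOTLB entries for the PASID are
 * flushed so the device cannot keep using stale translations.
 */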
static void __mn_flush_page(struct mmu_notifier *mn,
			    unsigned long address)
{
	struct pasid_state *pasid_state;
	struct device_state *dev_state;

	pasid_state = mn_to_state(mn);
	dev_state = pasid_state->device_state;

	amd_iommu_flush_page(dev_state->domain, pasid_state->pasid, address);
}

static int mn_clear_flush_young(struct mmu_notifier *mn,
				struct mm_struct *mm,
				unsigned long start,
				unsigned long end)
{
	for (; start < end; start += PAGE_SIZE)
		__mn_flush_page(mn, start);

	return 0;
}

static void mn_invalidate_page(struct mmu_notifier *mn,
			       struct mm_struct *mm,
			       unsigned long address)
{
	__mn_flush_page(mn, address);
}
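
/*
 * If start and (end - 1) differ only in the page-offset bits, the whole
 * range lies within one page and a single-page flush suffices; otherwise
 * the PASID's entire IOTLB is flushed.
 */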
static void mn_invalidate_range(struct mmu_notifier *mn,
				struct mm_struct *mm,
				unsigned long start, unsigned long end)
{
	struct pasid_state *pasid_state;
	struct device_state *dev_state;

	pasid_state = mn_to_state(mn);
	dev_state = pasid_state->device_state;

	if ((start ^ (end - 1)) < PAGE_SIZE)
		amd_iommu_flush_page(dev_state->domain, pasid_state->pasid,
				     start);
	else
		amd_iommu_flush_tlb(dev_state->domain, pasid_state->pasid);
}

static void mn_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
	struct pasid_state *pasid_state;
	struct device_state *dev_state;
	bool run_inv_ctx_cb;

	might_sleep();

	pasid_state = mn_to_state(mn);
	dev_state = pasid_state->device_state;
	run_inv_ctx_cb = !pasid_state->invalid;

	if (run_inv_ctx_cb && dev_state->inv_ctx_cb)
		dev_state->inv_ctx_cb(dev_state->pdev, pasid_state->pasid);

	unbind_pasid(pasid_state);
}

static const struct mmu_notifier_ops iommu_mn = {
	.release		= mn_release,
	.clear_flush_young	= mn_clear_flush_young,
	.invalidate_page	= mn_invalidate_page,
	.invalidate_range	= mn_invalidate_range,
};

static void set_pri_tag_status(struct pasid_state *pasid_state,
			       u16 tag, int status)
{
	unsigned long flags;

	spin_lock_irqsave(&pasid_state->lock, flags);
	pasid_state->pri[tag].status = status;
	spin_unlock_irqrestore(&pasid_state->lock, flags);
}

static void finish_pri_tag(struct device_state *dev_state,
			   struct pasid_state *pasid_state,
			   u16 tag)
{
	unsigned long flags;

	spin_lock_irqsave(&pasid_state->lock, flags);
	if (atomic_dec_and_test(&pasid_state->pri[tag].inflight) &&
	    pasid_state->pri[tag].finish) {
		amd_iommu_complete_ppr(dev_state->pdev, pasid_state->pasid,
				       pasid_state->pri[tag].status, tag);
		pasid_state->pri[tag].finish = false;
		pasid_state->pri[tag].status = PPR_SUCCESS;
	}
	spin_unlock_irqrestore(&pasid_state->lock, flags);
}
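
/*
 * When the kernel cannot resolve a fault itself, the device driver may
 * have registered an invalid-PPR callback; its verdict is translated
 * into the PPR completion code (success, invalid or failure) that is
 * reported back to the device for this tag.
 */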
static void handle_fault_error(struct fault *fault)
{
	int status;

	if (!fault->dev_state->inv_ppr_cb) {
		set_pri_tag_status(fault->state, fault->tag, PPR_INVALID);
		return;
	}

	status = fault->dev_state->inv_ppr_cb(fault->dev_state->pdev,
					      fault->pasid,
					      fault->address,
					      fault->flags);
	switch (status) {
	case AMD_IOMMU_INV_PRI_RSP_SUCCESS:
		set_pri_tag_status(fault->state, fault->tag, PPR_SUCCESS);
		break;
	case AMD_IOMMU_INV_PRI_RSP_INVALID:
		set_pri_tag_status(fault->state, fault->tag, PPR_INVALID);
		break;
	case AMD_IOMMU_INV_PRI_RSP_FAIL:
		set_pri_tag_status(fault->state, fault->tag, PPR_FAILURE);
		break;
	default:
		BUG();
	}
}

static bool access_error(struct vm_area_struct *vma, struct fault *fault)
{
	unsigned long requested = 0;

	if (fault->flags & PPR_FAULT_EXEC)
		requested |= VM_EXEC;

	if (fault->flags & PPR_FAULT_READ)
		requested |= VM_READ;

	if (fault->flags & PPR_FAULT_WRITE)
		requested |= VM_WRITE;

	return (requested & ~vma->vm_flags) != 0;
}
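
/*
 * Worker for the fault workqueue: resolve the faulting address against
 * the bound mm via handle_mm_fault(), with FAULT_FLAG_REMOTE set since
 * the access originates from a device rather than from the CPU.
 */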
static void do_fault(struct work_struct *work)
{
	struct fault *fault = container_of(work, struct fault, work);
	struct vm_area_struct *vma;
	int ret = VM_FAULT_ERROR;
	unsigned int flags = 0;
	struct mm_struct *mm;
	u64 address;

	mm = fault->state->mm;
	address = fault->address;

	if (fault->flags & PPR_FAULT_USER)
		flags |= FAULT_FLAG_USER;
	if (fault->flags & PPR_FAULT_WRITE)
		flags |= FAULT_FLAG_WRITE;
	flags |= FAULT_FLAG_REMOTE;

	down_read(&mm->mmap_sem);
	vma = find_extend_vma(mm, address);
	if (!vma || address < vma->vm_start)
		/* failed to get a vma in the right range */
		goto out;

	/* Check if we have the right permissions on the vma */
	if (access_error(vma, fault))
		goto out;

	ret = handle_mm_fault(vma, address, flags);
out:
	up_read(&mm->mmap_sem);

	if (ret & VM_FAULT_ERROR)
		/* failed to service fault */
		handle_fault_error(fault);

	finish_pri_tag(fault->dev_state, fault->state, fault->tag);

	put_pasid_state(fault->state);

	kfree(fault);
}
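
/*
 * PPR notifier: called for each peripheral page request the IOMMU logs.
 * As the decoding below shows, the low 9 bits of the fault's tag field
 * carry the PRI tag and bit 9 marks the final request of a group, i.e.
 * whether a completion must be sent once all in-flight work finished.
 */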
static int ppr_notifier(struct notifier_block *nb, unsigned long e, void *data)
{
	struct amd_iommu_fault *iommu_fault;
	struct pasid_state *pasid_state;
	struct device_state *dev_state;
	unsigned long flags;
	struct fault *fault;
	bool finish;
	u16 tag;
	int ret;

	iommu_fault = data;
	tag = iommu_fault->tag & 0x1ff;
	finish = (iommu_fault->tag >> 9) & 1;

	ret = NOTIFY_DONE;
	dev_state = get_device_state(iommu_fault->device_id);
	if (dev_state == NULL)
		goto out;

	pasid_state = get_pasid_state(dev_state, iommu_fault->pasid);
	if (pasid_state == NULL || pasid_state->invalid) {
		/* We know the device but not the PASID -> send INVALID */
		amd_iommu_complete_ppr(dev_state->pdev, iommu_fault->pasid,
				       PPR_INVALID, tag);
		goto out_drop_state;
	}

	spin_lock_irqsave(&pasid_state->lock, flags);
	atomic_inc(&pasid_state->pri[tag].inflight);
	if (finish)
		pasid_state->pri[tag].finish = true;
	spin_unlock_irqrestore(&pasid_state->lock, flags);

	fault = kzalloc(sizeof(*fault), GFP_ATOMIC);
	if (fault == NULL) {
		/* We are OOM - send success and let the device re-fault */
		finish_pri_tag(dev_state, pasid_state, tag);
		goto out_drop_state;
	}

	fault->dev_state = dev_state;
	fault->address = iommu_fault->address;
	fault->state = pasid_state;
	fault->tag = tag;
	fault->finish = finish;
	fault->pasid = iommu_fault->pasid;
	fault->flags = iommu_fault->flags;
	INIT_WORK(&fault->work, do_fault);

	queue_work(iommu_wq, &fault->work);

	ret = NOTIFY_OK;

out_drop_state:

	if (ret != NOTIFY_OK && pasid_state)
		put_pasid_state(pasid_state);

	put_device_state(dev_state);

out:
	return ret;
}

static struct notifier_block ppr_nb = {
	.notifier_call = ppr_notifier,
};
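
/*
 * Exported API below. A device driver using IOMMUv2 would typically call
 * it roughly like this (a sketch only; my_pdev, MY_MAX_PASIDS and
 * my_inv_ppr_cb are placeholder names, and error handling is omitted):
 *
 *	ret = amd_iommu_init_device(my_pdev, MY_MAX_PASIDS);
 *	amd_iommu_set_invalid_ppr_cb(my_pdev, my_inv_ppr_cb);
 *	ret = amd_iommu_bind_pasid(my_pdev, pasid, current);
 *	...			(device issues PASID/PRI traffic)
 *	amd_iommu_unbind_pasid(my_pdev, pasid);
 *	amd_iommu_free_device(my_pdev);
 */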
int amd_iommu_bind_pasid(struct pci_dev *pdev, int pasid,
			 struct task_struct *task)
{
	struct pasid_state *pasid_state;
	struct device_state *dev_state;
	struct mm_struct *mm;
	u16 devid;
	int ret;

	might_sleep();

	if (!amd_iommu_v2_supported())
		return -ENODEV;

	devid = device_id(pdev);
	dev_state = get_device_state(devid);

	if (dev_state == NULL)
		return -EINVAL;

	ret = -EINVAL;
	if (pasid < 0 || pasid >= dev_state->max_pasids)
		goto out;

	ret = -ENOMEM;
	pasid_state = kzalloc(sizeof(*pasid_state), GFP_KERNEL);
	if (pasid_state == NULL)
		goto out;

	atomic_set(&pasid_state->count, 1);
	init_waitqueue_head(&pasid_state->wq);
	spin_lock_init(&pasid_state->lock);

	mm = get_task_mm(task);
	pasid_state->mm = mm;
	pasid_state->device_state = dev_state;
	pasid_state->pasid = pasid;
	pasid_state->invalid = true;	/* Mark as valid only if we are
					   done with setting up the pasid */
	pasid_state->mn.ops = &iommu_mn;

	if (pasid_state->mm == NULL)
		goto out_free;

	mmu_notifier_register(&pasid_state->mn, mm);

	ret = set_pasid_state(dev_state, pasid_state, pasid);
	if (ret)
		goto out_unregister;

	ret = amd_iommu_domain_set_gcr3(dev_state->domain, pasid,
					__pa(pasid_state->mm->pgd));
	if (ret)
		goto out_clear_state;

	/* Now we are ready to handle faults */
	pasid_state->invalid = false;

	/*
	 * Drop the reference to the mm_struct here. We rely on the
	 * mmu_notifier release call-back to inform us when the mm
	 * is going away.
	 */
	mmput(mm);

	return 0;

out_clear_state:
	clear_pasid_state(dev_state, pasid);

out_unregister:
	mmu_notifier_unregister(&pasid_state->mn, mm);

out_free:
	mmput(mm);
	free_pasid_state(pasid_state);

out:
	put_device_state(dev_state);

	return ret;
}
EXPORT_SYMBOL(amd_iommu_bind_pasid);

void amd_iommu_unbind_pasid(struct pci_dev *pdev, int pasid)
{
	struct pasid_state *pasid_state;
	struct device_state *dev_state;
	u16 devid;

	might_sleep();

	if (!amd_iommu_v2_supported())
		return;

	devid = device_id(pdev);
	dev_state = get_device_state(devid);
	if (dev_state == NULL)
		return;

	if (pasid < 0 || pasid >= dev_state->max_pasids)
		goto out;

	pasid_state = get_pasid_state(dev_state, pasid);
	if (pasid_state == NULL)
		goto out;
	/*
	 * Drop reference taken here. We are safe because we still hold
	 * the reference taken in the amd_iommu_bind_pasid function.
	 */
	put_pasid_state(pasid_state);

	/* Clear the pasid state so that the pasid can be re-used */
	clear_pasid_state(dev_state, pasid_state->pasid);

	/*
	 * Call mmu_notifier_unregister to drop our reference
	 * to pasid_state->mm
	 */
	mmu_notifier_unregister(&pasid_state->mn, pasid_state->mm);

	put_pasid_state_wait(pasid_state); /* Reference taken in
					      amd_iommu_bind_pasid */
out:
	/* Drop reference taken in this function */
	put_device_state(dev_state);

	/* Drop reference taken in amd_iommu_bind_pasid */
	put_device_state(dev_state);
}
EXPORT_SYMBOL(amd_iommu_unbind_pasid);

int amd_iommu_init_device(struct pci_dev *pdev, int pasids)
{
	struct device_state *dev_state;
	struct iommu_group *group;
	unsigned long flags;
	int ret, tmp;
	u16 devid;

	might_sleep();

	if (!amd_iommu_v2_supported())
		return -ENODEV;

	if (pasids <= 0 || pasids > (PASID_MASK + 1))
		return -EINVAL;

	devid = device_id(pdev);

	dev_state = kzalloc(sizeof(*dev_state), GFP_KERNEL);
	if (dev_state == NULL)
		return -ENOMEM;

	spin_lock_init(&dev_state->lock);
	init_waitqueue_head(&dev_state->wq);
	dev_state->pdev = pdev;
	dev_state->devid = devid;
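
	/*
	 * Each level of the PASID table resolves 9 bits (512 entries per
	 * page), so count how many levels beyond the leaf page are needed
	 * to cover the requested number of PASIDs.
	 */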
	tmp = pasids;
	for (dev_state->pasid_levels = 0; (tmp - 1) & ~0x1ff; tmp >>= 9)
		dev_state->pasid_levels += 1;

	atomic_set(&dev_state->count, 1);
	dev_state->max_pasids = pasids;

	ret = -ENOMEM;
	dev_state->states = (void *)get_zeroed_page(GFP_KERNEL);
	if (dev_state->states == NULL)
		goto out_free_dev_state;

	dev_state->domain = iommu_domain_alloc(&pci_bus_type);
	if (dev_state->domain == NULL)
		goto out_free_states;

	amd_iommu_domain_direct_map(dev_state->domain);

	ret = amd_iommu_domain_enable_v2(dev_state->domain, pasids);
	if (ret)
		goto out_free_domain;

	group = iommu_group_get(&pdev->dev);
	if (!group) {
		ret = -EINVAL;
		goto out_free_domain;
	}

	ret = iommu_attach_group(dev_state->domain, group);
	if (ret != 0)
		goto out_drop_group;

	iommu_group_put(group);

	spin_lock_irqsave(&state_lock, flags);

	if (__get_device_state(devid) != NULL) {
		spin_unlock_irqrestore(&state_lock, flags);
		ret = -EBUSY;
		goto out_free_domain;
	}

	list_add_tail(&dev_state->list, &state_list);

	spin_unlock_irqrestore(&state_lock, flags);

	return 0;

out_drop_group:
	iommu_group_put(group);

out_free_domain:
	iommu_domain_free(dev_state->domain);

out_free_states:
	free_page((unsigned long)dev_state->states);

out_free_dev_state:
	kfree(dev_state);

	return ret;
}
EXPORT_SYMBOL(amd_iommu_init_device);

void amd_iommu_free_device(struct pci_dev *pdev)
{
	struct device_state *dev_state;
	unsigned long flags;
	u16 devid;

	if (!amd_iommu_v2_supported())
		return;

	devid = device_id(pdev);

	spin_lock_irqsave(&state_lock, flags);

	dev_state = __get_device_state(devid);
	if (dev_state == NULL) {
		spin_unlock_irqrestore(&state_lock, flags);
		return;
	}

	list_del(&dev_state->list);

	spin_unlock_irqrestore(&state_lock, flags);

	/* Get rid of any remaining pasid states */
	free_pasid_states(dev_state);

	put_device_state(dev_state);
	/*
	 * Wait until the last reference is dropped before freeing
	 * the device state.
	 */
	wait_event(dev_state->wq, !atomic_read(&dev_state->count));
	free_device_state(dev_state);
}
EXPORT_SYMBOL(amd_iommu_free_device);

int amd_iommu_set_invalid_ppr_cb(struct pci_dev *pdev,
				 amd_iommu_invalid_ppr_cb cb)
{
	struct device_state *dev_state;
	unsigned long flags;
	u16 devid;
	int ret;

	if (!amd_iommu_v2_supported())
		return -ENODEV;

	devid = device_id(pdev);

	spin_lock_irqsave(&state_lock, flags);

	ret = -EINVAL;
	dev_state = __get_device_state(devid);
	if (dev_state == NULL)
		goto out_unlock;

	dev_state->inv_ppr_cb = cb;

	ret = 0;

out_unlock:
	spin_unlock_irqrestore(&state_lock, flags);

	return ret;
}
EXPORT_SYMBOL(amd_iommu_set_invalid_ppr_cb);

int amd_iommu_set_invalidate_ctx_cb(struct pci_dev *pdev,
				    amd_iommu_invalidate_ctx cb)
{
	struct device_state *dev_state;
	unsigned long flags;
	u16 devid;
	int ret;

	if (!amd_iommu_v2_supported())
		return -ENODEV;

	devid = device_id(pdev);

	spin_lock_irqsave(&state_lock, flags);

	ret = -EINVAL;
	dev_state = __get_device_state(devid);
	if (dev_state == NULL)
		goto out_unlock;

	dev_state->inv_ctx_cb = cb;

	ret = 0;

out_unlock:
	spin_unlock_irqrestore(&state_lock, flags);

	return ret;
}
EXPORT_SYMBOL(amd_iommu_set_invalidate_ctx_cb);

static int __init amd_iommu_v2_init(void)
{
	int ret;

	pr_info("AMD IOMMUv2 driver by Joerg Roedel <jroedel@suse.de>\n");

	if (!amd_iommu_v2_supported()) {
		pr_info("AMD IOMMUv2 functionality not available on this system\n");

		/*
		 * Load anyway to provide the symbols to other modules
		 * which may use AMD IOMMUv2 optionally.
		 */
		return 0;
	}

	spin_lock_init(&state_lock);

	ret = -ENOMEM;
	iommu_wq = alloc_workqueue("amd_iommu_v2", WQ_MEM_RECLAIM, 0);
	if (iommu_wq == NULL)
		goto out;

	amd_iommu_register_ppr_notifier(&ppr_nb);

	return 0;

out:
	return ret;
}

static void __exit amd_iommu_v2_exit(void)
{
	struct device_state *dev_state;
	int i;

	if (!amd_iommu_v2_supported())
		return;

	amd_iommu_unregister_ppr_notifier(&ppr_nb);

	flush_workqueue(iommu_wq);

	/*
	 * The loop below might call flush_workqueue(), so call
	 * destroy_workqueue() after it
	 */
	for (i = 0; i < MAX_DEVICES; ++i) {
		dev_state = get_device_state(i);

		if (dev_state == NULL)
			continue;

		WARN_ON_ONCE(1);

		put_device_state(dev_state);
		amd_iommu_free_device(dev_state->pdev);
	}

	destroy_workqueue(iommu_wq);
}

module_init(amd_iommu_v2_init);
module_exit(amd_iommu_v2_exit);