vfio_pci_intrs.c

/*
 * VFIO PCI interrupt handling
 *
 * Copyright (C) 2012 Red Hat, Inc.  All rights reserved.
 *     Author: Alex Williamson <alex.williamson@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Derived from original vfio:
 * Copyright 2010 Cisco Systems, Inc.  All rights reserved.
 * Author: Tom Lyon, pugs@cisco.com
 */

#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/eventfd.h>
#include <linux/msi.h>
#include <linux/pci.h>
#include <linux/file.h>
#include <linux/vfio.h>
#include <linux/wait.h>
#include <linux/slab.h>

#include "vfio_pci_private.h"

/*
 * INTx
 */
static void vfio_send_intx_eventfd(void *opaque, void *unused)
{
	struct vfio_pci_device *vdev = opaque;

	if (likely(is_intx(vdev) && !vdev->virq_disabled))
		eventfd_signal(vdev->ctx[0].trigger, 1);
}

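/*
 * INTx masking has a virtual and a physical side: ctx[0].masked tracks
 * the state the user sees, while the device is masked physically either
 * via the PCI 2.3 INTx disable (DisINTx) command register bit or, for
 * devices lacking DisINTx, by disabling the host IRQ line entirely.
 */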
void vfio_pci_intx_mask(struct vfio_pci_device *vdev)
{
	struct pci_dev *pdev = vdev->pdev;
	unsigned long flags;

	spin_lock_irqsave(&vdev->irqlock, flags);

	/*
	 * Masking can come from interrupt, ioctl, or config space
	 * via INTx disable.  The latter means this can get called
	 * even when not using INTx delivery.  In this case, just
	 * try to have the physical bit follow the virtual bit.
	 */
	if (unlikely(!is_intx(vdev))) {
		if (vdev->pci_2_3)
			pci_intx(pdev, 0);
	} else if (!vdev->ctx[0].masked) {
		/*
		 * Can't use check_and_mask here because we always want to
		 * mask, not just when something is pending.
		 */
		if (vdev->pci_2_3)
			pci_intx(pdev, 0);
		else
			disable_irq_nosync(pdev->irq);

		vdev->ctx[0].masked = true;
	}

	spin_unlock_irqrestore(&vdev->irqlock, flags);
}

/*
 * If this is triggered by an eventfd, we can't call eventfd_signal
 * or else we'll deadlock on the eventfd wait queue.  Return >0 when
 * a signal is necessary, which can then be handled via a work queue
 * or directly depending on the caller.
 */
static int vfio_pci_intx_unmask_handler(void *opaque, void *unused)
{
	struct vfio_pci_device *vdev = opaque;
	struct pci_dev *pdev = vdev->pdev;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&vdev->irqlock, flags);

	/*
	 * Unmasking comes from ioctl or config, so again, have the
	 * physical bit follow the virtual even when not using INTx.
	 */
	if (unlikely(!is_intx(vdev))) {
		if (vdev->pci_2_3)
			pci_intx(pdev, 1);
	} else if (vdev->ctx[0].masked && !vdev->virq_disabled) {
		/*
		 * A pending interrupt here would immediately trigger,
		 * but we can avoid that overhead by just re-sending
		 * the interrupt to the user.
		 */
		if (vdev->pci_2_3) {
			if (!pci_check_and_unmask_intx(pdev))
				ret = 1;
		} else
			enable_irq(pdev->irq);

		vdev->ctx[0].masked = (ret > 0);
	}

	spin_unlock_irqrestore(&vdev->irqlock, flags);

	return ret;
}

void vfio_pci_intx_unmask(struct vfio_pci_device *vdev)
{
	if (vfio_pci_intx_unmask_handler(vdev, NULL) > 0)
		vfio_send_intx_eventfd(vdev, NULL);
}

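/*
 * Hard IRQ handler for INTx.  Non-PCI-2.3 devices hold the line
 * exclusively, so the IRQ is simply disabled at the irqchip until the
 * user unmasks.  PCI 2.3 devices may share the line, so
 * pci_check_and_mask_intx() both verifies that this device is the one
 * asserting the interrupt and masks it via DisINTx; IRQ_NONE is
 * returned when the interrupt belongs to another device on the line.
 */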
static irqreturn_t vfio_intx_handler(int irq, void *dev_id)
{
	struct vfio_pci_device *vdev = dev_id;
	unsigned long flags;
	int ret = IRQ_NONE;

	spin_lock_irqsave(&vdev->irqlock, flags);

	if (!vdev->pci_2_3) {
		disable_irq_nosync(vdev->pdev->irq);
		vdev->ctx[0].masked = true;
		ret = IRQ_HANDLED;
	} else if (!vdev->ctx[0].masked &&	/* may be shared */
		   pci_check_and_mask_intx(vdev->pdev)) {
		vdev->ctx[0].masked = true;
		ret = IRQ_HANDLED;
	}

	spin_unlock_irqrestore(&vdev->irqlock, flags);

	if (ret == IRQ_HANDLED)
		vfio_send_intx_eventfd(vdev, NULL);

	return ret;
}

static int vfio_intx_enable(struct vfio_pci_device *vdev)
{
	if (!is_irq_none(vdev))
		return -EINVAL;

	if (!vdev->pdev->irq)
		return -ENODEV;

	vdev->ctx = kzalloc(sizeof(struct vfio_pci_irq_ctx), GFP_KERNEL);
	if (!vdev->ctx)
		return -ENOMEM;

	vdev->num_ctx = 1;

	/*
	 * If the virtual interrupt is masked, restore it.  Devices
	 * supporting DisINTx can be masked at the hardware level
	 * here, non-PCI-2.3 devices will have to wait until the
	 * interrupt is enabled.
	 */
	vdev->ctx[0].masked = vdev->virq_disabled;
	if (vdev->pci_2_3)
		pci_intx(vdev->pdev, !vdev->ctx[0].masked);

	vdev->irq_type = VFIO_PCI_INTX_IRQ_INDEX;

	return 0;
}

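/*
 * Swap in a new INTx eventfd trigger.  Any existing trigger and IRQ are
 * torn down first, so a negative fd simply leaves INTx unsignaled.  The
 * IRQ is requested with IRQF_SHARED only for PCI 2.3 devices, which can
 * be masked individually via DisINTx while sharing the line.
 */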
static int vfio_intx_set_signal(struct vfio_pci_device *vdev, int fd)
{
	struct pci_dev *pdev = vdev->pdev;
	unsigned long irqflags = IRQF_SHARED;
	struct eventfd_ctx *trigger;
	unsigned long flags;
	int ret;

	if (vdev->ctx[0].trigger) {
		free_irq(pdev->irq, vdev);
		kfree(vdev->ctx[0].name);
		eventfd_ctx_put(vdev->ctx[0].trigger);
		vdev->ctx[0].trigger = NULL;
	}

	if (fd < 0) /* Disable only */
		return 0;

	vdev->ctx[0].name = kasprintf(GFP_KERNEL, "vfio-intx(%s)",
				      pci_name(pdev));
	if (!vdev->ctx[0].name)
		return -ENOMEM;

	trigger = eventfd_ctx_fdget(fd);
	if (IS_ERR(trigger)) {
		kfree(vdev->ctx[0].name);
		return PTR_ERR(trigger);
	}

	vdev->ctx[0].trigger = trigger;

	if (!vdev->pci_2_3)
		irqflags = 0;

	ret = request_irq(pdev->irq, vfio_intx_handler,
			  irqflags, vdev->ctx[0].name, vdev);
	if (ret) {
		vdev->ctx[0].trigger = NULL;
		kfree(vdev->ctx[0].name);
		eventfd_ctx_put(trigger);
		return ret;
	}

	/*
	 * INTx disable will stick across the new irq setup,
	 * disable_irq won't.
	 */
	spin_lock_irqsave(&vdev->irqlock, flags);
	if (!vdev->pci_2_3 && vdev->ctx[0].masked)
		disable_irq_nosync(pdev->irq);
	spin_unlock_irqrestore(&vdev->irqlock, flags);

	return 0;
}

static void vfio_intx_disable(struct vfio_pci_device *vdev)
{
	vfio_intx_set_signal(vdev, -1);
	vfio_virqfd_disable(&vdev->ctx[0].unmask);
	vfio_virqfd_disable(&vdev->ctx[0].mask);
	vdev->irq_type = VFIO_PCI_NUM_IRQS;
	vdev->num_ctx = 0;
	kfree(vdev->ctx);
}

/*
 * MSI/MSI-X
 */
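/*
 * MSI and MSI-X vectors are never shared, so the handler doesn't need to
 * probe the device; it just signals the eventfd bound to the vector.
 */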
static irqreturn_t vfio_msihandler(int irq, void *arg)
{
	struct eventfd_ctx *trigger = arg;

	eventfd_signal(trigger, 1);
	return IRQ_HANDLED;
}

static int vfio_msi_enable(struct vfio_pci_device *vdev, int nvec, bool msix)
{
	struct pci_dev *pdev = vdev->pdev;
	int ret;

	if (!is_irq_none(vdev))
		return -EINVAL;

	vdev->ctx = kzalloc(nvec * sizeof(struct vfio_pci_irq_ctx), GFP_KERNEL);
	if (!vdev->ctx)
		return -ENOMEM;

	if (msix) {
		int i;

		vdev->msix = kzalloc(nvec * sizeof(struct msix_entry),
				     GFP_KERNEL);
		if (!vdev->msix) {
			kfree(vdev->ctx);
			return -ENOMEM;
		}

		for (i = 0; i < nvec; i++)
			vdev->msix[i].entry = i;

		ret = pci_enable_msix_range(pdev, vdev->msix, 1, nvec);
		if (ret < nvec) {
			if (ret > 0)
				pci_disable_msix(pdev);
			kfree(vdev->msix);
			kfree(vdev->ctx);
			return ret;
		}
	} else {
		ret = pci_enable_msi_range(pdev, 1, nvec);
		if (ret < nvec) {
			if (ret > 0)
				pci_disable_msi(pdev);
			kfree(vdev->ctx);
			return ret;
		}
	}

	vdev->num_ctx = nvec;
	vdev->irq_type = msix ? VFIO_PCI_MSIX_IRQ_INDEX :
				VFIO_PCI_MSI_IRQ_INDEX;

	if (!msix) {
		/*
		 * Compute the virtual hardware field for max msi vectors -
		 * it is the log base 2 of the number of vectors, rounded
		 * up to the next power of two.
		 */
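		/*
		 * For example, nvec = 3 gives fls(5) - 1 = 2, so the
		 * emulated capability advertises 2^2 = 4 vectors, the
		 * smallest power of two covering the allocation.
		 */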
		vdev->msi_qmax = fls(nvec * 2 - 1) - 1;
	}

	return 0;
}

static int vfio_msi_set_vector_signal(struct vfio_pci_device *vdev,
				      int vector, int fd, bool msix)
{
	struct pci_dev *pdev = vdev->pdev;
	int irq = msix ? vdev->msix[vector].vector : pdev->irq + vector;
	char *name = msix ? "vfio-msix" : "vfio-msi";
	struct eventfd_ctx *trigger;
	int ret;

	if (vector >= vdev->num_ctx)
		return -EINVAL;

	if (vdev->ctx[vector].trigger) {
		free_irq(irq, vdev->ctx[vector].trigger);
		kfree(vdev->ctx[vector].name);
		eventfd_ctx_put(vdev->ctx[vector].trigger);
		vdev->ctx[vector].trigger = NULL;
	}

	if (fd < 0)
		return 0;

	vdev->ctx[vector].name = kasprintf(GFP_KERNEL, "%s[%d](%s)",
					   name, vector, pci_name(pdev));
	if (!vdev->ctx[vector].name)
		return -ENOMEM;

	trigger = eventfd_ctx_fdget(fd);
	if (IS_ERR(trigger)) {
		kfree(vdev->ctx[vector].name);
		return PTR_ERR(trigger);
	}

	/*
	 * The MSI-X vector table resides in device memory which may be cleared
	 * via backdoor resets.  We don't allow direct access to the vector
	 * table so even if a userspace driver attempts to save/restore around
	 * such a reset it would be unsuccessful.  To avoid this, restore the
	 * cached value of the message prior to enabling.
	 */
	if (msix) {
		struct msi_msg msg;

		get_cached_msi_msg(irq, &msg);
		pci_write_msi_msg(irq, &msg);
	}

	ret = request_irq(irq, vfio_msihandler, 0,
			  vdev->ctx[vector].name, trigger);
	if (ret) {
		kfree(vdev->ctx[vector].name);
		eventfd_ctx_put(trigger);
		return ret;
	}

	vdev->ctx[vector].trigger = trigger;

	return 0;
}

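/*
 * Wire up (or, with fds == NULL, tear down) a contiguous block of
 * MSI/MSI-X vectors.  On failure, the vectors already programmed by this
 * call are rolled back, making the block all-or-nothing.
 */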
static int vfio_msi_set_block(struct vfio_pci_device *vdev, unsigned start,
			      unsigned count, int32_t *fds, bool msix)
{
	int i, j, ret = 0;

	/* Check the range, guarding against start + count wrapping */
	if (start >= vdev->num_ctx || start + count > vdev->num_ctx)
		return -EINVAL;

	for (i = 0, j = start; i < count && !ret; i++, j++) {
		int fd = fds ? fds[i] : -1;
		ret = vfio_msi_set_vector_signal(vdev, j, fd, msix);
	}

	if (ret) {
		/* Compare as signed so the unwind terminates at start == 0 */
		for (--j; j >= (int)start; j--)
			vfio_msi_set_vector_signal(vdev, j, -1, msix);
	}

	return ret;
}

static void vfio_msi_disable(struct vfio_pci_device *vdev, bool msix)
{
	struct pci_dev *pdev = vdev->pdev;
	int i;

	vfio_msi_set_block(vdev, 0, vdev->num_ctx, NULL, msix);

	for (i = 0; i < vdev->num_ctx; i++) {
		vfio_virqfd_disable(&vdev->ctx[i].unmask);
		vfio_virqfd_disable(&vdev->ctx[i].mask);
	}

	if (msix) {
		pci_disable_msix(vdev->pdev);
		kfree(vdev->msix);
	} else
		pci_disable_msi(pdev);

	vdev->irq_type = VFIO_PCI_NUM_IRQS;
	vdev->num_ctx = 0;
	kfree(vdev->ctx);
}

/*
 * IOCTL support
 */
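/*
 * Each handler below interprets the VFIO_DEVICE_SET_IRQS data payload
 * according to the flags: DATA_NONE carries no payload and acts
 * unconditionally, DATA_BOOL carries one uint8_t per element in the
 * [start, start + count) range, and DATA_EVENTFD carries one int32_t
 * eventfd per element, with -1 meaning de-assign.
 */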
static int vfio_pci_set_intx_unmask(struct vfio_pci_device *vdev,
				    unsigned index, unsigned start,
				    unsigned count, uint32_t flags, void *data)
{
	if (!is_intx(vdev) || start != 0 || count != 1)
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_NONE) {
		vfio_pci_intx_unmask(vdev);
	} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
		uint8_t unmask = *(uint8_t *)data;

		if (unmask)
			vfio_pci_intx_unmask(vdev);
	} else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		int32_t fd = *(int32_t *)data;

		if (fd >= 0)
			return vfio_virqfd_enable((void *) vdev,
						  vfio_pci_intx_unmask_handler,
						  vfio_send_intx_eventfd, NULL,
						  &vdev->ctx[0].unmask, fd);

		vfio_virqfd_disable(&vdev->ctx[0].unmask);
	}

	return 0;
}

static int vfio_pci_set_intx_mask(struct vfio_pci_device *vdev,
				  unsigned index, unsigned start,
				  unsigned count, uint32_t flags, void *data)
{
	if (!is_intx(vdev) || start != 0 || count != 1)
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_NONE) {
		vfio_pci_intx_mask(vdev);
	} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
		uint8_t mask = *(uint8_t *)data;

		if (mask)
			vfio_pci_intx_mask(vdev);
	} else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		return -ENOTTY; /* XXX implement me */
	}

	return 0;
}

static int vfio_pci_set_intx_trigger(struct vfio_pci_device *vdev,
				     unsigned index, unsigned start,
				     unsigned count, uint32_t flags, void *data)
{
	if (is_intx(vdev) && !count && (flags & VFIO_IRQ_SET_DATA_NONE)) {
		vfio_intx_disable(vdev);
		return 0;
	}

	if (!(is_intx(vdev) || is_irq_none(vdev)) || start != 0 || count != 1)
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		int32_t fd = *(int32_t *)data;
		int ret;

		if (is_intx(vdev))
			return vfio_intx_set_signal(vdev, fd);

		ret = vfio_intx_enable(vdev);
		if (ret)
			return ret;

		ret = vfio_intx_set_signal(vdev, fd);
		if (ret)
			vfio_intx_disable(vdev);

		return ret;
	}

	if (!is_intx(vdev))
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_NONE) {
		vfio_send_intx_eventfd(vdev, NULL);
	} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
		uint8_t trigger = *(uint8_t *)data;

		if (trigger)
			vfio_send_intx_eventfd(vdev, NULL);
	}

	return 0;
}

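/*
 * MSI/MSI-X trigger: count == 0 with DATA_NONE tears the index down.
 * Writing eventfds while the index is disabled implicitly enables
 * start + count vectors first; DATA_NONE/DATA_BOOL on an enabled index
 * instead injects interrupts into the user for loopback testing.
 */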
static int vfio_pci_set_msi_trigger(struct vfio_pci_device *vdev,
				    unsigned index, unsigned start,
				    unsigned count, uint32_t flags, void *data)
{
	int i;
	bool msix = (index == VFIO_PCI_MSIX_IRQ_INDEX);

	if (irq_is(vdev, index) && !count && (flags & VFIO_IRQ_SET_DATA_NONE)) {
		vfio_msi_disable(vdev, msix);
		return 0;
	}

	if (!(irq_is(vdev, index) || is_irq_none(vdev)))
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		int32_t *fds = data;
		int ret;

		if (vdev->irq_type == index)
			return vfio_msi_set_block(vdev, start, count,
						  fds, msix);

		ret = vfio_msi_enable(vdev, start + count, msix);
		if (ret)
			return ret;

		ret = vfio_msi_set_block(vdev, start, count, fds, msix);
		if (ret)
			vfio_msi_disable(vdev, msix);

		return ret;
	}

	if (!irq_is(vdev, index) || start + count > vdev->num_ctx)
		return -EINVAL;

	for (i = start; i < start + count; i++) {
		if (!vdev->ctx[i].trigger)
			continue;
		if (flags & VFIO_IRQ_SET_DATA_NONE) {
			eventfd_signal(vdev->ctx[i].trigger, 1);
		} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
			uint8_t *bools = data;

			if (bools[i - start])
				eventfd_signal(vdev->ctx[i].trigger, 1);
		}
	}

	return 0;
}

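/*
 * Common helper for the single-eventfd error and request triggers below.
 * DATA_EVENTFD assigns a new eventfd (or clears it with -1); DATA_NONE
 * and DATA_BOOL signal an already-assigned eventfd for loopback testing.
 */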
static int vfio_pci_set_ctx_trigger_single(struct eventfd_ctx **ctx,
					   uint32_t flags, void *data)
{
	int32_t fd;

	if (!(flags & VFIO_IRQ_SET_DATA_TYPE_MASK))
		return -EINVAL;

	/* DATA_NONE/DATA_BOOL enables loopback testing */
	if (flags & VFIO_IRQ_SET_DATA_NONE) {
		if (*ctx)
			eventfd_signal(*ctx, 1);
		return 0;
	} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
		uint8_t trigger = *(uint8_t *)data;

		if (trigger && *ctx)
			eventfd_signal(*ctx, 1);
		return 0;
	}

	/*
	 * Handle SET_DATA_EVENTFD; only dereference data here, once we
	 * know a payload is expected, since DATA_NONE passes no data.
	 */
	fd = *(int32_t *)data;
	if (fd == -1) {
		if (*ctx)
			eventfd_ctx_put(*ctx);
		*ctx = NULL;
		return 0;
	} else if (fd >= 0) {
		struct eventfd_ctx *efdctx;

		efdctx = eventfd_ctx_fdget(fd);
		if (IS_ERR(efdctx))
			return PTR_ERR(efdctx);

		if (*ctx)
			eventfd_ctx_put(*ctx);
		*ctx = efdctx;
		return 0;
	} else
		return -EINVAL;
}

static int vfio_pci_set_err_trigger(struct vfio_pci_device *vdev,
				    unsigned index, unsigned start,
				    unsigned count, uint32_t flags, void *data)
{
	if (index != VFIO_PCI_ERR_IRQ_INDEX)
		return -EINVAL;

	/*
	 * We should sanitize start & count, but that wasn't caught
	 * originally, so this IRQ index must forever ignore them :-(
	 */
	return vfio_pci_set_ctx_trigger_single(&vdev->err_trigger, flags, data);
}

static int vfio_pci_set_req_trigger(struct vfio_pci_device *vdev,
				    unsigned index, unsigned start,
				    unsigned count, uint32_t flags, void *data)
{
	if (index != VFIO_PCI_REQ_IRQ_INDEX || start != 0 || count != 1)
		return -EINVAL;

	return vfio_pci_set_ctx_trigger_single(&vdev->req_trigger, flags, data);
}

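/*
 * Entry point for the VFIO_DEVICE_SET_IRQS ioctl: select a handler from
 * the IRQ index and the action encoded in flags, then hand it the
 * start/count range and data payload to interpret.
 */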
int vfio_pci_set_irqs_ioctl(struct vfio_pci_device *vdev, uint32_t flags,
			    unsigned index, unsigned start, unsigned count,
			    void *data)
{
	int (*func)(struct vfio_pci_device *vdev, unsigned index,
		    unsigned start, unsigned count, uint32_t flags,
		    void *data) = NULL;

	switch (index) {
	case VFIO_PCI_INTX_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_MASK:
			func = vfio_pci_set_intx_mask;
			break;
		case VFIO_IRQ_SET_ACTION_UNMASK:
			func = vfio_pci_set_intx_unmask;
			break;
		case VFIO_IRQ_SET_ACTION_TRIGGER:
			func = vfio_pci_set_intx_trigger;
			break;
		}
		break;
	case VFIO_PCI_MSI_IRQ_INDEX:
	case VFIO_PCI_MSIX_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_MASK:
		case VFIO_IRQ_SET_ACTION_UNMASK:
			/* XXX Need masking support exported */
			break;
		case VFIO_IRQ_SET_ACTION_TRIGGER:
			func = vfio_pci_set_msi_trigger;
			break;
		}
		break;
	case VFIO_PCI_ERR_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_TRIGGER:
			if (pci_is_pcie(vdev->pdev))
				func = vfio_pci_set_err_trigger;
			break;
		}
		break;
	case VFIO_PCI_REQ_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_TRIGGER:
			func = vfio_pci_set_req_trigger;
			break;
		}
		break;
	}

	if (!func)
		return -ENOTTY;

	return func(vdev, index, start, count, flags, data);
}
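
/*
 * Userspace sketch (not part of this file): routing INTx to an eventfd.
 * The identifiers below are standard VFIO/Linux UAPI names; only the fd
 * variables (efd, device_fd) are illustrative.
 *
 *	struct { struct vfio_irq_set set; int32_t fd; } irq = {
 *		.set = {
 *			.argsz = sizeof(irq),
 *			.flags = VFIO_IRQ_SET_DATA_EVENTFD |
 *				 VFIO_IRQ_SET_ACTION_TRIGGER,
 *			.index = VFIO_PCI_INTX_IRQ_INDEX,
 *			.start = 0,
 *			.count = 1,
 *		},
 *		.fd = efd,	// efd = eventfd(0, 0)
 *	};
 *	ioctl(device_fd, VFIO_DEVICE_SET_IRQS, &irq);
 *
 * This reaches vfio_pci_set_intx_trigger() via vfio_pci_set_irqs_ioctl(),
 * enabling INTx if necessary and binding efd as the trigger.
 */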