vfio_platform_irq.c 7.3 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336
  1. /*
  2. * VFIO platform devices interrupt handling
  3. *
  4. * Copyright (C) 2013 - Virtual Open Systems
  5. * Author: Antonios Motakis <a.motakis@virtualopensystems.com>
  6. *
  7. * This program is free software; you can redistribute it and/or modify
  8. * it under the terms of the GNU General Public License, version 2, as
  9. * published by the Free Software Foundation.
  10. *
  11. * This program is distributed in the hope that it will be useful,
  12. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  14. * GNU General Public License for more details.
  15. */
  16. #include <linux/eventfd.h>
  17. #include <linux/interrupt.h>
  18. #include <linux/slab.h>
  19. #include <linux/types.h>
  20. #include <linux/vfio.h>
  21. #include <linux/irq.h>
  22. #include "vfio_platform_private.h"
  23. static void vfio_platform_mask(struct vfio_platform_irq *irq_ctx)
  24. {
  25. unsigned long flags;
  26. spin_lock_irqsave(&irq_ctx->lock, flags);
  27. if (!irq_ctx->masked) {
  28. disable_irq_nosync(irq_ctx->hwirq);
  29. irq_ctx->masked = true;
  30. }
  31. spin_unlock_irqrestore(&irq_ctx->lock, flags);
  32. }
  33. static int vfio_platform_mask_handler(void *opaque, void *unused)
  34. {
  35. struct vfio_platform_irq *irq_ctx = opaque;
  36. vfio_platform_mask(irq_ctx);
  37. return 0;
  38. }
  39. static int vfio_platform_set_irq_mask(struct vfio_platform_device *vdev,
  40. unsigned index, unsigned start,
  41. unsigned count, uint32_t flags,
  42. void *data)
  43. {
  44. if (start != 0 || count != 1)
  45. return -EINVAL;
  46. if (!(vdev->irqs[index].flags & VFIO_IRQ_INFO_MASKABLE))
  47. return -EINVAL;
  48. if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
  49. int32_t fd = *(int32_t *)data;
  50. if (fd >= 0)
  51. return vfio_virqfd_enable((void *) &vdev->irqs[index],
  52. vfio_platform_mask_handler,
  53. NULL, NULL,
  54. &vdev->irqs[index].mask, fd);
  55. vfio_virqfd_disable(&vdev->irqs[index].mask);
  56. return 0;
  57. }
  58. if (flags & VFIO_IRQ_SET_DATA_NONE) {
  59. vfio_platform_mask(&vdev->irqs[index]);
  60. } else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
  61. uint8_t mask = *(uint8_t *)data;
  62. if (mask)
  63. vfio_platform_mask(&vdev->irqs[index]);
  64. }
  65. return 0;
  66. }
  67. static void vfio_platform_unmask(struct vfio_platform_irq *irq_ctx)
  68. {
  69. unsigned long flags;
  70. spin_lock_irqsave(&irq_ctx->lock, flags);
  71. if (irq_ctx->masked) {
  72. enable_irq(irq_ctx->hwirq);
  73. irq_ctx->masked = false;
  74. }
  75. spin_unlock_irqrestore(&irq_ctx->lock, flags);
  76. }
  77. static int vfio_platform_unmask_handler(void *opaque, void *unused)
  78. {
  79. struct vfio_platform_irq *irq_ctx = opaque;
  80. vfio_platform_unmask(irq_ctx);
  81. return 0;
  82. }
  83. static int vfio_platform_set_irq_unmask(struct vfio_platform_device *vdev,
  84. unsigned index, unsigned start,
  85. unsigned count, uint32_t flags,
  86. void *data)
  87. {
  88. if (start != 0 || count != 1)
  89. return -EINVAL;
  90. if (!(vdev->irqs[index].flags & VFIO_IRQ_INFO_MASKABLE))
  91. return -EINVAL;
  92. if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
  93. int32_t fd = *(int32_t *)data;
  94. if (fd >= 0)
  95. return vfio_virqfd_enable((void *) &vdev->irqs[index],
  96. vfio_platform_unmask_handler,
  97. NULL, NULL,
  98. &vdev->irqs[index].unmask,
  99. fd);
  100. vfio_virqfd_disable(&vdev->irqs[index].unmask);
  101. return 0;
  102. }
  103. if (flags & VFIO_IRQ_SET_DATA_NONE) {
  104. vfio_platform_unmask(&vdev->irqs[index]);
  105. } else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
  106. uint8_t unmask = *(uint8_t *)data;
  107. if (unmask)
  108. vfio_platform_unmask(&vdev->irqs[index]);
  109. }
  110. return 0;
  111. }
/*
 * Hard-irq handler for level-triggered (automasked) interrupts.
 *
 * A level interrupt would refire forever once asserted, so the line is
 * disabled ("automasked") here under the lock; userspace is expected to
 * unmask it (VFIO_IRQ_SET_ACTION_UNMASK) after servicing the device.
 * If the line was already masked we report IRQ_NONE and do not signal.
 */
static irqreturn_t vfio_automasked_irq_handler(int irq, void *dev_id)
{
	struct vfio_platform_irq *irq_ctx = dev_id;
	unsigned long flags;
	int ret = IRQ_NONE;

	spin_lock_irqsave(&irq_ctx->lock, flags);

	if (!irq_ctx->masked) {
		ret = IRQ_HANDLED;

		/* automask maskable interrupts */
		disable_irq_nosync(irq_ctx->hwirq);
		irq_ctx->masked = true;
	}

	spin_unlock_irqrestore(&irq_ctx->lock, flags);

	/* Signal the eventfd outside the spinlock. */
	if (ret == IRQ_HANDLED)
		eventfd_signal(irq_ctx->trigger, 1);

	return ret;
}
/*
 * Hard-irq handler for edge-triggered interrupts: no masking needed,
 * just notify userspace through the trigger eventfd.
 */
static irqreturn_t vfio_irq_handler(int irq, void *dev_id)
{
	struct vfio_platform_irq *irq_ctx = dev_id;

	eventfd_signal(irq_ctx->trigger, 1);

	return IRQ_HANDLED;
}
/*
 * (Re)wire the eventfd that signals userspace when this interrupt fires.
 *
 * Any previously installed trigger is fully torn down first (free_irq,
 * name, eventfd ref).  @fd < 0 means "disable only".  On success the
 * hwirq is requested with IRQ_NOAUTOEN so it is only enabled explicitly
 * below, and only if the line is not currently masked.
 *
 * Returns 0 on success or a negative errno.
 */
static int vfio_set_trigger(struct vfio_platform_device *vdev, int index,
			    int fd, irq_handler_t handler)
{
	struct vfio_platform_irq *irq = &vdev->irqs[index];
	struct eventfd_ctx *trigger;
	int ret;

	/* Tear down the existing trigger, if any. */
	if (irq->trigger) {
		free_irq(irq->hwirq, irq);
		kfree(irq->name);
		eventfd_ctx_put(irq->trigger);
		irq->trigger = NULL;
	}

	if (fd < 0) /* Disable only */
		return 0;

	irq->name = kasprintf(GFP_KERNEL, "vfio-irq[%d](%s)",
			      irq->hwirq, vdev->name);
	if (!irq->name)
		return -ENOMEM;

	trigger = eventfd_ctx_fdget(fd);
	if (IS_ERR(trigger)) {
		kfree(irq->name);
		return PTR_ERR(trigger);
	}

	irq->trigger = trigger;

	/* Keep the line disabled until we decide below. */
	irq_set_status_flags(irq->hwirq, IRQ_NOAUTOEN);
	ret = request_irq(irq->hwirq, handler, 0, irq->name, irq);
	if (ret) {
		kfree(irq->name);
		eventfd_ctx_put(trigger);
		irq->trigger = NULL;
		return ret;
	}

	if (!irq->masked)
		enable_irq(irq->hwirq);

	return 0;
}
  171. static int vfio_platform_set_irq_trigger(struct vfio_platform_device *vdev,
  172. unsigned index, unsigned start,
  173. unsigned count, uint32_t flags,
  174. void *data)
  175. {
  176. struct vfio_platform_irq *irq = &vdev->irqs[index];
  177. irq_handler_t handler;
  178. if (vdev->irqs[index].flags & VFIO_IRQ_INFO_AUTOMASKED)
  179. handler = vfio_automasked_irq_handler;
  180. else
  181. handler = vfio_irq_handler;
  182. if (!count && (flags & VFIO_IRQ_SET_DATA_NONE))
  183. return vfio_set_trigger(vdev, index, -1, handler);
  184. if (start != 0 || count != 1)
  185. return -EINVAL;
  186. if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
  187. int32_t fd = *(int32_t *)data;
  188. return vfio_set_trigger(vdev, index, fd, handler);
  189. }
  190. if (flags & VFIO_IRQ_SET_DATA_NONE) {
  191. handler(irq->hwirq, irq);
  192. } else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
  193. uint8_t trigger = *(uint8_t *)data;
  194. if (trigger)
  195. handler(irq->hwirq, irq);
  196. }
  197. return 0;
  198. }
  199. int vfio_platform_set_irqs_ioctl(struct vfio_platform_device *vdev,
  200. uint32_t flags, unsigned index, unsigned start,
  201. unsigned count, void *data)
  202. {
  203. int (*func)(struct vfio_platform_device *vdev, unsigned index,
  204. unsigned start, unsigned count, uint32_t flags,
  205. void *data) = NULL;
  206. switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
  207. case VFIO_IRQ_SET_ACTION_MASK:
  208. func = vfio_platform_set_irq_mask;
  209. break;
  210. case VFIO_IRQ_SET_ACTION_UNMASK:
  211. func = vfio_platform_set_irq_unmask;
  212. break;
  213. case VFIO_IRQ_SET_ACTION_TRIGGER:
  214. func = vfio_platform_set_irq_trigger;
  215. break;
  216. }
  217. if (!func)
  218. return -ENOTTY;
  219. return func(vdev, index, start, count, flags, data);
  220. }
  221. int vfio_platform_irq_init(struct vfio_platform_device *vdev)
  222. {
  223. int cnt = 0, i;
  224. while (vdev->get_irq(vdev, cnt) >= 0)
  225. cnt++;
  226. vdev->irqs = kcalloc(cnt, sizeof(struct vfio_platform_irq), GFP_KERNEL);
  227. if (!vdev->irqs)
  228. return -ENOMEM;
  229. for (i = 0; i < cnt; i++) {
  230. int hwirq = vdev->get_irq(vdev, i);
  231. if (hwirq < 0)
  232. goto err;
  233. spin_lock_init(&vdev->irqs[i].lock);
  234. vdev->irqs[i].flags = VFIO_IRQ_INFO_EVENTFD;
  235. if (irq_get_trigger_type(hwirq) & IRQ_TYPE_LEVEL_MASK)
  236. vdev->irqs[i].flags |= VFIO_IRQ_INFO_MASKABLE
  237. | VFIO_IRQ_INFO_AUTOMASKED;
  238. vdev->irqs[i].count = 1;
  239. vdev->irqs[i].hwirq = hwirq;
  240. vdev->irqs[i].masked = false;
  241. }
  242. vdev->num_irqs = cnt;
  243. return 0;
  244. err:
  245. kfree(vdev->irqs);
  246. return -EINVAL;
  247. }
  248. void vfio_platform_irq_cleanup(struct vfio_platform_device *vdev)
  249. {
  250. int i;
  251. for (i = 0; i < vdev->num_irqs; i++)
  252. vfio_set_trigger(vdev, i, -1, NULL);
  253. vdev->num_irqs = 0;
  254. kfree(vdev->irqs);
  255. }