@@ -31,10 +31,10 @@
  * IRQfd - generic
  */
 struct virqfd {
-	struct vfio_pci_device	*vdev;
+	void			*opaque;
 	struct eventfd_ctx	*eventfd;
-	int			(*handler)(struct vfio_pci_device *, void *);
-	void			(*thread)(struct vfio_pci_device *, void *);
+	int			(*handler)(void *, void *);
+	void			(*thread)(void *, void *);
 	void			*data;
 	struct work_struct	inject;
 	wait_queue_t		wait;
@@ -74,7 +74,7 @@ static int virqfd_wakeup(wait_queue_t *wait, unsigned mode, int sync, void *key)
 	if (flags & POLLIN) {
 		/* An event has been signaled, call function */
 		if ((!virqfd->handler ||
-		     virqfd->handler(virqfd->vdev, virqfd->data)) &&
+		     virqfd->handler(virqfd->opaque, virqfd->data)) &&
 		    virqfd->thread)
 			schedule_work(&virqfd->inject);
 	}
@@ -124,12 +124,12 @@ static void virqfd_inject(struct work_struct *work)
 {
 	struct virqfd *virqfd = container_of(work, struct virqfd, inject);
 	if (virqfd->thread)
-		virqfd->thread(virqfd->vdev, virqfd->data);
+		virqfd->thread(virqfd->opaque, virqfd->data);
 }
 
-int vfio_virqfd_enable(struct vfio_pci_device *vdev,
-		       int (*handler)(struct vfio_pci_device *, void *),
-		       void (*thread)(struct vfio_pci_device *, void *),
+int vfio_virqfd_enable(void *opaque,
+		       int (*handler)(void *, void *),
+		       void (*thread)(void *, void *),
 		       void *data, struct virqfd **pvirqfd, int fd)
 {
 	struct fd irqfd;
@@ -143,7 +143,7 @@ int vfio_virqfd_enable(struct vfio_pci_device *vdev,
 		return -ENOMEM;
 
 	virqfd->pvirqfd = pvirqfd;
-	virqfd->vdev = vdev;
+	virqfd->opaque = opaque;
 	virqfd->handler = handler;
 	virqfd->thread = thread;
 	virqfd->data = data;
@@ -196,7 +196,7 @@ int vfio_virqfd_enable(struct vfio_pci_device *vdev,
 	 * before we registered and trigger it as if we didn't miss it.
 	 */
 	if (events & POLLIN) {
-		if ((!handler || handler(vdev, data)) && thread)
+		if ((!handler || handler(opaque, data)) && thread)
 			schedule_work(&virqfd->inject);
 	}
 
@@ -243,8 +243,10 @@ EXPORT_SYMBOL_GPL(vfio_virqfd_disable);
 /*
  * INTx
  */
-static void vfio_send_intx_eventfd(struct vfio_pci_device *vdev, void *unused)
+static void vfio_send_intx_eventfd(void *opaque, void *unused)
 {
+	struct vfio_pci_device *vdev = opaque;
+
 	if (likely(is_intx(vdev) && !vdev->virq_disabled))
 		eventfd_signal(vdev->ctx[0].trigger, 1);
 }
@@ -287,9 +289,9 @@ void vfio_pci_intx_mask(struct vfio_pci_device *vdev)
  * a signal is necessary, which can then be handled via a work queue
  * or directly depending on the caller.
  */
-static int vfio_pci_intx_unmask_handler(struct vfio_pci_device *vdev,
-					void *unused)
+static int vfio_pci_intx_unmask_handler(void *opaque, void *unused)
 {
+	struct vfio_pci_device *vdev = opaque;
 	struct pci_dev *pdev = vdev->pdev;
 	unsigned long flags;
 	int ret = 0;
@@ -641,7 +643,7 @@ static int vfio_pci_set_intx_unmask(struct vfio_pci_device *vdev,
 	} else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
 		int32_t fd = *(int32_t *)data;
 		if (fd >= 0)
-			return vfio_virqfd_enable(vdev,
+			return vfio_virqfd_enable((void *) vdev,
 						  vfio_pci_intx_unmask_handler,
 						  vfio_send_intx_eventfd, NULL,
 						  &vdev->ctx[0].unmask, fd);