@@ -40,6 +40,7 @@ struct iio_event_interface {
 	struct list_head	dev_attr_list;
 	unsigned long		flags;
 	struct attribute_group	group;
+	struct mutex		read_lock;
 };
 
 /**
@@ -47,16 +48,17 @@ struct iio_event_interface {
  * @indio_dev:		IIO device structure
  * @ev_code:		What event
  * @timestamp:		When the event occurred
+ *
+ * Note: The caller must make sure that this function is not running
+ * concurrently for the same indio_dev more than once.
  **/
 int iio_push_event(struct iio_dev *indio_dev, u64 ev_code, s64 timestamp)
 {
 	struct iio_event_interface *ev_int = indio_dev->event_interface;
 	struct iio_event_data ev;
-	unsigned long flags;
 	int copied;
 
 	/* Does anyone care? */
-	spin_lock_irqsave(&ev_int->wait.lock, flags);
 	if (test_bit(IIO_BUSY_BIT_POS, &ev_int->flags)) {
 
 		ev.id = ev_code;
@@ -64,9 +66,8 @@ int iio_push_event(struct iio_dev *indio_dev, u64 ev_code, s64 timestamp)
 
 		copied = kfifo_put(&ev_int->det_events, ev);
 		if (copied != 0)
-			wake_up_locked_poll(&ev_int->wait, POLLIN);
+			wake_up_poll(&ev_int->wait, POLLIN);
 	}
-	spin_unlock_irqrestore(&ev_int->wait.lock, flags);
 
 	return 0;
 }
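The producer path above is now lockless, hence the new kerneldoc note. As an illustrative sketch (not part of the patch; foo_event_handler and the event code are hypothetical), a driver typically satisfies the no-concurrent-calls requirement by pushing events from a single threaded IRQ handler per device:

#include <linux/interrupt.h>
#include <linux/iio/iio.h>
#include <linux/iio/events.h>

/* Hypothetical driver code, not from this patch: a single threaded IRQ
 * handler per device guarantees that iio_push_event() never runs
 * concurrently for the same indio_dev. */
static irqreturn_t foo_event_handler(int irq, void *private)
{
	struct iio_dev *indio_dev = private;

	iio_push_event(indio_dev,
		       IIO_UNMOD_EVENT_CODE(IIO_VOLTAGE, 0,
					    IIO_EV_TYPE_THRESH,
					    IIO_EV_DIR_RISING),
		       iio_get_time_ns());

	return IRQ_HANDLED;
}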
@@ -87,10 +88,8 @@ static unsigned int iio_event_poll(struct file *filep,
 
 	poll_wait(filep, &ev_int->wait, wait);
 
-	spin_lock_irq(&ev_int->wait.lock);
 	if (!kfifo_is_empty(&ev_int->det_events))
 		events = POLLIN | POLLRDNORM;
-	spin_unlock_irq(&ev_int->wait.lock);
 
 	return events;
 }
@@ -111,31 +110,40 @@ static ssize_t iio_event_chrdev_read(struct file *filep,
 	if (count < sizeof(struct iio_event_data))
 		return -EINVAL;
 
-	spin_lock_irq(&ev_int->wait.lock);
-	if (kfifo_is_empty(&ev_int->det_events)) {
-		if (filep->f_flags & O_NONBLOCK) {
-			ret = -EAGAIN;
-			goto error_unlock;
-		}
-		/* Blocking on device; waiting for something to be there */
-		ret = wait_event_interruptible_locked_irq(ev_int->wait,
+	do {
+		if (kfifo_is_empty(&ev_int->det_events)) {
+			if (filep->f_flags & O_NONBLOCK)
+				return -EAGAIN;
+
+			ret = wait_event_interruptible(ev_int->wait,
 					!kfifo_is_empty(&ev_int->det_events) ||
 					indio_dev->info == NULL);
-		if (ret)
-			goto error_unlock;
-		if (indio_dev->info == NULL) {
-			ret = -ENODEV;
-			goto error_unlock;
+			if (ret)
+				return ret;
+			if (indio_dev->info == NULL)
+				return -ENODEV;
 		}
-		/* Single access device so no one else can get the data */
-	}
 
-	ret = kfifo_to_user(&ev_int->det_events, buf, count, &copied);
+		if (mutex_lock_interruptible(&ev_int->read_lock))
+			return -ERESTARTSYS;
+		ret = kfifo_to_user(&ev_int->det_events, buf, count, &copied);
+		mutex_unlock(&ev_int->read_lock);
+
+		if (ret)
+			return ret;
+
+		/*
+		 * If we couldn't read anything from the fifo (a different
+		 * thread might have been faster) we either return -EAGAIN if
+		 * the file descriptor is non-blocking, otherwise we go back to
+		 * sleep and wait for more data to arrive.
+		 */
+		if (copied == 0 && (filep->f_flags & O_NONBLOCK))
+			return -EAGAIN;
 
-error_unlock:
-	spin_unlock_irq(&ev_int->wait.lock);
+	} while (copied == 0);
 
-	return ret ? ret : copied;
+	return copied;
 }
 
 static int iio_event_chrdev_release(struct inode *inode, struct file *filep)
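The reshuffled read path relies on kfifo being safe for one concurrent producer and one concurrent consumer; only readers racing with each other need serialization, and unlike the wait-queue spinlock it replaces, the read_lock mutex may be held across kfifo_to_user(), which can fault while copying to user memory. A condensed sketch of the pattern with hypothetical foo_* names (not part of the patch):

#include <linux/kfifo.h>
#include <linux/mutex.h>
#include <linux/wait.h>
#include <linux/poll.h>
#include <linux/iio/events.h>

/* Hypothetical fifo context mirroring iio_event_interface */
struct foo_events {
	DECLARE_KFIFO(fifo, struct iio_event_data, 16);
	wait_queue_head_t wait;
	struct mutex read_lock;	/* serializes concurrent readers only */
};

/* Single producer: kfifo needs no lock for one writer + one reader */
static void foo_push(struct foo_events *ev, struct iio_event_data *d)
{
	if (kfifo_put(&ev->fifo, *d))
		wake_up_poll(&ev->wait, POLLIN);
}

/* Possibly-concurrent consumers: serialize only against each other */
static int foo_pop_to_user(struct foo_events *ev, char __user *buf,
			   size_t count, unsigned int *copied)
{
	int ret;

	if (mutex_lock_interruptible(&ev->read_lock))
		return -ERESTARTSYS;
	ret = kfifo_to_user(&ev->fifo, buf, count, copied);
	mutex_unlock(&ev->read_lock);

	return ret;
}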
@@ -143,15 +151,7 @@ static int iio_event_chrdev_release(struct inode *inode, struct file *filep)
 	struct iio_dev *indio_dev = filep->private_data;
 	struct iio_event_interface *ev_int = indio_dev->event_interface;
 
-	spin_lock_irq(&ev_int->wait.lock);
-	__clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags);
-	/*
-	 * In order to maintain a clean state for reopening,
-	 * clear out any awaiting events. The mask will prevent
-	 * any new __iio_push_event calls running.
-	 */
-	kfifo_reset_out(&ev_int->det_events);
-	spin_unlock_irq(&ev_int->wait.lock);
+	clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags);
 
 	iio_device_put(indio_dev);
 
@@ -174,22 +174,20 @@ int iio_event_getfd(struct iio_dev *indio_dev)
 	if (ev_int == NULL)
 		return -ENODEV;
 
-	spin_lock_irq(&ev_int->wait.lock);
-	if (__test_and_set_bit(IIO_BUSY_BIT_POS, &ev_int->flags)) {
-		spin_unlock_irq(&ev_int->wait.lock);
+	if (test_and_set_bit(IIO_BUSY_BIT_POS, &ev_int->flags))
 		return -EBUSY;
-	}
-	spin_unlock_irq(&ev_int->wait.lock);
+
 	iio_device_get(indio_dev);
 
 	fd = anon_inode_getfd("iio:event", &iio_event_chrdev_fileops,
 				indio_dev, O_RDONLY | O_CLOEXEC);
 	if (fd < 0) {
-		spin_lock_irq(&ev_int->wait.lock);
-		__clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags);
-		spin_unlock_irq(&ev_int->wait.lock);
+		clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags);
 		iio_device_put(indio_dev);
+	} else {
+		kfifo_reset_out(&ev_int->det_events);
 	}
+
 	return fd;
 }
 
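With the spinlock gone, exclusive access to the event file descriptor rests on plain atomic bit operations, and stale events are now discarded when a new descriptor is created rather than on release. The same pattern in isolation, using hypothetical foo_* names (not part of the patch):

#include <linux/bitops.h>
#include <linux/cdev.h>
#include <linux/fs.h>

#define FOO_BUSY_BIT	0

struct foo_dev {
	struct cdev cdev;
	unsigned long flags;
};

/* test_and_set_bit() and clear_bit() are atomic, so no lock is needed
 * to guarantee that at most one file descriptor is open at a time. */
static int foo_open(struct inode *inode, struct file *filep)
{
	struct foo_dev *fdev = container_of(inode->i_cdev,
					    struct foo_dev, cdev);

	if (test_and_set_bit(FOO_BUSY_BIT, &fdev->flags))
		return -EBUSY;

	filep->private_data = fdev;
	return 0;
}

static int foo_release(struct inode *inode, struct file *filep)
{
	struct foo_dev *fdev = filep->private_data;

	clear_bit(FOO_BUSY_BIT, &fdev->flags);
	return 0;
}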
@@ -424,6 +422,7 @@ static void iio_setup_ev_int(struct iio_event_interface *ev_int)
 {
 	INIT_KFIFO(ev_int->det_events);
 	init_waitqueue_head(&ev_int->wait);
+	mutex_init(&ev_int->read_lock);
 }
 
 static const char *iio_event_group_name = "events";