@@ -25,6 +25,7 @@ hwdep_read_resp_buf(struct snd_efw *efw, char __user *buf, long remained,
 {
	unsigned int length, till_end, type;
	struct snd_efw_transaction *t;
+	u8 *pull_ptr;
	long count = 0;
 
	if (remained < sizeof(type) + sizeof(struct snd_efw_transaction))
@@ -38,8 +39,17 @@ hwdep_read_resp_buf(struct snd_efw *efw, char __user *buf, long remained,
	buf += sizeof(type);
 
	/* write into buffer as many responses as possible */
-	while (efw->resp_queues > 0) {
-		t = (struct snd_efw_transaction *)(efw->pull_ptr);
+	spin_lock_irq(&efw->lock);
+
+	/*
+	 * If another task reaches here while this task is accessing user
+	 * space, it picks up the current position in the buffer and can
+	 * read the same series of responses.
+	 */
+	pull_ptr = efw->pull_ptr;
+
+	while (efw->push_ptr != pull_ptr) {
+		t = (struct snd_efw_transaction *)(pull_ptr);
		length = be32_to_cpu(t->length) * sizeof(__be32);
 
		/* confirm enough space for this response */
@@ -49,26 +59,39 @@ hwdep_read_resp_buf(struct snd_efw *efw, char __user *buf, long remained,
		/* copy from ring buffer to user buffer */
		while (length > 0) {
			till_end = snd_efw_resp_buf_size -
-				(unsigned int)(efw->pull_ptr - efw->resp_buf);
+				(unsigned int)(pull_ptr - efw->resp_buf);
			till_end = min_t(unsigned int, length, till_end);
 
-			if (copy_to_user(buf, efw->pull_ptr, till_end))
+			spin_unlock_irq(&efw->lock);
+
+			if (copy_to_user(buf, pull_ptr, till_end))
				return -EFAULT;
 
-			efw->pull_ptr += till_end;
-			if (efw->pull_ptr >= efw->resp_buf +
-					    snd_efw_resp_buf_size)
-				efw->pull_ptr -= snd_efw_resp_buf_size;
+			spin_lock_irq(&efw->lock);
+
+			pull_ptr += till_end;
+			if (pull_ptr >= efw->resp_buf + snd_efw_resp_buf_size)
+				pull_ptr -= snd_efw_resp_buf_size;
 
			length -= till_end;
			buf += till_end;
			count += till_end;
			remained -= till_end;
		}
-
-		efw->resp_queues--;
	}
 
+	/*
+	 * All tasks can read from the buffer nearly simultaneously, but the
+	 * final position differs per task, depending on the length of the
+	 * buffer each was given. For simplicity, the shared position is the
+	 * one set by the latest task. A listening application should read
+	 * from a single thread only; otherwise each task can see a different
+	 * sequence of responses, depending on its buffer length.
+	 */
+	efw->pull_ptr = pull_ptr;
+
+	spin_unlock_irq(&efw->lock);
+
	return count;
 }
 
@@ -76,14 +99,17 @@ static long
 hwdep_read_locked(struct snd_efw *efw, char __user *buf, long count,
		  loff_t *offset)
 {
-	union snd_firewire_event event;
+	union snd_firewire_event event = {
+		.lock_status.type = SNDRV_FIREWIRE_EVENT_LOCK_STATUS,
+	};
 
-	memset(&event, 0, sizeof(event));
+	spin_lock_irq(&efw->lock);
 
-	event.lock_status.type = SNDRV_FIREWIRE_EVENT_LOCK_STATUS;
	event.lock_status.status = (efw->dev_lock_count > 0);
	efw->dev_lock_changed = false;
 
+	spin_unlock_irq(&efw->lock);
+
	count = min_t(long, count, sizeof(event.lock_status));
 
	if (copy_to_user(buf, &event, count))
@@ -98,10 +124,15 @@ hwdep_read(struct snd_hwdep *hwdep, char __user *buf, long count,
 {
	struct snd_efw *efw = hwdep->private_data;
	DEFINE_WAIT(wait);
+	bool dev_lock_changed;
+	bool queued;
 
	spin_lock_irq(&efw->lock);
 
-	while ((!efw->dev_lock_changed) && (efw->resp_queues == 0)) {
+	dev_lock_changed = efw->dev_lock_changed;
+	queued = efw->push_ptr != efw->pull_ptr;
+
+	while (!dev_lock_changed && !queued) {
		prepare_to_wait(&efw->hwdep_wait, &wait, TASK_INTERRUPTIBLE);
		spin_unlock_irq(&efw->lock);
		schedule();
@@ -109,15 +140,17 @@ hwdep_read(struct snd_hwdep *hwdep, char __user *buf, long count,
		if (signal_pending(current))
			return -ERESTARTSYS;
		spin_lock_irq(&efw->lock);
+		dev_lock_changed = efw->dev_lock_changed;
+		queued = efw->push_ptr != efw->pull_ptr;
	}
 
-	if (efw->dev_lock_changed)
+	spin_unlock_irq(&efw->lock);
+
+	if (dev_lock_changed)
		count = hwdep_read_locked(efw, buf, count, offset);
-	else if (efw->resp_queues > 0)
+	else if (queued)
		count = hwdep_read_resp_buf(efw, buf, count, offset);
 
-	spin_unlock_irq(&efw->lock);
-
	return count;
 }
 
@@ -160,7 +193,7 @@ hwdep_poll(struct snd_hwdep *hwdep, struct file *file, poll_table *wait)
	poll_wait(file, &efw->hwdep_wait, wait);
 
	spin_lock_irq(&efw->lock);
-	if (efw->dev_lock_changed || (efw->resp_queues > 0))
+	if (efw->dev_lock_changed || efw->pull_ptr != efw->push_ptr)
		events = POLLIN | POLLRDNORM;
	else
		events = 0;
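
Why the locking dance: copy_to_user() may fault and sleep, so it must never
run under a spinlock taken with spin_lock_irq(). Below is a minimal,
self-contained sketch of the pattern the patch adopts in
hwdep_read_resp_buf(): snapshot the consumer pointer under the lock, drop
the lock around each copy to user space, then commit the pointer once at
the end. The struct ring and ring_read() names are illustrative only, not
part of the driver.

#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>

struct ring {
	spinlock_t lock;
	u8 *buf;		/* base address of the ring buffer */
	unsigned int size;	/* ring buffer size in bytes */
	u8 *push_ptr;		/* producer position, advanced in IRQ context */
	u8 *pull_ptr;		/* consumer position */
};

static long ring_read(struct ring *r, char __user *to, long remained)
{
	long count = 0;
	u8 *pull;

	spin_lock_irq(&r->lock);

	/* Local snapshot: shared state stays untouched while unlocked. */
	pull = r->pull_ptr;

	while (r->push_ptr != pull && remained > 0) {
		/* Copy at most up to the wrap-around point. */
		unsigned int till_end = r->size - (unsigned int)(pull - r->buf);

		till_end = min_t(unsigned int, till_end, remained);

		/* copy_to_user() may fault and sleep: drop the lock here. */
		spin_unlock_irq(&r->lock);
		if (copy_to_user(to, pull, till_end))
			return -EFAULT;
		spin_lock_irq(&r->lock);

		pull += till_end;
		if (pull >= r->buf + r->size)
			pull -= r->size;

		to += till_end;
		count += till_end;
		remained -= till_end;
	}

	/* Commit the consumer position only after all copies succeeded. */
	r->pull_ptr = pull;

	spin_unlock_irq(&r->lock);

	return count;
}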