Merge branch 'for-linus' into for-next
Back-merge from for-linus just to make the further development easier.
@@ -108,7 +108,6 @@ struct snd_efw {
 	u8 *resp_buf;
 	u8 *pull_ptr;
 	u8 *push_ptr;
-	unsigned int resp_queues;
 };
 
 int snd_efw_transaction_cmd(struct fw_unit *unit,
@@ -25,6 +25,7 @@ hwdep_read_resp_buf(struct snd_efw *efw, char __user *buf, long remained,
 {
 	unsigned int length, till_end, type;
 	struct snd_efw_transaction *t;
+	u8 *pull_ptr;
 	long count = 0;
 
 	if (remained < sizeof(type) + sizeof(struct snd_efw_transaction))
@@ -38,8 +39,17 @@ hwdep_read_resp_buf(struct snd_efw *efw, char __user *buf, long remained,
 	buf += sizeof(type);
 
 	/* write into buffer as many responses as possible */
-	while (efw->resp_queues > 0) {
-		t = (struct snd_efw_transaction *)(efw->pull_ptr);
+	spin_lock_irq(&efw->lock);
+
+	/*
+	 * When another task reaches here during this task's access to user
+	 * space, it picks up current position in buffer and can read the same
+	 * series of responses.
+	 */
+	pull_ptr = efw->pull_ptr;
+
+	while (efw->push_ptr != pull_ptr) {
+		t = (struct snd_efw_transaction *)(pull_ptr);
 		length = be32_to_cpu(t->length) * sizeof(__be32);
 
 		/* confirm enough space for this response */
@@ -49,26 +59,39 @@ hwdep_read_resp_buf(struct snd_efw *efw, char __user *buf, long remained,
 		/* copy from ring buffer to user buffer */
 		while (length > 0) {
 			till_end = snd_efw_resp_buf_size -
-				(unsigned int)(efw->pull_ptr - efw->resp_buf);
+				(unsigned int)(pull_ptr - efw->resp_buf);
 			till_end = min_t(unsigned int, length, till_end);
 
-			if (copy_to_user(buf, efw->pull_ptr, till_end))
+			spin_unlock_irq(&efw->lock);
+
+			if (copy_to_user(buf, pull_ptr, till_end))
 				return -EFAULT;
 
-			efw->pull_ptr += till_end;
-			if (efw->pull_ptr >= efw->resp_buf +
-					    snd_efw_resp_buf_size)
-				efw->pull_ptr -= snd_efw_resp_buf_size;
+			spin_lock_irq(&efw->lock);
+
+			pull_ptr += till_end;
+			if (pull_ptr >= efw->resp_buf + snd_efw_resp_buf_size)
+				pull_ptr -= snd_efw_resp_buf_size;
 
 			length -= till_end;
 			buf += till_end;
 			count += till_end;
 			remained -= till_end;
 		}
-
-		efw->resp_queues--;
 	}
 
+	/*
+	 * All of tasks can read from the buffer nearly simultaneously, but the
+	 * last position for each task is different depending on the length of
+	 * given buffer. Here, for simplicity, a position of buffer is set by
+	 * the latest task. It's better for a listening application to allow one
+	 * thread to read from the buffer. Unless, each task can read different
+	 * sequence of responses depending on variation of buffer length.
+	 */
+	efw->pull_ptr = pull_ptr;
+
+	spin_unlock_irq(&efw->lock);
+
 	return count;
 }
 
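The hunks above all apply one idea: snapshot efw->pull_ptr into a local while the spinlock is held, drop the lock around copy_to_user(), and publish the advanced position once at the end. Below is a rough user-space analogue of that read path, with a pthread mutex standing in for the spinlock and memcpy() for copy_to_user(); all names and sizes are invented for illustration, this is not driver code.

#include <pthread.h>
#include <stdio.h>
#include <string.h>

#define RING_SIZE 1024u                 /* stands in for snd_efw_resp_buf_size */

struct ring {
        unsigned char buf[RING_SIZE];
        unsigned char *pull_ptr;        /* next byte a reader consumes */
        unsigned char *push_ptr;        /* next byte a writer fills */
        pthread_mutex_t lock;
};

/* Drain queued bytes into dst (at most len); never copy while holding the lock. */
static size_t ring_read(struct ring *r, unsigned char *dst, size_t len)
{
        unsigned char *pull;
        size_t count = 0;

        pthread_mutex_lock(&r->lock);
        pull = r->pull_ptr;                     /* local snapshot, as in the patch */

        while (r->push_ptr != pull && len > 0) {
                size_t chunk;

                if (r->push_ptr > pull)
                        chunk = (size_t)(r->push_ptr - pull);        /* contiguous data */
                else
                        chunk = RING_SIZE - (size_t)(pull - r->buf); /* up to the wrap */
                if (chunk > len)
                        chunk = len;

                pthread_mutex_unlock(&r->lock);
                memcpy(dst, pull, chunk);       /* the copy_to_user() stand-in */
                pthread_mutex_lock(&r->lock);

                pull += chunk;
                if (pull >= r->buf + RING_SIZE)
                        pull -= RING_SIZE;

                dst += chunk;
                len -= chunk;
                count += chunk;
        }

        r->pull_ptr = pull;                     /* publish the new position once */
        pthread_mutex_unlock(&r->lock);
        return count;
}

int main(void)
{
        static struct ring r = { .lock = PTHREAD_MUTEX_INITIALIZER };
        unsigned char out[8];

        r.pull_ptr = r.buf;
        r.push_ptr = r.buf;
        memcpy(r.buf, "hello", 5);              /* pretend a producer queued 5 bytes */
        r.push_ptr += 5;

        printf("read %zu bytes\n", ring_read(&r, out, sizeof(out)));
        return 0;
}

Because the snapshot is only written back at the end, a producer that runs while the lock is dropped still sees the old pull position and cannot overwrite the region being copied.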
@@ -76,14 +99,17 @@ static long
 hwdep_read_locked(struct snd_efw *efw, char __user *buf, long count,
 		  loff_t *offset)
 {
-	union snd_firewire_event event;
+	union snd_firewire_event event = {
+		.lock_status.type = SNDRV_FIREWIRE_EVENT_LOCK_STATUS,
+	};
 
-	memset(&event, 0, sizeof(event));
+	spin_lock_irq(&efw->lock);
 
-	event.lock_status.type = SNDRV_FIREWIRE_EVENT_LOCK_STATUS;
 	event.lock_status.status = (efw->dev_lock_count > 0);
 	efw->dev_lock_changed = false;
 
+	spin_unlock_irq(&efw->lock);
+
 	count = min_t(long, count, sizeof(event.lock_status));
 
 	if (copy_to_user(buf, &event, count))
@@ -98,10 +124,15 @@ hwdep_read(struct snd_hwdep *hwdep, char __user *buf, long count,
 {
 	struct snd_efw *efw = hwdep->private_data;
 	DEFINE_WAIT(wait);
+	bool dev_lock_changed;
+	bool queued;
 
 	spin_lock_irq(&efw->lock);
 
-	while ((!efw->dev_lock_changed) && (efw->resp_queues == 0)) {
+	dev_lock_changed = efw->dev_lock_changed;
+	queued = efw->push_ptr != efw->pull_ptr;
+
+	while (!dev_lock_changed && !queued) {
 		prepare_to_wait(&efw->hwdep_wait, &wait, TASK_INTERRUPTIBLE);
 		spin_unlock_irq(&efw->lock);
 		schedule();
@@ -109,15 +140,17 @@ hwdep_read(struct snd_hwdep *hwdep, char __user *buf, long count,
 		if (signal_pending(current))
 			return -ERESTARTSYS;
 		spin_lock_irq(&efw->lock);
+		dev_lock_changed = efw->dev_lock_changed;
+		queued = efw->push_ptr != efw->pull_ptr;
 	}
 
-	if (efw->dev_lock_changed)
-		count = hwdep_read_locked(efw, buf, count, offset);
-	else if (efw->resp_queues > 0)
-		count = hwdep_read_resp_buf(efw, buf, count, offset);
-
 	spin_unlock_irq(&efw->lock);
 
+	if (dev_lock_changed)
+		count = hwdep_read_locked(efw, buf, count, offset);
+	else if (queued)
+		count = hwdep_read_resp_buf(efw, buf, count, offset);
+
 	return count;
 }
 
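hwdep_read() now latches the wake-up conditions into locals while the lock is held, releases the lock, and only then calls the helpers that touch user space. A condensed sketch of that shape using a pthread condition variable follows; it is purely illustrative (the kernel code uses prepare_to_wait()/schedule(), and the struct and helper names below are invented).

#include <pthread.h>
#include <stdbool.h>

struct dev {
        pthread_mutex_t lock;
        pthread_cond_t wait;
        bool lock_changed;              /* stands in for dev_lock_changed */
        bool queued;                    /* stands in for push_ptr != pull_ptr */
};

/* Placeholders for the two copy paths that run without the lock. */
static long read_lock_event(struct dev *d) { (void)d; return 1; }
static long read_responses(struct dev *d)  { (void)d; return 1; }

static long dev_read(struct dev *d)
{
        bool lock_changed, queued;

        pthread_mutex_lock(&d->lock);

        /* Latch the wake-up reasons while the lock is held. */
        lock_changed = d->lock_changed;
        queued = d->queued;
        while (!lock_changed && !queued) {
                pthread_cond_wait(&d->wait, &d->lock);
                lock_changed = d->lock_changed;
                queued = d->queued;
        }

        pthread_mutex_unlock(&d->lock);

        /* Decide from the latched values; any copy to the caller runs unlocked. */
        if (lock_changed)
                return read_lock_event(d);
        return read_responses(d);
}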
@@ -160,7 +193,7 @@ hwdep_poll(struct snd_hwdep *hwdep, struct file *file, poll_table *wait)
 	poll_wait(file, &efw->hwdep_wait, wait);
 
 	spin_lock_irq(&efw->lock);
-	if (efw->dev_lock_changed || (efw->resp_queues > 0))
+	if (efw->dev_lock_changed || efw->pull_ptr != efw->push_ptr)
 		events = POLLIN | POLLRDNORM;
 	else
 		events = 0;
 
@@ -188,8 +188,8 @@ proc_read_queues_state(struct snd_info_entry *entry,
 	else
 		consumed = (unsigned int)(efw->push_ptr - efw->pull_ptr);
 
-	snd_iprintf(buffer, "%d %d/%d\n",
-		    efw->resp_queues, consumed, snd_efw_resp_buf_size);
+	snd_iprintf(buffer, "%d/%d\n",
+		    consumed, snd_efw_resp_buf_size);
 }
 
 static void
@@ -121,11 +121,11 @@ copy_resp_to_buf(struct snd_efw *efw, void *data, size_t length, int *rcode)
 	size_t capacity, till_end;
 	struct snd_efw_transaction *t;
 
-	spin_lock_irq(&efw->lock);
-
 	t = (struct snd_efw_transaction *)data;
 	length = min_t(size_t, be32_to_cpu(t->length) * sizeof(u32), length);
 
+	spin_lock_irq(&efw->lock);
+
 	if (efw->push_ptr < efw->pull_ptr)
 		capacity = (unsigned int)(efw->pull_ptr - efw->push_ptr);
 	else
@@ -155,7 +155,6 @@ copy_resp_to_buf(struct snd_efw *efw, void *data, size_t length, int *rcode)
 	}
 
 	/* for hwdep */
-	efw->resp_queues++;
 	wake_up(&efw->hwdep_wait);
 
 	*rcode = RCODE_COMPLETE;
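On the write side, copy_resp_to_buf() now parses the response length before taking the lock, checks the free space, copies into the ring with wrap-around, and wakes readers afterwards; the resp_queues counter is gone, so "data pending" is derived from the pointers alone. A hedged user-space sketch of that producer path, with hypothetical names and pthread primitives in place of the spinlock and wait queue:

#include <pthread.h>
#include <string.h>

#define RING_SIZE 1024u

struct ring {
        unsigned char buf[RING_SIZE];
        unsigned char *pull_ptr;
        unsigned char *push_ptr;
        pthread_mutex_t lock;
        pthread_cond_t wait;
};

/* Queue one response; returns 0 on success, -1 when it does not fit. */
static int ring_write(struct ring *r, const unsigned char *data, size_t length)
{
        size_t capacity, till_end;

        pthread_mutex_lock(&r->lock);

        /* Free space is everything outside the pull..push region. */
        if (r->push_ptr < r->pull_ptr)
                capacity = (size_t)(r->pull_ptr - r->push_ptr);
        else
                capacity = RING_SIZE - (size_t)(r->push_ptr - r->pull_ptr);

        /* Keep one byte unused so that push == pull always means "empty". */
        if (length >= capacity) {
                pthread_mutex_unlock(&r->lock);
                return -1;
        }

        /* Copy in, wrapping at the end of the buffer. */
        while (length > 0) {
                till_end = RING_SIZE - (size_t)(r->push_ptr - r->buf);
                if (till_end > length)
                        till_end = length;

                memcpy(r->push_ptr, data, till_end);

                r->push_ptr += till_end;
                if (r->push_ptr >= r->buf + RING_SIZE)
                        r->push_ptr -= RING_SIZE;

                data += till_end;
                length -= till_end;
        }

        pthread_mutex_unlock(&r->lock);
        pthread_cond_broadcast(&r->wait);       /* counterpart of wake_up(&efw->hwdep_wait) */
        return 0;
}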
@@ -16,31 +16,14 @@
 
 #include "tascam.h"
 
-static long hwdep_read_locked(struct snd_tscm *tscm, char __user *buf,
-			      long count)
-{
-	union snd_firewire_event event;
-
-	memset(&event, 0, sizeof(event));
-
-	event.lock_status.type = SNDRV_FIREWIRE_EVENT_LOCK_STATUS;
-	event.lock_status.status = (tscm->dev_lock_count > 0);
-	tscm->dev_lock_changed = false;
-
-	count = min_t(long, count, sizeof(event.lock_status));
-
-	if (copy_to_user(buf, &event, count))
-		return -EFAULT;
-
-	return count;
-}
-
 static long hwdep_read(struct snd_hwdep *hwdep, char __user *buf, long count,
 		       loff_t *offset)
 {
 	struct snd_tscm *tscm = hwdep->private_data;
 	DEFINE_WAIT(wait);
-	union snd_firewire_event event;
+	union snd_firewire_event event = {
+		.lock_status.type = SNDRV_FIREWIRE_EVENT_LOCK_STATUS,
+	};
 
 	spin_lock_irq(&tscm->lock);
 
@@ -54,10 +37,16 @@ static long hwdep_read(struct snd_hwdep *hwdep, char __user *buf, long count,
 		spin_lock_irq(&tscm->lock);
 	}
 
-	memset(&event, 0, sizeof(event));
-	count = hwdep_read_locked(tscm, buf, count);
+	event.lock_status.status = (tscm->dev_lock_count > 0);
+	tscm->dev_lock_changed = false;
+
 	spin_unlock_irq(&tscm->lock);
 
+	count = min_t(long, count, sizeof(event.lock_status));
+
+	if (copy_to_user(buf, &event, count))
+		return -EFAULT;
+
 	return count;
 }
 
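The tascam change mirrors the fireworks one: the event is built on the stack with a designated initializer instead of memset() plus per-field assignments, the variable fields are filled in under the lock, and copy_to_user() runs after the unlock. A tiny standalone comparison of the two initialization styles, using a hypothetical struct rather than the ALSA definition:

#include <stdio.h>
#include <string.h>

struct lock_status_event {
        unsigned int type;
        unsigned int status;
};

#define EVENT_LOCK_STATUS 0x01u   /* stand-in for SNDRV_FIREWIRE_EVENT_LOCK_STATUS */

int main(void)
{
        /* Old style: zero everything, then assign each field. */
        struct lock_status_event a;
        memset(&a, 0, sizeof(a));
        a.type = EVENT_LOCK_STATUS;
        a.status = 1;

        /* New style: a designated initializer zero-fills the remaining members. */
        struct lock_status_event b = {
                .type = EVENT_LOCK_STATUS,
        };
        b.status = 1;   /* only the dynamic field is set later (under the lock) */

        printf("%u %u / %u %u\n", a.type, a.status, b.type, b.status);
        return 0;
}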