|
@@ -74,6 +74,8 @@ MODULE_DESCRIPTION (DRIVER_DESC);
|
|
|
MODULE_AUTHOR ("David Brownell");
|
|
|
MODULE_LICENSE ("GPL");
|
|
|
|
|
|
+static int ep_open(struct inode *, struct file *);
|
|
|
+
|
|
|
|
|
|
/*----------------------------------------------------------------------*/
|
|
|
|
|
@@ -283,14 +285,15 @@ static void epio_complete (struct usb_ep *ep, struct usb_request *req)
|
|
|
* still need dev->lock to use epdata->ep.
|
|
|
*/
|
|
|
static int
|
|
|
-get_ready_ep (unsigned f_flags, struct ep_data *epdata)
|
|
|
+get_ready_ep (unsigned f_flags, struct ep_data *epdata, bool is_write)
|
|
|
{
|
|
|
int val;
|
|
|
|
|
|
if (f_flags & O_NONBLOCK) {
|
|
|
if (!mutex_trylock(&epdata->lock))
|
|
|
goto nonblock;
|
|
|
- if (epdata->state != STATE_EP_ENABLED) {
|
|
|
+ if (epdata->state != STATE_EP_ENABLED &&
|
|
|
+ (!is_write || epdata->state != STATE_EP_READY)) {
|
|
|
mutex_unlock(&epdata->lock);
|
|
|
nonblock:
|
|
|
val = -EAGAIN;
|
|
@@ -305,18 +308,20 @@ nonblock:
|
|
|
|
|
|
switch (epdata->state) {
|
|
|
case STATE_EP_ENABLED:
|
|
|
+ return 0;
|
|
|
+ case STATE_EP_READY: /* not configured yet */
|
|
|
+ if (is_write)
|
|
|
+ return 0;
|
|
|
+ // FALLTHRU
|
|
|
+ case STATE_EP_UNBOUND: /* clean disconnect */
|
|
|
break;
|
|
|
// case STATE_EP_DISABLED: /* "can't happen" */
|
|
|
- // case STATE_EP_READY: /* "can't happen" */
|
|
|
default: /* error! */
|
|
|
pr_debug ("%s: ep %p not available, state %d\n",
|
|
|
shortname, epdata, epdata->state);
|
|
|
- // FALLTHROUGH
|
|
|
- case STATE_EP_UNBOUND: /* clean disconnect */
|
|
|
- val = -ENODEV;
|
|
|
- mutex_unlock(&epdata->lock);
|
|
|
}
|
|
|
- return val;
|
|
|
+ mutex_unlock(&epdata->lock);
|
|
|
+ return -ENODEV;
|
|
|
}
|
|
|
|
|
|
static ssize_t
|
|
@@ -363,97 +368,6 @@ ep_io (struct ep_data *epdata, void *buf, unsigned len)
|
|
|
return value;
|
|
|
}
|
|
|
|
|
|
-
|
|
|
-/* handle a synchronous OUT bulk/intr/iso transfer */
|
|
|
-static ssize_t
|
|
|
-ep_read (struct file *fd, char __user *buf, size_t len, loff_t *ptr)
|
|
|
-{
|
|
|
- struct ep_data *data = fd->private_data;
|
|
|
- void *kbuf;
|
|
|
- ssize_t value;
|
|
|
-
|
|
|
- if ((value = get_ready_ep (fd->f_flags, data)) < 0)
|
|
|
- return value;
|
|
|
-
|
|
|
- /* halt any endpoint by doing a "wrong direction" i/o call */
|
|
|
- if (usb_endpoint_dir_in(&data->desc)) {
|
|
|
- if (usb_endpoint_xfer_isoc(&data->desc)) {
|
|
|
- mutex_unlock(&data->lock);
|
|
|
- return -EINVAL;
|
|
|
- }
|
|
|
- DBG (data->dev, "%s halt\n", data->name);
|
|
|
- spin_lock_irq (&data->dev->lock);
|
|
|
- if (likely (data->ep != NULL))
|
|
|
- usb_ep_set_halt (data->ep);
|
|
|
- spin_unlock_irq (&data->dev->lock);
|
|
|
- mutex_unlock(&data->lock);
|
|
|
- return -EBADMSG;
|
|
|
- }
|
|
|
-
|
|
|
- /* FIXME readahead for O_NONBLOCK and poll(); careful with ZLPs */
|
|
|
-
|
|
|
- value = -ENOMEM;
|
|
|
- kbuf = kmalloc (len, GFP_KERNEL);
|
|
|
- if (unlikely (!kbuf))
|
|
|
- goto free1;
|
|
|
-
|
|
|
- value = ep_io (data, kbuf, len);
|
|
|
- VDEBUG (data->dev, "%s read %zu OUT, status %d\n",
|
|
|
- data->name, len, (int) value);
|
|
|
- if (value >= 0 && copy_to_user (buf, kbuf, value))
|
|
|
- value = -EFAULT;
|
|
|
-
|
|
|
-free1:
|
|
|
- mutex_unlock(&data->lock);
|
|
|
- kfree (kbuf);
|
|
|
- return value;
|
|
|
-}
|
|
|
-
|
|
|
-/* handle a synchronous IN bulk/intr/iso transfer */
|
|
|
-static ssize_t
|
|
|
-ep_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
|
|
|
-{
|
|
|
- struct ep_data *data = fd->private_data;
|
|
|
- void *kbuf;
|
|
|
- ssize_t value;
|
|
|
-
|
|
|
- if ((value = get_ready_ep (fd->f_flags, data)) < 0)
|
|
|
- return value;
|
|
|
-
|
|
|
- /* halt any endpoint by doing a "wrong direction" i/o call */
|
|
|
- if (!usb_endpoint_dir_in(&data->desc)) {
|
|
|
- if (usb_endpoint_xfer_isoc(&data->desc)) {
|
|
|
- mutex_unlock(&data->lock);
|
|
|
- return -EINVAL;
|
|
|
- }
|
|
|
- DBG (data->dev, "%s halt\n", data->name);
|
|
|
- spin_lock_irq (&data->dev->lock);
|
|
|
- if (likely (data->ep != NULL))
|
|
|
- usb_ep_set_halt (data->ep);
|
|
|
- spin_unlock_irq (&data->dev->lock);
|
|
|
- mutex_unlock(&data->lock);
|
|
|
- return -EBADMSG;
|
|
|
- }
|
|
|
-
|
|
|
- /* FIXME writebehind for O_NONBLOCK and poll(), qlen = 1 */
|
|
|
-
|
|
|
- value = -ENOMEM;
|
|
|
- kbuf = memdup_user(buf, len);
|
|
|
- if (IS_ERR(kbuf)) {
|
|
|
- value = PTR_ERR(kbuf);
|
|
|
- kbuf = NULL;
|
|
|
- goto free1;
|
|
|
- }
|
|
|
-
|
|
|
- value = ep_io (data, kbuf, len);
|
|
|
- VDEBUG (data->dev, "%s write %zu IN, status %d\n",
|
|
|
- data->name, len, (int) value);
|
|
|
-free1:
|
|
|
- mutex_unlock(&data->lock);
|
|
|
- kfree (kbuf);
|
|
|
- return value;
|
|
|
-}
|
|
|
-
|
|
|
static int
|
|
|
ep_release (struct inode *inode, struct file *fd)
|
|
|
{
|
|
@@ -481,7 +395,7 @@ static long ep_ioctl(struct file *fd, unsigned code, unsigned long value)
|
|
|
struct ep_data *data = fd->private_data;
|
|
|
int status;
|
|
|
|
|
|
- if ((status = get_ready_ep (fd->f_flags, data)) < 0)
|
|
|
+ if ((status = get_ready_ep (fd->f_flags, data, false)) < 0)
|
|
|
return status;
|
|
|
|
|
|
spin_lock_irq (&data->dev->lock);
|
|
@@ -517,8 +431,8 @@ struct kiocb_priv {
|
|
|
struct mm_struct *mm;
|
|
|
struct work_struct work;
|
|
|
void *buf;
|
|
|
- const struct iovec *iv;
|
|
|
- unsigned long nr_segs;
|
|
|
+ struct iov_iter to;
|
|
|
+ const void *to_free;
|
|
|
unsigned actual;
|
|
|
};
|
|
|
|
|
@@ -541,35 +455,6 @@ static int ep_aio_cancel(struct kiocb *iocb)
|
|
|
return value;
|
|
|
}
|
|
|
|
|
|
-static ssize_t ep_copy_to_user(struct kiocb_priv *priv)
|
|
|
-{
|
|
|
- ssize_t len, total;
|
|
|
- void *to_copy;
|
|
|
- int i;
|
|
|
-
|
|
|
- /* copy stuff into user buffers */
|
|
|
- total = priv->actual;
|
|
|
- len = 0;
|
|
|
- to_copy = priv->buf;
|
|
|
- for (i=0; i < priv->nr_segs; i++) {
|
|
|
- ssize_t this = min((ssize_t)(priv->iv[i].iov_len), total);
|
|
|
-
|
|
|
- if (copy_to_user(priv->iv[i].iov_base, to_copy, this)) {
|
|
|
- if (len == 0)
|
|
|
- len = -EFAULT;
|
|
|
- break;
|
|
|
- }
|
|
|
-
|
|
|
- total -= this;
|
|
|
- len += this;
|
|
|
- to_copy += this;
|
|
|
- if (total == 0)
|
|
|
- break;
|
|
|
- }
|
|
|
-
|
|
|
- return len;
|
|
|
-}
|
|
|
-
|
|
|
static void ep_user_copy_worker(struct work_struct *work)
|
|
|
{
|
|
|
struct kiocb_priv *priv = container_of(work, struct kiocb_priv, work);
|
|
@@ -578,13 +463,16 @@ static void ep_user_copy_worker(struct work_struct *work)
|
|
|
size_t ret;
|
|
|
|
|
|
use_mm(mm);
|
|
|
- ret = ep_copy_to_user(priv);
|
|
|
+ ret = copy_to_iter(priv->buf, priv->actual, &priv->to);
|
|
|
unuse_mm(mm);
|
|
|
+ if (!ret)
|
|
|
+ ret = -EFAULT;
|
|
|
|
|
|
/* completing the iocb can drop the ctx and mm, don't touch mm after */
|
|
|
aio_complete(iocb, ret, ret);
|
|
|
|
|
|
kfree(priv->buf);
|
|
|
+ kfree(priv->to_free);
|
|
|
kfree(priv);
|
|
|
}
|
|
|
|
|
@@ -603,8 +491,9 @@ static void ep_aio_complete(struct usb_ep *ep, struct usb_request *req)
|
|
|
* don't need to copy anything to userspace, so we can
|
|
|
* complete the aio request immediately.
|
|
|
*/
|
|
|
- if (priv->iv == NULL || unlikely(req->actual == 0)) {
|
|
|
+ if (priv->to_free == NULL || unlikely(req->actual == 0)) {
|
|
|
kfree(req->buf);
|
|
|
+ kfree(priv->to_free);
|
|
|
kfree(priv);
|
|
|
iocb->private = NULL;
|
|
|
/* aio_complete() reports bytes-transferred _and_ faults */
|
|
@@ -618,6 +507,7 @@ static void ep_aio_complete(struct usb_ep *ep, struct usb_request *req)
|
|
|
|
|
|
priv->buf = req->buf;
|
|
|
priv->actual = req->actual;
|
|
|
+ INIT_WORK(&priv->work, ep_user_copy_worker);
|
|
|
schedule_work(&priv->work);
|
|
|
}
|
|
|
spin_unlock(&epdata->dev->lock);
|
|
@@ -626,38 +516,17 @@ static void ep_aio_complete(struct usb_ep *ep, struct usb_request *req)
|
|
|
put_ep(epdata);
|
|
|
}
|
|
|
|
|
|
-static ssize_t
|
|
|
-ep_aio_rwtail(
|
|
|
- struct kiocb *iocb,
|
|
|
- char *buf,
|
|
|
- size_t len,
|
|
|
- struct ep_data *epdata,
|
|
|
- const struct iovec *iv,
|
|
|
- unsigned long nr_segs
|
|
|
-)
|
|
|
+static ssize_t ep_aio(struct kiocb *iocb,
|
|
|
+ struct kiocb_priv *priv,
|
|
|
+ struct ep_data *epdata,
|
|
|
+ char *buf,
|
|
|
+ size_t len)
|
|
|
{
|
|
|
- struct kiocb_priv *priv;
|
|
|
- struct usb_request *req;
|
|
|
- ssize_t value;
|
|
|
+ struct usb_request *req;
|
|
|
+ ssize_t value;
|
|
|
|
|
|
- priv = kmalloc(sizeof *priv, GFP_KERNEL);
|
|
|
- if (!priv) {
|
|
|
- value = -ENOMEM;
|
|
|
-fail:
|
|
|
- kfree(buf);
|
|
|
- return value;
|
|
|
- }
|
|
|
iocb->private = priv;
|
|
|
priv->iocb = iocb;
|
|
|
- priv->iv = iv;
|
|
|
- priv->nr_segs = nr_segs;
|
|
|
- INIT_WORK(&priv->work, ep_user_copy_worker);
|
|
|
-
|
|
|
- value = get_ready_ep(iocb->ki_filp->f_flags, epdata);
|
|
|
- if (unlikely(value < 0)) {
|
|
|
- kfree(priv);
|
|
|
- goto fail;
|
|
|
- }
|
|
|
|
|
|
kiocb_set_cancel_fn(iocb, ep_aio_cancel);
|
|
|
get_ep(epdata);
|
|
@@ -669,75 +538,154 @@ fail:
|
|
|
* allocate or submit those if the host disconnected.
|
|
|
*/
|
|
|
spin_lock_irq(&epdata->dev->lock);
|
|
|
- if (likely(epdata->ep)) {
|
|
|
- req = usb_ep_alloc_request(epdata->ep, GFP_ATOMIC);
|
|
|
- if (likely(req)) {
|
|
|
- priv->req = req;
|
|
|
- req->buf = buf;
|
|
|
- req->length = len;
|
|
|
- req->complete = ep_aio_complete;
|
|
|
- req->context = iocb;
|
|
|
- value = usb_ep_queue(epdata->ep, req, GFP_ATOMIC);
|
|
|
- if (unlikely(0 != value))
|
|
|
- usb_ep_free_request(epdata->ep, req);
|
|
|
- } else
|
|
|
- value = -EAGAIN;
|
|
|
- } else
|
|
|
- value = -ENODEV;
|
|
|
- spin_unlock_irq(&epdata->dev->lock);
|
|
|
+ value = -ENODEV;
|
|
|
+	if (unlikely(!epdata->ep))
|
|
|
+ goto fail;
|
|
|
|
|
|
- mutex_unlock(&epdata->lock);
|
|
|
+ req = usb_ep_alloc_request(epdata->ep, GFP_ATOMIC);
|
|
|
+ value = -ENOMEM;
|
|
|
+ if (unlikely(!req))
|
|
|
+ goto fail;
|
|
|
|
|
|
- if (unlikely(value)) {
|
|
|
- kfree(priv);
|
|
|
- put_ep(epdata);
|
|
|
- } else
|
|
|
- value = -EIOCBQUEUED;
|
|
|
+ priv->req = req;
|
|
|
+ req->buf = buf;
|
|
|
+ req->length = len;
|
|
|
+ req->complete = ep_aio_complete;
|
|
|
+ req->context = iocb;
|
|
|
+ value = usb_ep_queue(epdata->ep, req, GFP_ATOMIC);
|
|
|
+ if (unlikely(0 != value)) {
|
|
|
+ usb_ep_free_request(epdata->ep, req);
|
|
|
+ goto fail;
|
|
|
+ }
|
|
|
+ spin_unlock_irq(&epdata->dev->lock);
|
|
|
+ return -EIOCBQUEUED;
|
|
|
+
|
|
|
+fail:
|
|
|
+ spin_unlock_irq(&epdata->dev->lock);
|
|
|
+ kfree(priv->to_free);
|
|
|
+ kfree(priv);
|
|
|
+ put_ep(epdata);
|
|
|
return value;
|
|
|
}
|
|
|
|
|
|
static ssize_t
|
|
|
-ep_aio_read(struct kiocb *iocb, const struct iovec *iov,
|
|
|
- unsigned long nr_segs, loff_t o)
|
|
|
+ep_read_iter(struct kiocb *iocb, struct iov_iter *to)
|
|
|
{
|
|
|
- struct ep_data *epdata = iocb->ki_filp->private_data;
|
|
|
- char *buf;
|
|
|
+ struct file *file = iocb->ki_filp;
|
|
|
+ struct ep_data *epdata = file->private_data;
|
|
|
+ size_t len = iov_iter_count(to);
|
|
|
+ ssize_t value;
|
|
|
+ char *buf;
|
|
|
|
|
|
- if (unlikely(usb_endpoint_dir_in(&epdata->desc)))
|
|
|
- return -EINVAL;
|
|
|
+ if ((value = get_ready_ep(file->f_flags, epdata, false)) < 0)
|
|
|
+ return value;
|
|
|
|
|
|
- buf = kmalloc(iocb->ki_nbytes, GFP_KERNEL);
|
|
|
- if (unlikely(!buf))
|
|
|
- return -ENOMEM;
|
|
|
+ /* halt any endpoint by doing a "wrong direction" i/o call */
|
|
|
+ if (usb_endpoint_dir_in(&epdata->desc)) {
|
|
|
+ if (usb_endpoint_xfer_isoc(&epdata->desc) ||
|
|
|
+ !is_sync_kiocb(iocb)) {
|
|
|
+ mutex_unlock(&epdata->lock);
|
|
|
+ return -EINVAL;
|
|
|
+ }
|
|
|
+ DBG (epdata->dev, "%s halt\n", epdata->name);
|
|
|
+ spin_lock_irq(&epdata->dev->lock);
|
|
|
+ if (likely(epdata->ep != NULL))
|
|
|
+ usb_ep_set_halt(epdata->ep);
|
|
|
+ spin_unlock_irq(&epdata->dev->lock);
|
|
|
+ mutex_unlock(&epdata->lock);
|
|
|
+ return -EBADMSG;
|
|
|
+ }
|
|
|
|
|
|
- return ep_aio_rwtail(iocb, buf, iocb->ki_nbytes, epdata, iov, nr_segs);
|
|
|
+ buf = kmalloc(len, GFP_KERNEL);
|
|
|
+ if (unlikely(!buf)) {
|
|
|
+ mutex_unlock(&epdata->lock);
|
|
|
+ return -ENOMEM;
|
|
|
+ }
|
|
|
+ if (is_sync_kiocb(iocb)) {
|
|
|
+ value = ep_io(epdata, buf, len);
|
|
|
+		if (value >= 0 && copy_to_iter(buf, value, to) != value)
|
|
|
+ value = -EFAULT;
|
|
|
+ } else {
|
|
|
+ struct kiocb_priv *priv = kzalloc(sizeof *priv, GFP_KERNEL);
|
|
|
+ value = -ENOMEM;
|
|
|
+ if (!priv)
|
|
|
+ goto fail;
|
|
|
+ priv->to_free = dup_iter(&priv->to, to, GFP_KERNEL);
|
|
|
+ if (!priv->to_free) {
|
|
|
+ kfree(priv);
|
|
|
+ goto fail;
|
|
|
+ }
|
|
|
+ value = ep_aio(iocb, priv, epdata, buf, len);
|
|
|
+ if (value == -EIOCBQUEUED)
|
|
|
+ buf = NULL;
|
|
|
+ }
|
|
|
+fail:
|
|
|
+ kfree(buf);
|
|
|
+ mutex_unlock(&epdata->lock);
|
|
|
+ return value;
|
|
|
}
|
|
|
|
|
|
+static ssize_t ep_config(struct ep_data *, const char *, size_t);
|
|
|
+
|
|
|
static ssize_t
|
|
|
-ep_aio_write(struct kiocb *iocb, const struct iovec *iov,
|
|
|
- unsigned long nr_segs, loff_t o)
|
|
|
+ep_write_iter(struct kiocb *iocb, struct iov_iter *from)
|
|
|
{
|
|
|
- struct ep_data *epdata = iocb->ki_filp->private_data;
|
|
|
- char *buf;
|
|
|
- size_t len = 0;
|
|
|
- int i = 0;
|
|
|
+ struct file *file = iocb->ki_filp;
|
|
|
+ struct ep_data *epdata = file->private_data;
|
|
|
+ size_t len = iov_iter_count(from);
|
|
|
+ bool configured;
|
|
|
+ ssize_t value;
|
|
|
+ char *buf;
|
|
|
+
|
|
|
+ if ((value = get_ready_ep(file->f_flags, epdata, true)) < 0)
|
|
|
+ return value;
|
|
|
|
|
|
- if (unlikely(!usb_endpoint_dir_in(&epdata->desc)))
|
|
|
- return -EINVAL;
|
|
|
+ configured = epdata->state == STATE_EP_ENABLED;
|
|
|
|
|
|
- buf = kmalloc(iocb->ki_nbytes, GFP_KERNEL);
|
|
|
- if (unlikely(!buf))
|
|
|
+ /* halt any endpoint by doing a "wrong direction" i/o call */
|
|
|
+ if (configured && !usb_endpoint_dir_in(&epdata->desc)) {
|
|
|
+ if (usb_endpoint_xfer_isoc(&epdata->desc) ||
|
|
|
+ !is_sync_kiocb(iocb)) {
|
|
|
+ mutex_unlock(&epdata->lock);
|
|
|
+ return -EINVAL;
|
|
|
+ }
|
|
|
+ DBG (epdata->dev, "%s halt\n", epdata->name);
|
|
|
+ spin_lock_irq(&epdata->dev->lock);
|
|
|
+ if (likely(epdata->ep != NULL))
|
|
|
+ usb_ep_set_halt(epdata->ep);
|
|
|
+ spin_unlock_irq(&epdata->dev->lock);
|
|
|
+ mutex_unlock(&epdata->lock);
|
|
|
+ return -EBADMSG;
|
|
|
+ }
|
|
|
+
|
|
|
+ buf = kmalloc(len, GFP_KERNEL);
|
|
|
+ if (unlikely(!buf)) {
|
|
|
+ mutex_unlock(&epdata->lock);
|
|
|
return -ENOMEM;
|
|
|
+ }
|
|
|
|
|
|
- for (i=0; i < nr_segs; i++) {
|
|
|
- if (unlikely(copy_from_user(&buf[len], iov[i].iov_base,
|
|
|
- iov[i].iov_len) != 0)) {
|
|
|
- kfree(buf);
|
|
|
- return -EFAULT;
|
|
|
+ if (unlikely(copy_from_iter(buf, len, from) != len)) {
|
|
|
+ value = -EFAULT;
|
|
|
+ goto out;
|
|
|
+ }
|
|
|
+
|
|
|
+ if (unlikely(!configured)) {
|
|
|
+ value = ep_config(epdata, buf, len);
|
|
|
+ } else if (is_sync_kiocb(iocb)) {
|
|
|
+ value = ep_io(epdata, buf, len);
|
|
|
+ } else {
|
|
|
+ struct kiocb_priv *priv = kzalloc(sizeof *priv, GFP_KERNEL);
|
|
|
+ value = -ENOMEM;
|
|
|
+ if (priv) {
|
|
|
+ value = ep_aio(iocb, priv, epdata, buf, len);
|
|
|
+ if (value == -EIOCBQUEUED)
|
|
|
+ buf = NULL;
|
|
|
}
|
|
|
- len += iov[i].iov_len;
|
|
|
}
|
|
|
- return ep_aio_rwtail(iocb, buf, len, epdata, NULL, 0);
|
|
|
+out:
|
|
|
+ kfree(buf);
|
|
|
+ mutex_unlock(&epdata->lock);
|
|
|
+ return value;
|
|
|
}
|
|
|
|
|
|
/*----------------------------------------------------------------------*/
|
|
@@ -745,15 +693,15 @@ ep_aio_write(struct kiocb *iocb, const struct iovec *iov,
|
|
|
/* used after endpoint configuration */
|
|
|
static const struct file_operations ep_io_operations = {
|
|
|
.owner = THIS_MODULE,
|
|
|
- .llseek = no_llseek,
|
|
|
|
|
|
- .read = ep_read,
|
|
|
- .write = ep_write,
|
|
|
- .unlocked_ioctl = ep_ioctl,
|
|
|
+ .open = ep_open,
|
|
|
.release = ep_release,
|
|
|
-
|
|
|
- .aio_read = ep_aio_read,
|
|
|
- .aio_write = ep_aio_write,
|
|
|
+ .llseek = no_llseek,
|
|
|
+ .read = new_sync_read,
|
|
|
+ .write = new_sync_write,
|
|
|
+ .unlocked_ioctl = ep_ioctl,
|
|
|
+ .read_iter = ep_read_iter,
|
|
|
+ .write_iter = ep_write_iter,
|
|
|
};
|
|
|
|
|
|
/* ENDPOINT INITIALIZATION
|
|
@@ -770,17 +718,12 @@ static const struct file_operations ep_io_operations = {
|
|
|
* speed descriptor, then optional high speed descriptor.
|
|
|
*/
|
|
|
static ssize_t
|
|
|
-ep_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
|
|
|
+ep_config (struct ep_data *data, const char *buf, size_t len)
|
|
|
{
|
|
|
- struct ep_data *data = fd->private_data;
|
|
|
struct usb_ep *ep;
|
|
|
u32 tag;
|
|
|
int value, length = len;
|
|
|
|
|
|
- value = mutex_lock_interruptible(&data->lock);
|
|
|
- if (value < 0)
|
|
|
- return value;
|
|
|
-
|
|
|
if (data->state != STATE_EP_READY) {
|
|
|
value = -EL2HLT;
|
|
|
goto fail;
|
|
@@ -791,9 +734,7 @@ ep_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
|
|
|
goto fail0;
|
|
|
|
|
|
/* we might need to change message format someday */
|
|
|
- if (copy_from_user (&tag, buf, 4)) {
|
|
|
- goto fail1;
|
|
|
- }
|
|
|
+ memcpy(&tag, buf, 4);
|
|
|
if (tag != 1) {
|
|
|
DBG(data->dev, "config %s, bad tag %d\n", data->name, tag);
|
|
|
goto fail0;
|
|
@@ -806,19 +747,15 @@ ep_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
|
|
|
*/
|
|
|
|
|
|
/* full/low speed descriptor, then high speed */
|
|
|
- if (copy_from_user (&data->desc, buf, USB_DT_ENDPOINT_SIZE)) {
|
|
|
- goto fail1;
|
|
|
- }
|
|
|
+ memcpy(&data->desc, buf, USB_DT_ENDPOINT_SIZE);
|
|
|
if (data->desc.bLength != USB_DT_ENDPOINT_SIZE
|
|
|
|| data->desc.bDescriptorType != USB_DT_ENDPOINT)
|
|
|
goto fail0;
|
|
|
if (len != USB_DT_ENDPOINT_SIZE) {
|
|
|
if (len != 2 * USB_DT_ENDPOINT_SIZE)
|
|
|
goto fail0;
|
|
|
- if (copy_from_user (&data->hs_desc, buf + USB_DT_ENDPOINT_SIZE,
|
|
|
- USB_DT_ENDPOINT_SIZE)) {
|
|
|
- goto fail1;
|
|
|
- }
|
|
|
+ memcpy(&data->hs_desc, buf + USB_DT_ENDPOINT_SIZE,
|
|
|
+ USB_DT_ENDPOINT_SIZE);
|
|
|
if (data->hs_desc.bLength != USB_DT_ENDPOINT_SIZE
|
|
|
|| data->hs_desc.bDescriptorType
|
|
|
!= USB_DT_ENDPOINT) {
|
|
@@ -840,24 +777,20 @@ ep_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
|
|
|
case USB_SPEED_LOW:
|
|
|
case USB_SPEED_FULL:
|
|
|
ep->desc = &data->desc;
|
|
|
- value = usb_ep_enable(ep);
|
|
|
- if (value == 0)
|
|
|
- data->state = STATE_EP_ENABLED;
|
|
|
break;
|
|
|
case USB_SPEED_HIGH:
|
|
|
/* fails if caller didn't provide that descriptor... */
|
|
|
ep->desc = &data->hs_desc;
|
|
|
- value = usb_ep_enable(ep);
|
|
|
- if (value == 0)
|
|
|
- data->state = STATE_EP_ENABLED;
|
|
|
break;
|
|
|
default:
|
|
|
DBG(data->dev, "unconnected, %s init abandoned\n",
|
|
|
data->name);
|
|
|
value = -EINVAL;
|
|
|
+ goto gone;
|
|
|
}
|
|
|
+ value = usb_ep_enable(ep);
|
|
|
if (value == 0) {
|
|
|
- fd->f_op = &ep_io_operations;
|
|
|
+ data->state = STATE_EP_ENABLED;
|
|
|
value = length;
|
|
|
}
|
|
|
gone:
|
|
@@ -867,14 +800,10 @@ fail:
|
|
|
data->desc.bDescriptorType = 0;
|
|
|
data->hs_desc.bDescriptorType = 0;
|
|
|
}
|
|
|
- mutex_unlock(&data->lock);
|
|
|
return value;
|
|
|
fail0:
|
|
|
value = -EINVAL;
|
|
|
goto fail;
|
|
|
-fail1:
|
|
|
- value = -EFAULT;
|
|
|
- goto fail;
|
|
|
}
|
|
|
|
|
|
static int
|
|
@@ -902,15 +831,6 @@ ep_open (struct inode *inode, struct file *fd)
|
|
|
return value;
|
|
|
}
|
|
|
|
|
|
-/* used before endpoint configuration */
|
|
|
-static const struct file_operations ep_config_operations = {
|
|
|
- .llseek = no_llseek,
|
|
|
-
|
|
|
- .open = ep_open,
|
|
|
- .write = ep_config,
|
|
|
- .release = ep_release,
|
|
|
-};
|
|
|
-
|
|
|
/*----------------------------------------------------------------------*/
|
|
|
|
|
|
/* EP0 IMPLEMENTATION can be partly in userspace.
|
|
@@ -989,6 +909,10 @@ ep0_read (struct file *fd, char __user *buf, size_t len, loff_t *ptr)
|
|
|
enum ep0_state state;
|
|
|
|
|
|
spin_lock_irq (&dev->lock);
|
|
|
+ if (dev->state <= STATE_DEV_OPENED) {
|
|
|
+ retval = -EINVAL;
|
|
|
+ goto done;
|
|
|
+ }
|
|
|
|
|
|
/* report fd mode change before acting on it */
|
|
|
if (dev->setup_abort) {
|
|
@@ -1187,8 +1111,6 @@ ep0_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
|
|
|
struct dev_data *dev = fd->private_data;
|
|
|
ssize_t retval = -ESRCH;
|
|
|
|
|
|
- spin_lock_irq (&dev->lock);
|
|
|
-
|
|
|
/* report fd mode change before acting on it */
|
|
|
if (dev->setup_abort) {
|
|
|
dev->setup_abort = 0;
|
|
@@ -1234,7 +1156,6 @@ ep0_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
|
|
|
} else
|
|
|
DBG (dev, "fail %s, state %d\n", __func__, dev->state);
|
|
|
|
|
|
- spin_unlock_irq (&dev->lock);
|
|
|
return retval;
|
|
|
}
|
|
|
|
|
@@ -1281,6 +1202,9 @@ ep0_poll (struct file *fd, poll_table *wait)
|
|
|
struct dev_data *dev = fd->private_data;
|
|
|
int mask = 0;
|
|
|
|
|
|
+ if (dev->state <= STATE_DEV_OPENED)
|
|
|
+ return DEFAULT_POLLMASK;
|
|
|
+
|
|
|
poll_wait(fd, &dev->wait, wait);
|
|
|
|
|
|
spin_lock_irq (&dev->lock);
|
|
@@ -1316,19 +1240,6 @@ static long dev_ioctl (struct file *fd, unsigned code, unsigned long value)
|
|
|
return ret;
|
|
|
}
|
|
|
|
|
|
-/* used after device configuration */
|
|
|
-static const struct file_operations ep0_io_operations = {
|
|
|
- .owner = THIS_MODULE,
|
|
|
- .llseek = no_llseek,
|
|
|
-
|
|
|
- .read = ep0_read,
|
|
|
- .write = ep0_write,
|
|
|
- .fasync = ep0_fasync,
|
|
|
- .poll = ep0_poll,
|
|
|
- .unlocked_ioctl = dev_ioctl,
|
|
|
- .release = dev_release,
|
|
|
-};
|
|
|
-
|
|
|
/*----------------------------------------------------------------------*/
|
|
|
|
|
|
/* The in-kernel gadget driver handles most ep0 issues, in particular
|
|
@@ -1650,7 +1561,7 @@ static int activate_ep_files (struct dev_data *dev)
|
|
|
goto enomem1;
|
|
|
|
|
|
data->dentry = gadgetfs_create_file (dev->sb, data->name,
|
|
|
- data, &ep_config_operations);
|
|
|
+ data, &ep_io_operations);
|
|
|
if (!data->dentry)
|
|
|
goto enomem2;
|
|
|
list_add_tail (&data->epfiles, &dev->epfiles);
|
|
@@ -1852,6 +1763,14 @@ dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
|
|
|
u32 tag;
|
|
|
char *kbuf;
|
|
|
|
|
|
+ spin_lock_irq(&dev->lock);
|
|
|
+ if (dev->state > STATE_DEV_OPENED) {
|
|
|
+ value = ep0_write(fd, buf, len, ptr);
|
|
|
+ spin_unlock_irq(&dev->lock);
|
|
|
+ return value;
|
|
|
+ }
|
|
|
+ spin_unlock_irq(&dev->lock);
|
|
|
+
|
|
|
if (len < (USB_DT_CONFIG_SIZE + USB_DT_DEVICE_SIZE + 4))
|
|
|
return -EINVAL;
|
|
|
|
|
@@ -1925,7 +1844,6 @@ dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
|
|
|
* on, they can work ... except in cleanup paths that
|
|
|
* kick in after the ep0 descriptor is closed.
|
|
|
*/
|
|
|
- fd->f_op = &ep0_io_operations;
|
|
|
value = len;
|
|
|
}
|
|
|
return value;
|
|
@@ -1956,12 +1874,14 @@ dev_open (struct inode *inode, struct file *fd)
|
|
|
return value;
|
|
|
}
|
|
|
|
|
|
-static const struct file_operations dev_init_operations = {
|
|
|
+static const struct file_operations ep0_operations = {
|
|
|
.llseek = no_llseek,
|
|
|
|
|
|
.open = dev_open,
|
|
|
+ .read = ep0_read,
|
|
|
.write = dev_config,
|
|
|
.fasync = ep0_fasync,
|
|
|
+ .poll = ep0_poll,
|
|
|
.unlocked_ioctl = dev_ioctl,
|
|
|
.release = dev_release,
|
|
|
};
|
|
@@ -2077,7 +1997,7 @@ gadgetfs_fill_super (struct super_block *sb, void *opts, int silent)
|
|
|
goto Enomem;
|
|
|
|
|
|
dev->sb = sb;
|
|
|
- dev->dentry = gadgetfs_create_file(sb, CHIP, dev, &dev_init_operations);
|
|
|
+ dev->dentry = gadgetfs_create_file(sb, CHIP, dev, &ep0_operations);
|
|
|
if (!dev->dentry) {
|
|
|
put_dev(dev);
|
|
|
goto Enomem;
|