@@ -212,7 +212,6 @@ struct mport_cdev_priv {
 #ifdef CONFIG_RAPIDIO_DMA_ENGINE
        struct dma_chan *dmach;
        struct list_head async_list;
-       struct list_head pend_list;
        spinlock_t req_lock;
        struct mutex dma_lock;
        struct kref dma_ref;
@@ -258,8 +257,6 @@ static DECLARE_WAIT_QUEUE_HEAD(mport_cdev_wait);
 static struct class *dev_class;
 static dev_t dev_number;
 
-static struct workqueue_struct *dma_wq;
-
 static void mport_release_mapping(struct kref *ref);
 
 static int rio_mport_maint_rd(struct mport_cdev_priv *priv, void __user *arg,
@@ -539,6 +536,7 @@ static int maint_comptag_set(struct mport_cdev_priv *priv, void __user *arg)
 #ifdef CONFIG_RAPIDIO_DMA_ENGINE
 
 struct mport_dma_req {
+       struct kref refcount;
        struct list_head node;
        struct file *filp;
        struct mport_cdev_priv *priv;
@@ -554,11 +552,6 @@ struct mport_dma_req {
        struct completion req_comp;
 };
 
-struct mport_faf_work {
-       struct work_struct work;
-       struct mport_dma_req *req;
-};
-
 static void mport_release_def_dma(struct kref *dma_ref)
 {
        struct mport_dev *md =
@@ -578,8 +571,10 @@ static void mport_release_dma(struct kref *dma_ref)
        complete(&priv->comp);
 }
 
-static void dma_req_free(struct mport_dma_req *req)
+static void dma_req_free(struct kref *ref)
 {
+       struct mport_dma_req *req = container_of(ref, struct mport_dma_req,
+                       refcount);
        struct mport_cdev_priv *priv = req->priv;
        unsigned int i;
 
@@ -611,30 +606,7 @@ static void dma_xfer_callback(void *param)
        req->status = dma_async_is_tx_complete(priv->dmach, req->cookie,
                                               NULL, NULL);
        complete(&req->req_comp);
-}
-
-static void dma_faf_cleanup(struct work_struct *_work)
-{
-       struct mport_faf_work *work = container_of(_work,
-                                               struct mport_faf_work, work);
-       struct mport_dma_req *req = work->req;
-
-       dma_req_free(req);
-       kfree(work);
-}
-
-static void dma_faf_callback(void *param)
-{
-       struct mport_dma_req *req = (struct mport_dma_req *)param;
-       struct mport_faf_work *work;
-
-       work = kmalloc(sizeof(*work), GFP_ATOMIC);
-       if (!work)
-               return;
-
-       INIT_WORK(&work->work, dma_faf_cleanup);
-       work->req = req;
-       queue_work(dma_wq, &work->work);
+       kref_put(&req->refcount, dma_req_free);
 }
 
 /*
@@ -765,16 +737,14 @@ static int do_dma_request(struct mport_dma_req *req,
                goto err_out;
        }
 
-       if (sync == RIO_TRANSFER_FAF)
-               tx->callback = dma_faf_callback;
-       else
-               tx->callback = dma_xfer_callback;
+       tx->callback = dma_xfer_callback;
        tx->callback_param = req;
 
        req->dmach = chan;
        req->sync = sync;
        req->status = DMA_IN_PROGRESS;
        init_completion(&req->req_comp);
+       kref_get(&req->refcount);
 
        cookie = dmaengine_submit(tx);
        req->cookie = cookie;
@@ -785,6 +755,7 @@ static int do_dma_request(struct mport_dma_req *req,
        if (dma_submit_error(cookie)) {
                rmcd_error("submit err=%d (addr:0x%llx len:0x%llx)",
                           cookie, xfer->rio_addr, xfer->length);
+               kref_put(&req->refcount, dma_req_free);
                ret = -EIO;
                goto err_out;
        }
@@ -860,6 +831,8 @@ rio_dma_transfer(struct file *filp, u32 transfer_mode,
        if (!req)
                return -ENOMEM;
 
+       kref_init(&req->refcount);
+
        ret = get_dma_channel(priv);
        if (ret) {
                kfree(req);
@@ -968,42 +941,20 @@ rio_dma_transfer(struct file *filp, u32 transfer_mode,
        ret = do_dma_request(req, xfer, sync, nents);
 
        if (ret >= 0) {
-               if (sync == RIO_TRANSFER_SYNC)
-                       goto sync_out;
-               return ret; /* return ASYNC cookie */
-       }
-
-       if (ret == -ETIMEDOUT || ret == -EINTR) {
-               /*
-                * This can happen only in case of SYNC transfer.
-                * Do not free unfinished request structure immediately.
-                * Place it into pending list and deal with it later
-                */
-               spin_lock(&priv->req_lock);
-               list_add_tail(&req->node, &priv->pend_list);
-               spin_unlock(&priv->req_lock);
-               return ret;
+               if (sync == RIO_TRANSFER_ASYNC)
+                       return ret; /* return ASYNC cookie */
+       } else {
+               rmcd_debug(DMA, "do_dma_request failed with err=%d", ret);
        }
 
-
-       rmcd_debug(DMA, "do_dma_request failed with err=%d", ret);
-sync_out:
-       dma_unmap_sg(chan->device->dev, req->sgt.sgl, req->sgt.nents, dir);
-       sg_free_table(&req->sgt);
 err_pg:
-       if (page_list) {
+       if (!req->page_list) {
                for (i = 0; i < nr_pages; i++)
                        put_page(page_list[i]);
                kfree(page_list);
        }
 err_req:
-       if (req->map) {
-               mutex_lock(&md->buf_mutex);
-               kref_put(&req->map->ref, mport_release_mapping);
-               mutex_unlock(&md->buf_mutex);
-       }
-       put_dma_channel(priv);
-       kfree(req);
+       kref_put(&req->refcount, dma_req_free);
        return ret;
 }
 
@@ -1121,7 +1072,7 @@ static int rio_mport_wait_for_async_dma(struct file *filp, void __user *arg)
        ret = 0;
 
        if (req->status != DMA_IN_PROGRESS && req->status != DMA_PAUSED)
-               dma_req_free(req);
+               kref_put(&req->refcount, dma_req_free);
 
        return ret;
 
@@ -1966,7 +1917,6 @@ static int mport_cdev_open(struct inode *inode, struct file *filp)
 
 #ifdef CONFIG_RAPIDIO_DMA_ENGINE
        INIT_LIST_HEAD(&priv->async_list);
-       INIT_LIST_HEAD(&priv->pend_list);
        spin_lock_init(&priv->req_lock);
        mutex_init(&priv->dma_lock);
 #endif
@@ -2006,8 +1956,6 @@ static void mport_cdev_release_dma(struct file *filp)
 
        md = priv->md;
 
-       flush_workqueue(dma_wq);
-
        spin_lock(&priv->req_lock);
        if (!list_empty(&priv->async_list)) {
                rmcd_debug(EXIT, "async list not empty filp=%p %s(%d)",
@@ -2023,20 +1971,7 @@ static void mport_cdev_release_dma(struct file *filp)
                                 req->filp, req->cookie,
                                 completion_done(&req->req_comp)?"yes":"no");
                        list_del(&req->node);
-                       dma_req_free(req);
-               }
-       }
-
-       if (!list_empty(&priv->pend_list)) {
-               rmcd_debug(EXIT, "Free pending DMA requests for filp=%p %s(%d)",
-                          filp, current->comm, task_pid_nr(current));
-               list_for_each_entry_safe(req,
-                                        req_next, &priv->pend_list, node) {
-                       rmcd_debug(EXIT, "free req->filp=%p cookie=%d compl=%s",
-                                  req->filp, req->cookie,
-                                  completion_done(&req->req_comp)?"yes":"no");
-                       list_del(&req->node);
-                       dma_req_free(req);
+                       kref_put(&req->refcount, dma_req_free);
                }
        }
 
@@ -2048,15 +1983,6 @@ static void mport_cdev_release_dma(struct file *filp)
                           current->comm, task_pid_nr(current), wret);
        }
 
-       spin_lock(&priv->req_lock);
-
-       if (!list_empty(&priv->pend_list)) {
-               rmcd_debug(EXIT, "ATTN: pending DMA requests, filp=%p %s(%d)",
-                          filp, current->comm, task_pid_nr(current));
-       }
-
-       spin_unlock(&priv->req_lock);
-
        if (priv->dmach != priv->md->dma_chan) {
                rmcd_debug(EXIT, "Release DMA channel for filp=%p %s(%d)",
                           filp, current->comm, task_pid_nr(current));
@@ -2573,8 +2499,6 @@ static void mport_cdev_remove(struct mport_dev *md)
        cdev_device_del(&md->cdev, &md->dev);
        mport_cdev_kill_fasync(md);
 
-       flush_workqueue(dma_wq);
-
        /* TODO: do we need to give clients some time to close file
         * descriptors? Simple wait for XX, or kref?
         */
@@ -2691,17 +2615,8 @@ static int __init mport_init(void)
                goto err_cli;
        }
 
-       dma_wq = create_singlethread_workqueue("dma_wq");
-       if (!dma_wq) {
-               rmcd_error("failed to create DMA work queue");
-               ret = -ENOMEM;
-               goto err_wq;
-       }
-
        return 0;
 
-err_wq:
-       class_interface_unregister(&rio_mport_interface);
 err_cli:
        unregister_chrdev_region(dev_number, RIO_MAX_MPORTS);
 err_chr:
@@ -2717,7 +2632,6 @@ static void __exit mport_exit(void)
        class_interface_unregister(&rio_mport_interface);
        class_destroy(dev_class);
        unregister_chrdev_region(dev_number, RIO_MAX_MPORTS);
-       destroy_workqueue(dma_wq);
 }
 
 module_init(mport_init);
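
The lifetime rule this patch establishes is the standard kref pattern:
kref_init() gives the submitter its reference, kref_get() takes a second
one before the descriptor is handed to the DMA engine, and every exit
path (completion callback, submit error, sync wait, async teardown)
drops its reference with kref_put(&req->refcount, dma_req_free). The
request is freed by whichever side finishes last, which is what makes
the old pend_list and the dma_wq cleanup workqueue unnecessary. Below is
a minimal standalone sketch of that rule, modeling struct kref with C11
atomics; all names here (demo_req, req_put, ...) are illustrative and
not part of the driver.

/* Userspace model of the two-owner refcount: one reference for the
 * submitter, one for the DMA completion callback.  Build with any C11
 * compiler, e.g. gcc -std=c11 demo.c
 */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct demo_req {
        atomic_int refcount;            /* stands in for struct kref */
        int cookie;
};

/* Runs exactly once, when the last reference drops -- the role
 * dma_req_free() plays after this patch.
 */
static void req_free(struct demo_req *req)
{
        printf("freeing req, cookie=%d\n", req->cookie);
        free(req);
}

static void req_get(struct demo_req *req)
{
        atomic_fetch_add(&req->refcount, 1);    /* kref_get() */
}

static void req_put(struct demo_req *req)
{
        /* kref_put(): release when the count falls from 1 to 0 */
        if (atomic_fetch_sub(&req->refcount, 1) == 1)
                req_free(req);
}

int main(void)
{
        struct demo_req *req = malloc(sizeof(*req));

        atomic_init(&req->refcount, 1); /* kref_init(): submitter's ref */
        req->cookie = 42;

        req_get(req);   /* taken just before dmaengine_submit() */

        req_put(req);   /* completion callback's drop (dma_xfer_callback) */
        req_put(req);   /* submitter's drop; the last one frees the req */
        return 0;
}

Because either put may happen first, a request whose synchronous wait
timed out is no longer freed behind the DMA engine's back: the callback
still holds a reference, and the memory goes away only after both sides
are done.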