@@ -310,7 +310,8 @@ mv_xor_clean_slot(struct mv_xor_desc_slot *desc,
 	return 0;
 }
 
-static void __mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
+/* This function must be called with the mv_xor_chan spinlock held */
+static void mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
 {
 	struct mv_xor_desc_slot *iter, *_iter;
 	dma_cookie_t cookie = 0;
@@ -366,18 +367,13 @@ static void __mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
 	mv_chan->dmachan.completed_cookie = cookie;
 }
 
-static void
-mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
-{
-	spin_lock_bh(&mv_chan->lock);
-	__mv_xor_slot_cleanup(mv_chan);
-	spin_unlock_bh(&mv_chan->lock);
-}
-
 static void mv_xor_tasklet(unsigned long data)
 {
 	struct mv_xor_chan *chan = (struct mv_xor_chan *) data;
+
+	spin_lock_bh(&chan->lock);
 	mv_xor_slot_cleanup(chan);
+	spin_unlock_bh(&chan->lock);
 }
 
 static struct mv_xor_desc_slot *
@@ -656,9 +652,10 @@ static void mv_xor_free_chan_resources(struct dma_chan *chan)
 	struct mv_xor_desc_slot *iter, *_iter;
 	int in_use_descs = 0;
 
+	spin_lock_bh(&mv_chan->lock);
+
 	mv_xor_slot_cleanup(mv_chan);
 
-	spin_lock_bh(&mv_chan->lock);
 	list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
 					chain_node) {
 		in_use_descs++;
@@ -700,11 +697,12 @@ static enum dma_status mv_xor_status(struct dma_chan *chan,
 	enum dma_status ret;
 
 	ret = dma_cookie_status(chan, cookie, txstate);
-	if (ret == DMA_COMPLETE) {
-		mv_xor_clean_completed_slots(mv_chan);
+	if (ret == DMA_COMPLETE)
 		return ret;
-	}
+
+	spin_lock_bh(&mv_chan->lock);
 	mv_xor_slot_cleanup(mv_chan);
+	spin_unlock_bh(&mv_chan->lock);
 
 	return dma_cookie_status(chan, cookie, txstate);
 }
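
[Note, not part of the patch: the hunks above invert the locking convention. Instead of mv_xor_slot_cleanup() wrapping a __mv_xor_slot_cleanup() helper in spin_lock_bh(), the helper is renamed, documented as requiring the channel spinlock, and every caller (the tasklet, mv_xor_free_chan_resources() and mv_xor_status()) now takes mv_chan->lock around the call; free_chan_resources can then hold one critical section across both the cleanup and its own chain walk. A minimal sketch of that convention under hypothetical foo_* names; the lockdep_assert_held() check is an extra safeguard the patch itself does not add:

#include <linux/spinlock.h>
#include <linux/lockdep.h>

/* Hypothetical example, not mv_xor code. */
struct foo_chan {
	spinlock_t lock;		/* protects the descriptor chain */
};

/* Must be called with chan->lock held, as with mv_xor_slot_cleanup(). */
static void foo_slot_cleanup(struct foo_chan *chan)
{
	lockdep_assert_held(&chan->lock);	/* flags unlocked callers at runtime */
	/* ... walk the chain, complete finished descriptors ... */
}

static void foo_tasklet(unsigned long data)
{
	struct foo_chan *chan = (struct foo_chan *)data;

	spin_lock_bh(&chan->lock);	/* _bh variant: the lock is taken from both process and softirq context */
	foo_slot_cleanup(chan);
	spin_unlock_bh(&chan->lock);
}

Pushing the locking out to the callers closes the window in which the tasklet and the other cleanup paths could race on the descriptor lists.]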
@@ -782,7 +780,7 @@ static void mv_xor_issue_pending(struct dma_chan *chan)
 
 static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
 {
-	int i;
+	int i, ret;
 	void *src, *dest;
 	dma_addr_t src_dma, dest_dma;
 	struct dma_chan *dma_chan;
@@ -819,19 +817,44 @@ static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
 
 	src_dma = dma_map_page(dma_chan->device->dev, virt_to_page(src), 0,
 				 PAGE_SIZE, DMA_TO_DEVICE);
-	unmap->to_cnt = 1;
 	unmap->addr[0] = src_dma;
 
+	ret = dma_mapping_error(dma_chan->device->dev, src_dma);
+	if (ret) {
+		err = -ENOMEM;
+		goto free_resources;
+	}
+	unmap->to_cnt = 1;
+
 	dest_dma = dma_map_page(dma_chan->device->dev, virt_to_page(dest), 0,
 				  PAGE_SIZE, DMA_FROM_DEVICE);
-	unmap->from_cnt = 1;
 	unmap->addr[1] = dest_dma;
 
+	ret = dma_mapping_error(dma_chan->device->dev, dest_dma);
+	if (ret) {
+		err = -ENOMEM;
+		goto free_resources;
+	}
+	unmap->from_cnt = 1;
 	unmap->len = PAGE_SIZE;
 
 	tx = mv_xor_prep_dma_memcpy(dma_chan, dest_dma, src_dma,
 				    PAGE_SIZE, 0);
+	if (!tx) {
+		dev_err(dma_chan->device->dev,
+			"Self-test cannot prepare operation, disabling\n");
+		err = -ENODEV;
+		goto free_resources;
+	}
+
 	cookie = mv_xor_tx_submit(tx);
+	if (dma_submit_error(cookie)) {
+		dev_err(dma_chan->device->dev,
+			"Self-test submit error, disabling\n");
+		err = -ENODEV;
+		goto free_resources;
+	}
+
 	mv_xor_issue_pending(dma_chan);
 	async_tx_ack(tx);
 	msleep(1);
@@ -866,7 +889,7 @@ out:
 static int
 mv_xor_xor_self_test(struct mv_xor_chan *mv_chan)
 {
-	int i, src_idx;
+	int i, src_idx, ret;
 	struct page *dest;
 	struct page *xor_srcs[MV_XOR_NUM_SRC_TEST];
 	dma_addr_t dma_srcs[MV_XOR_NUM_SRC_TEST];
@@ -929,19 +952,42 @@ mv_xor_xor_self_test(struct mv_xor_chan *mv_chan)
 		unmap->addr[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
 					      0, PAGE_SIZE, DMA_TO_DEVICE);
 		dma_srcs[i] = unmap->addr[i];
+		ret = dma_mapping_error(dma_chan->device->dev, unmap->addr[i]);
+		if (ret) {
+			err = -ENOMEM;
+			goto free_resources;
+		}
 		unmap->to_cnt++;
 	}
 
 	unmap->addr[src_count] = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE,
 				      DMA_FROM_DEVICE);
 	dest_dma = unmap->addr[src_count];
 
+	ret = dma_mapping_error(dma_chan->device->dev, unmap->addr[src_count]);
+	if (ret) {
+		err = -ENOMEM;
+		goto free_resources;
+	}
 	unmap->from_cnt = 1;
 	unmap->len = PAGE_SIZE;
 
 	tx = mv_xor_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
 				 src_count, PAGE_SIZE, 0);
+	if (!tx) {
+		dev_err(dma_chan->device->dev,
+			"Self-test cannot prepare operation, disabling\n");
+		err = -ENODEV;
+		goto free_resources;
+	}
+
 	cookie = mv_xor_tx_submit(tx);
+	if (dma_submit_error(cookie)) {
+		dev_err(dma_chan->device->dev,
+			"Self-test submit error, disabling\n");
+		err = -ENODEV;
+		goto free_resources;
+	}
+
 	mv_xor_issue_pending(dma_chan);
 	async_tx_ack(tx);
 	msleep(8);
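
[Note, not part of the patch: the two self-test hunks add the three checks a dmaengine client is expected to make: dma_mapping_error() after each dma_map_page(), a NULL test on the descriptor returned by the prep call, and dma_submit_error() on the returned cookie. A condensed sketch of the same pattern, using the generic dmaengine wrappers rather than the driver-internal mv_xor_* calls, with a hypothetical foo_copy_page() helper:

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>

/* Hypothetical example, not mv_xor code. */
static int foo_copy_page(struct dma_chan *chan, struct page *src, struct page *dst)
{
	struct device *dev = chan->device->dev;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t src_dma, dst_dma;
	dma_cookie_t cookie;
	int err = 0;

	src_dma = dma_map_page(dev, src, 0, PAGE_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, src_dma))	/* mapping can fail, e.g. swiotlb exhaustion */
		return -ENOMEM;

	dst_dma = dma_map_page(dev, dst, 0, PAGE_SIZE, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, dst_dma)) {
		err = -ENOMEM;
		goto unmap_src;
	}

	tx = dmaengine_prep_dma_memcpy(chan, dst_dma, src_dma, PAGE_SIZE, 0);
	if (!tx) {				/* prep fails when no descriptor is free */
		err = -ENODEV;
		goto unmap_dst;
	}

	cookie = dmaengine_submit(tx);
	if (dma_submit_error(cookie)) {		/* an error cookie means nothing was queued */
		err = -ENODEV;
		goto unmap_dst;
	}

	dma_async_issue_pending(chan);
	if (dma_sync_wait(chan, cookie) != DMA_COMPLETE)	/* poll until the copy completes */
		err = -ETIMEDOUT;

unmap_dst:
	dma_unmap_page(dev, dst_dma, PAGE_SIZE, DMA_FROM_DEVICE);
unmap_src:
	dma_unmap_page(dev, src_dma, PAGE_SIZE, DMA_TO_DEVICE);
	return err;
}

Unlike this polling sketch, the self-tests above deliberately msleep(1)/msleep(8) and then re-check channel status before comparing buffers and releasing the mappings through dmaengine_unmap_put().]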