@@ -31,6 +31,14 @@
 #include "dmaengine.h"
 #include "mv_xor.h"
 
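+/* SoC flavours of the XOR engine; the type selects the MBus window
+ * setup and whether operations are configured per descriptor. */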
+enum mv_xor_type {
+	XOR_ORION,
+	XOR_ARMADA_38X,
+	XOR_ARMADA_37XX,
+};
+
 enum mv_xor_mode {
 	XOR_MODE_IN_REG,
 	XOR_MODE_IN_DESC,
@@ -477,7 +485,8 @@ mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
 	BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);
 
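+	/* note: len is a size_t, hence the %zu below */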
 	dev_dbg(mv_chan_to_devp(mv_chan),
-		"%s src_cnt: %d len: %u dest %pad flags: %ld\n",
+		"%s src_cnt: %d len: %zu dest %pad flags: %ld\n",
 		__func__, src_cnt, len, &dest, flags);
 
 	sw_desc = mv_chan_alloc_slot(mv_chan);
@@ -933,7 +942,7 @@ static int mv_xor_channel_remove(struct mv_xor_chan *mv_chan)
 static struct mv_xor_chan *
 mv_xor_channel_add(struct mv_xor_device *xordev,
 		   struct platform_device *pdev,
-		   int idx, dma_cap_mask_t cap_mask, int irq, int op_in_desc)
+		   int idx, dma_cap_mask_t cap_mask, int irq)
 {
 	int ret = 0;
 	struct mv_xor_chan *mv_chan;
@@ -945,7 +954,11 @@ mv_xor_channel_add(struct mv_xor_device *xordev,
 
 	mv_chan->idx = idx;
 	mv_chan->irq = irq;
-	mv_chan->op_in_desc = op_in_desc;
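+	/* only the legacy Orion engines lack a per-descriptor op mode */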
+	if (xordev->xor_type == XOR_ORION)
+		mv_chan->op_in_desc = XOR_MODE_IN_REG;
+	else
+		mv_chan->op_in_desc = XOR_MODE_IN_DESC;
 
 	dma_dev = &mv_chan->dmadev;
 
@@ -1085,6 +1098,36 @@ mv_xor_conf_mbus_windows(struct mv_xor_device *xordev,
 	writel(0, base + WINDOW_OVERRIDE_CTRL(1));
 }
 
+static void
+mv_xor_conf_mbus_windows_a3700(struct mv_xor_device *xordev)
+{
+	void __iomem *base = xordev->xor_high_base;
+	u32 win_enable = 0;
+	int i;
+
+	for (i = 0; i < 8; i++) {
+		writel(0, base + WINDOW_BASE(i));
+		writel(0, base + WINDOW_SIZE(i));
+		if (i < 4)
+			writel(0, base + WINDOW_REMAP_HIGH(i));
+	}
+	/*
+	 * For Armada 3700, open a default 4GB MBus window. The DRAM
+	 * related configuration is done at the AXIS level.
+	 */
+	writel(0xffff0000, base + WINDOW_SIZE(0));
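+	/* bit 0 enables window 0; bits [17:16] grant it full read/write
+	 * access, matching what mv_xor_conf_mbus_windows() sets per window
+	 */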
+	win_enable |= 1;
+	win_enable |= 3 << 16;
+
+	writel(win_enable, base + WINDOW_BAR_ENABLE(0));
+	writel(win_enable, base + WINDOW_BAR_ENABLE(1));
+	writel(0, base + WINDOW_OVERRIDE_CTRL(0));
+	writel(0, base + WINDOW_OVERRIDE_CTRL(1));
+}
+
 /*
  * Since this XOR driver is basically used only for RAID5, we don't
  * need to care about synchronizing ->suspend with DMA activity,
@@ -1129,6 +1172,12 @@ static int mv_xor_resume(struct platform_device *dev)
 				XOR_INTR_MASK(mv_chan));
 	}
 
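+	/* no MBus DRAM windows to restore on A3700, reopen the default one */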
+	if (xordev->xor_type == XOR_ARMADA_37XX) {
+		mv_xor_conf_mbus_windows_a3700(xordev);
+		return 0;
+	}
+
 	dram = mv_mbus_dram_info();
 	if (dram)
 		mv_xor_conf_mbus_windows(xordev, dram);
@@ -1137,8 +1186,9 @@ static int mv_xor_resume(struct platform_device *dev)
 }
 
 static const struct of_device_id mv_xor_dt_ids[] = {
-	{ .compatible = "marvell,orion-xor", .data = (void *)XOR_MODE_IN_REG },
-	{ .compatible = "marvell,armada-380-xor", .data = (void *)XOR_MODE_IN_DESC },
+	{ .compatible = "marvell,orion-xor", .data = (void *)XOR_ORION },
+	{ .compatible = "marvell,armada-380-xor", .data = (void *)XOR_ARMADA_38X },
+	{ .compatible = "marvell,armada-3700-xor", .data = (void *)XOR_ARMADA_37XX },
 	{},
 };
 
@@ -1152,7 +1202,6 @@ static int mv_xor_probe(struct platform_device *pdev)
 	struct resource *res;
 	unsigned int max_engines, max_channels;
 	int i, ret;
-	int op_in_desc;
 
 	dev_notice(&pdev->dev, "Marvell shared XOR driver\n");
 
@@ -1180,12 +1229,29 @@ static int mv_xor_probe(struct platform_device *pdev)
 
 	platform_set_drvdata(pdev, xordev);
 
+	/*
+	 * We need to know the XOR device type before setting up;
+	 * in the non-DT case it can only be the legacy Orion one.
+	 */
+	xordev->xor_type = XOR_ORION;
+	if (pdev->dev.of_node) {
+		const struct of_device_id *of_id =
+			of_match_device(mv_xor_dt_ids,
+					&pdev->dev);
+
+		xordev->xor_type = (uintptr_t)of_id->data;
+	}
+
 	/*
 	 * (Re-)program MBUS remapping windows if we are asked to.
 	 */
-	dram = mv_mbus_dram_info();
-	if (dram)
-		mv_xor_conf_mbus_windows(xordev, dram);
+	if (xordev->xor_type == XOR_ARMADA_37XX) {
+		mv_xor_conf_mbus_windows_a3700(xordev);
+	} else {
+		dram = mv_mbus_dram_info();
+		if (dram)
+			mv_xor_conf_mbus_windows(xordev, dram);
+	}
 
 	/* Not all platforms can gate the clock, so it is not
 	 * an error if the clock does not exist.
@@ -1199,12 +1265,17 @@ static int mv_xor_probe(struct platform_device *pdev)
 	 * order for async_tx to perform well. So we limit the number
 	 * of engines and channels so that we take into account this
 	 * constraint. Note that we also want to use channels from
-	 * separate engines when possible.
+	 * separate engines when possible. The dual-CPU Armada 3700
+	 * SoC has one XOR engine, so allow using both its channels.
 	 */
 	max_engines = num_present_cpus();
-	max_channels = min_t(unsigned int,
-			     MV_XOR_MAX_CHANNELS,
-			     DIV_ROUND_UP(num_present_cpus(), 2));
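+	/* e.g. two present CPUs: legacy limit is one channel, A3700 two */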
+	if (xordev->xor_type == XOR_ARMADA_37XX)
+		max_channels = num_present_cpus();
+	else
+		max_channels = min_t(unsigned int,
+				     MV_XOR_MAX_CHANNELS,
+				     DIV_ROUND_UP(num_present_cpus(), 2));
 
 	if (mv_xor_engine_count >= max_engines)
 		return 0;
@@ -1212,15 +1283,11 @@ static int mv_xor_probe(struct platform_device *pdev)
 	if (pdev->dev.of_node) {
 		struct device_node *np;
 		int i = 0;
-		const struct of_device_id *of_id =
-			of_match_device(mv_xor_dt_ids,
-					&pdev->dev);
 
 		for_each_child_of_node(pdev->dev.of_node, np) {
 			struct mv_xor_chan *chan;
 			dma_cap_mask_t cap_mask;
 			int irq;
-			op_in_desc = (int)of_id->data;
 
 			if (i >= max_channels)
 				continue;
@@ -1237,7 +1304,7 @@ static int mv_xor_probe(struct platform_device *pdev)
 			}
 
 			chan = mv_xor_channel_add(xordev, pdev, i,
-						  cap_mask, irq, op_in_desc);
+						  cap_mask, irq);
 			if (IS_ERR(chan)) {
 				ret = PTR_ERR(chan);
 				irq_dispose_mapping(irq);
@@ -1266,8 +1333,7 @@ static int mv_xor_probe(struct platform_device *pdev)
 		}
 
 		chan = mv_xor_channel_add(xordev, pdev, i,
-					  cd->cap_mask, irq,
-					  XOR_MODE_IN_REG);
+					  cd->cap_mask, irq);
 		if (IS_ERR(chan)) {
 			ret = PTR_ERR(chan);
 			goto err_channel_add;