|
@@ -55,22 +55,35 @@
|
|
|
#define CPUCFG_CX_RST_CTRL_L2_RST BIT(8)
|
|
|
#define CPUCFG_CX_RST_CTRL_CX_RST(n) BIT(4 + (n))
|
|
|
#define CPUCFG_CX_RST_CTRL_CORE_RST(n) BIT(n)
|
|
|
+#define CPUCFG_CX_RST_CTRL_CORE_RST_ALL (0xf << 0)
|
|
|
|
|
|
#define PRCM_CPU_PO_RST_CTRL(c) (0x4 + 0x4 * (c))
|
|
|
#define PRCM_CPU_PO_RST_CTRL_CORE(n) BIT(n)
|
|
|
#define PRCM_CPU_PO_RST_CTRL_CORE_ALL 0xf
|
|
|
#define PRCM_PWROFF_GATING_REG(c) (0x100 + 0x4 * (c))
|
|
|
-#define PRCM_PWROFF_GATING_REG_CLUSTER BIT(4)
|
|
|
+/* The power-off gating bit for clusters differs between the A80 and the A83T */
|
|
|
+#define PRCM_PWROFF_GATING_REG_CLUSTER_SUN8I BIT(0)
|
|
|
+#define PRCM_PWROFF_GATING_REG_CLUSTER_SUN9I BIT(4)
|
|
|
#define PRCM_PWROFF_GATING_REG_CORE(n) BIT(n)
|
|
|
#define PRCM_PWR_SWITCH_REG(c, cpu) (0x140 + 0x10 * (c) + 0x4 * (cpu))
|
|
|
#define PRCM_CPU_SOFT_ENTRY_REG 0x164
|
|
|
|
|
|
+/* R_CPUCFG registers, specific to sun8i-a83t */
|
|
|
+#define R_CPUCFG_CLUSTER_PO_RST_CTRL(c) (0x30 + (c) * 0x4)
|
|
|
+#define R_CPUCFG_CLUSTER_PO_RST_CTRL_CORE(n) BIT(n)
|
|
|
+#define R_CPUCFG_CPU_SOFT_ENTRY_REG 0x01a4
|
|
|
+
|
|
|
#define CPU0_SUPPORT_HOTPLUG_MAGIC0 0xFA50392F
|
|
|
#define CPU0_SUPPORT_HOTPLUG_MAGIC1 0x790DCA3A
|
|
|
|
|
|
static void __iomem *cpucfg_base;
|
|
|
static void __iomem *prcm_base;
|
|
|
static void __iomem *sram_b_smp_base;
|
|
|
+static void __iomem *r_cpucfg_base;
|
|
|
+
|
|
|
+extern void sunxi_mc_smp_secondary_startup(void);
|
|
|
+extern void sunxi_mc_smp_resume(void);
|
|
|
+static bool is_a83t;
|
|
|
|
|
|
static bool sunxi_core_is_cortex_a15(unsigned int core, unsigned int cluster)
|
|
|
{
|
|
@@ -157,6 +170,16 @@ static int sunxi_cpu_powerup(unsigned int cpu, unsigned int cluster)
|
|
|
reg &= ~PRCM_CPU_PO_RST_CTRL_CORE(cpu);
|
|
|
writel(reg, prcm_base + PRCM_CPU_PO_RST_CTRL(cluster));
|
|
|
|
|
|
+ if (is_a83t) {
|
|
|
+ /* assert cpu power-on reset */
|
|
|
+ reg = readl(r_cpucfg_base +
|
|
|
+ R_CPUCFG_CLUSTER_PO_RST_CTRL(cluster));
|
|
|
+ reg &= ~(R_CPUCFG_CLUSTER_PO_RST_CTRL_CORE(cpu));
|
|
|
+ writel(reg, r_cpucfg_base +
|
|
|
+ R_CPUCFG_CLUSTER_PO_RST_CTRL(cluster));
|
|
|
+ udelay(10);
|
|
|
+ }
|
|
|
+
|
|
|
/* Cortex-A7: hold L1 reset disable signal low */
|
|
|
if (!sunxi_core_is_cortex_a15(cpu, cluster)) {
|
|
|
reg = readl(cpucfg_base + CPUCFG_CX_CTRL_REG0(cluster));
|
|
@@ -180,17 +203,38 @@ static int sunxi_cpu_powerup(unsigned int cpu, unsigned int cluster)
|
|
|
/* open power switch */
|
|
|
sunxi_cpu_power_switch_set(cpu, cluster, true);
|
|
|
|
|
|
+	/* A83T: core 0's power gate uses bit 4, so temporarily remap the index */
|
|
|
+ if (is_a83t) {
|
|
|
+ if (cpu == 0)
|
|
|
+ cpu = 4;
|
|
|
+ }
|
|
|
+
|
|
|
/* clear processor power gate */
|
|
|
reg = readl(prcm_base + PRCM_PWROFF_GATING_REG(cluster));
|
|
|
reg &= ~PRCM_PWROFF_GATING_REG_CORE(cpu);
|
|
|
writel(reg, prcm_base + PRCM_PWROFF_GATING_REG(cluster));
|
|
|
udelay(20);
|
|
|
|
|
|
+	/* A83T: restore the original core index after the power-gate remap */
|
|
|
+ if (is_a83t) {
|
|
|
+ if (cpu == 4)
|
|
|
+ cpu = 0;
|
|
|
+ }
|
|
|
+
|
|
|
/* de-assert processor power-on reset */
|
|
|
reg = readl(prcm_base + PRCM_CPU_PO_RST_CTRL(cluster));
|
|
|
reg |= PRCM_CPU_PO_RST_CTRL_CORE(cpu);
|
|
|
writel(reg, prcm_base + PRCM_CPU_PO_RST_CTRL(cluster));
|
|
|
|
|
|
+ if (is_a83t) {
|
|
|
+ reg = readl(r_cpucfg_base +
|
|
|
+ R_CPUCFG_CLUSTER_PO_RST_CTRL(cluster));
|
|
|
+ reg |= R_CPUCFG_CLUSTER_PO_RST_CTRL_CORE(cpu);
|
|
|
+ writel(reg, r_cpucfg_base +
|
|
|
+ R_CPUCFG_CLUSTER_PO_RST_CTRL(cluster));
|
|
|
+ udelay(10);
|
|
|
+ }
|
|
|
+
|
|
|
/* de-assert all processor resets */
|
|
|
reg = readl(cpucfg_base + CPUCFG_CX_RST_CTRL(cluster));
|
|
|
reg |= CPUCFG_CX_RST_CTRL_DBG_RST(cpu);
|
|
@@ -212,6 +256,14 @@ static int sunxi_cluster_powerup(unsigned int cluster)
|
|
|
if (cluster >= SUNXI_NR_CLUSTERS)
|
|
|
return -EINVAL;
|
|
|
|
|
|
+ /* For A83T, assert cluster cores resets */
|
|
|
+ if (is_a83t) {
|
|
|
+ reg = readl(cpucfg_base + CPUCFG_CX_RST_CTRL(cluster));
|
|
|
+ reg &= ~CPUCFG_CX_RST_CTRL_CORE_RST_ALL; /* Core Reset */
|
|
|
+ writel(reg, cpucfg_base + CPUCFG_CX_RST_CTRL(cluster));
|
|
|
+ udelay(10);
|
|
|
+ }
|
|
|
+
|
|
|
/* assert ACINACTM */
|
|
|
reg = readl(cpucfg_base + CPUCFG_CX_CTRL_REG1(cluster));
|
|
|
reg |= CPUCFG_CX_CTRL_REG1_ACINACTM;
|
|
@@ -222,6 +274,16 @@ static int sunxi_cluster_powerup(unsigned int cluster)
|
|
|
reg &= ~PRCM_CPU_PO_RST_CTRL_CORE_ALL;
|
|
|
writel(reg, prcm_base + PRCM_CPU_PO_RST_CTRL(cluster));
|
|
|
|
|
|
+	/* A83T: assert cluster cores power-on resets in R_CPUCFG */
|
|
|
+ if (is_a83t) {
|
|
|
+ reg = readl(r_cpucfg_base +
|
|
|
+ R_CPUCFG_CLUSTER_PO_RST_CTRL(cluster));
|
|
|
+ reg &= ~CPUCFG_CX_RST_CTRL_CORE_RST_ALL;
|
|
|
+ writel(reg, r_cpucfg_base +
|
|
|
+ R_CPUCFG_CLUSTER_PO_RST_CTRL(cluster));
|
|
|
+ udelay(10);
|
|
|
+ }
|
|
|
+
|
|
|
/* assert cluster resets */
|
|
|
reg = readl(cpucfg_base + CPUCFG_CX_RST_CTRL(cluster));
|
|
|
reg &= ~CPUCFG_CX_RST_CTRL_DBG_SOC_RST;
|
|
@@ -252,7 +314,10 @@ static int sunxi_cluster_powerup(unsigned int cluster)
|
|
|
|
|
|
/* clear cluster power gate */
|
|
|
reg = readl(prcm_base + PRCM_PWROFF_GATING_REG(cluster));
|
|
|
- reg &= ~PRCM_PWROFF_GATING_REG_CLUSTER;
|
|
|
+ if (is_a83t)
|
|
|
+ reg &= ~PRCM_PWROFF_GATING_REG_CLUSTER_SUN8I;
|
|
|
+ else
|
|
|
+ reg &= ~PRCM_PWROFF_GATING_REG_CLUSTER_SUN9I;
|
|
|
writel(reg, prcm_base + PRCM_PWROFF_GATING_REG(cluster));
|
|
|
udelay(20);
|
|
|
|
|
@@ -300,74 +365,7 @@ static void sunxi_cluster_cache_disable_without_axi(void)
|
|
|
}
|
|
|
|
|
|
static int sunxi_mc_smp_cpu_table[SUNXI_NR_CLUSTERS][SUNXI_CPUS_PER_CLUSTER];
|
|
|
-static int sunxi_mc_smp_first_comer;
|
|
|
-
|
|
|
-/*
|
|
|
- * Enable cluster-level coherency, in preparation for turning on the MMU.
|
|
|
- *
|
|
|
- * Also enable regional clock gating and L2 data latency settings for
|
|
|
- * Cortex-A15. These settings are from the vendor kernel.
|
|
|
- */
|
|
|
-static void __naked sunxi_mc_smp_cluster_cache_enable(void)
|
|
|
-{
|
|
|
- asm volatile (
|
|
|
- "mrc p15, 0, r1, c0, c0, 0\n"
|
|
|
- "movw r2, #" __stringify(ARM_CPU_PART_MASK & 0xffff) "\n"
|
|
|
- "movt r2, #" __stringify(ARM_CPU_PART_MASK >> 16) "\n"
|
|
|
- "and r1, r1, r2\n"
|
|
|
- "movw r2, #" __stringify(ARM_CPU_PART_CORTEX_A15 & 0xffff) "\n"
|
|
|
- "movt r2, #" __stringify(ARM_CPU_PART_CORTEX_A15 >> 16) "\n"
|
|
|
- "cmp r1, r2\n"
|
|
|
- "bne not_a15\n"
|
|
|
-
|
|
|
- /* The following is Cortex-A15 specific */
|
|
|
-
|
|
|
- /* ACTLR2: Enable CPU regional clock gates */
|
|
|
- "mrc p15, 1, r1, c15, c0, 4\n"
|
|
|
- "orr r1, r1, #(0x1<<31)\n"
|
|
|
- "mcr p15, 1, r1, c15, c0, 4\n"
|
|
|
-
|
|
|
- /* L2ACTLR */
|
|
|
- "mrc p15, 1, r1, c15, c0, 0\n"
|
|
|
- /* Enable L2, GIC, and Timer regional clock gates */
|
|
|
- "orr r1, r1, #(0x1<<26)\n"
|
|
|
- /* Disable clean/evict from being pushed to external */
|
|
|
- "orr r1, r1, #(0x1<<3)\n"
|
|
|
- "mcr p15, 1, r1, c15, c0, 0\n"
|
|
|
-
|
|
|
- /* L2CTRL: L2 data RAM latency */
|
|
|
- "mrc p15, 1, r1, c9, c0, 2\n"
|
|
|
- "bic r1, r1, #(0x7<<0)\n"
|
|
|
- "orr r1, r1, #(0x3<<0)\n"
|
|
|
- "mcr p15, 1, r1, c9, c0, 2\n"
|
|
|
-
|
|
|
- /* End of Cortex-A15 specific setup */
|
|
|
- "not_a15:\n"
|
|
|
-
|
|
|
- /* Get value of sunxi_mc_smp_first_comer */
|
|
|
- "adr r1, first\n"
|
|
|
- "ldr r0, [r1]\n"
|
|
|
- "ldr r0, [r1, r0]\n"
|
|
|
-
|
|
|
- /* Skip cci_enable_port_for_self if not first comer */
|
|
|
- "cmp r0, #0\n"
|
|
|
- "bxeq lr\n"
|
|
|
- "b cci_enable_port_for_self\n"
|
|
|
-
|
|
|
- ".align 2\n"
|
|
|
- "first: .word sunxi_mc_smp_first_comer - .\n"
|
|
|
- );
|
|
|
-}
|
|
|
-
|
|
|
-static void __naked sunxi_mc_smp_secondary_startup(void)
|
|
|
-{
|
|
|
- asm volatile(
|
|
|
- "bl sunxi_mc_smp_cluster_cache_enable\n"
|
|
|
- "b secondary_startup"
|
|
|
- /* Let compiler know about sunxi_mc_smp_cluster_cache_enable */
|
|
|
- :: "i" (sunxi_mc_smp_cluster_cache_enable)
|
|
|
- );
|
|
|
-}
|
|
|
+int sunxi_mc_smp_first_comer;
|
|
|
|
|
|
static DEFINE_SPINLOCK(boot_lock);
|
|
|
|
|
@@ -516,7 +514,10 @@ static int sunxi_cluster_powerdown(unsigned int cluster)
|
|
|
/* gate cluster power */
|
|
|
pr_debug("%s: gate cluster power\n", __func__);
|
|
|
reg = readl(prcm_base + PRCM_PWROFF_GATING_REG(cluster));
|
|
|
- reg |= PRCM_PWROFF_GATING_REG_CLUSTER;
|
|
|
+ if (is_a83t)
|
|
|
+ reg |= PRCM_PWROFF_GATING_REG_CLUSTER_SUN8I;
|
|
|
+ else
|
|
|
+ reg |= PRCM_PWROFF_GATING_REG_CLUSTER_SUN9I;
|
|
|
writel(reg, prcm_base + PRCM_PWROFF_GATING_REG(cluster));
|
|
|
udelay(20);
|
|
|
|
|
@@ -598,8 +599,12 @@ out:
|
|
|
return !ret;
|
|
|
}
|
|
|
|
|
|
-static bool sunxi_mc_smp_cpu_can_disable(unsigned int __unused)
|
|
|
+static bool sunxi_mc_smp_cpu_can_disable(unsigned int cpu)
|
|
|
{
|
|
|
+ /* CPU0 hotplug not handled for sun8i-a83t */
|
|
|
+ if (is_a83t)
|
|
|
+ if (cpu == 0)
|
|
|
+ return false;
|
|
|
return true;
|
|
|
}
|
|
|
#endif
|
|
@@ -637,16 +642,6 @@ static bool __init sunxi_mc_smp_cpu_table_init(void)
|
|
|
*/
|
|
|
typedef typeof(cpu_reset) phys_reset_t;
|
|
|
|
|
|
-static void __init __naked sunxi_mc_smp_resume(void)
|
|
|
-{
|
|
|
- asm volatile(
|
|
|
- "bl sunxi_mc_smp_cluster_cache_enable\n"
|
|
|
- "b cpu_resume"
|
|
|
- /* Let compiler know about sunxi_mc_smp_cluster_cache_enable */
|
|
|
- :: "i" (sunxi_mc_smp_cluster_cache_enable)
|
|
|
- );
|
|
|
-}
|
|
|
-
|
|
|
static int __init nocache_trampoline(unsigned long __unused)
|
|
|
{
|
|
|
phys_reset_t phys_reset;
|
|
@@ -692,12 +687,14 @@ struct sunxi_mc_smp_nodes {
|
|
|
struct device_node *prcm_node;
|
|
|
struct device_node *cpucfg_node;
|
|
|
struct device_node *sram_node;
|
|
|
+ struct device_node *r_cpucfg_node;
|
|
|
};
|
|
|
|
|
|
/* This structure holds SoC-specific bits tied to an enable-method string. */
|
|
|
struct sunxi_mc_smp_data {
|
|
|
const char *enable_method;
|
|
|
int (*get_smp_nodes)(struct sunxi_mc_smp_nodes *nodes);
|
|
|
+ bool is_a83t;
|
|
|
};
|
|
|
|
|
|
static void __init sunxi_mc_smp_put_nodes(struct sunxi_mc_smp_nodes *nodes)
|
|
@@ -705,6 +702,7 @@ static void __init sunxi_mc_smp_put_nodes(struct sunxi_mc_smp_nodes *nodes)
|
|
|
of_node_put(nodes->prcm_node);
|
|
|
of_node_put(nodes->cpucfg_node);
|
|
|
of_node_put(nodes->sram_node);
|
|
|
+ of_node_put(nodes->r_cpucfg_node);
|
|
|
memset(nodes, 0, sizeof(*nodes));
|
|
|
}
|
|
|
|
|
@@ -734,11 +732,42 @@ static int __init sun9i_a80_get_smp_nodes(struct sunxi_mc_smp_nodes *nodes)
|
|
|
return 0;
|
|
|
}
|
|
|
|
|
|
+static int __init sun8i_a83t_get_smp_nodes(struct sunxi_mc_smp_nodes *nodes)
|
|
|
+{
|
|
|
+ nodes->prcm_node = of_find_compatible_node(NULL, NULL,
|
|
|
+ "allwinner,sun8i-a83t-r-ccu");
|
|
|
+ if (!nodes->prcm_node) {
|
|
|
+ pr_err("%s: PRCM not available\n", __func__);
|
|
|
+ return -ENODEV;
|
|
|
+ }
|
|
|
+
|
|
|
+ nodes->cpucfg_node = of_find_compatible_node(NULL, NULL,
|
|
|
+ "allwinner,sun8i-a83t-cpucfg");
|
|
|
+ if (!nodes->cpucfg_node) {
|
|
|
+ pr_err("%s: CPUCFG not available\n", __func__);
|
|
|
+ return -ENODEV;
|
|
|
+ }
|
|
|
+
|
|
|
+ nodes->r_cpucfg_node = of_find_compatible_node(NULL, NULL,
|
|
|
+ "allwinner,sun8i-a83t-r-cpucfg");
|
|
|
+ if (!nodes->r_cpucfg_node) {
|
|
|
+ pr_err("%s: RCPUCFG not available\n", __func__);
|
|
|
+ return -ENODEV;
|
|
|
+ }
|
|
|
+
|
|
|
+ return 0;
|
|
|
+}
|
|
|
+
|
|
|
static const struct sunxi_mc_smp_data sunxi_mc_smp_data[] __initconst = {
|
|
|
{
|
|
|
.enable_method = "allwinner,sun9i-a80-smp",
|
|
|
.get_smp_nodes = sun9i_a80_get_smp_nodes,
|
|
|
},
|
|
|
+ {
|
|
|
+ .enable_method = "allwinner,sun8i-a83t-smp",
|
|
|
+ .get_smp_nodes = sun8i_a83t_get_smp_nodes,
|
|
|
+ .is_a83t = true,
|
|
|
+ },
|
|
|
};
|
|
|
|
|
|
static int __init sunxi_mc_smp_init(void)
|
|
@@ -746,6 +775,7 @@ static int __init sunxi_mc_smp_init(void)
|
|
|
struct sunxi_mc_smp_nodes nodes = { 0 };
|
|
|
struct device_node *node;
|
|
|
struct resource res;
|
|
|
+ void __iomem *addr;
|
|
|
int i, ret;
|
|
|
|
|
|
/*
|
|
@@ -771,6 +801,8 @@ static int __init sunxi_mc_smp_init(void)
|
|
|
break;
|
|
|
}
|
|
|
|
|
|
+ is_a83t = sunxi_mc_smp_data[i].is_a83t;
|
|
|
+
|
|
|
of_node_put(node);
|
|
|
if (ret)
|
|
|
return -ENODEV;
|
|
@@ -808,12 +840,23 @@ static int __init sunxi_mc_smp_init(void)
|
|
|
goto err_unmap_prcm;
|
|
|
}
|
|
|
|
|
|
- sram_b_smp_base = of_io_request_and_map(nodes.sram_node, 0,
|
|
|
- "sunxi-mc-smp");
|
|
|
- if (IS_ERR(sram_b_smp_base)) {
|
|
|
- ret = PTR_ERR(sram_b_smp_base);
|
|
|
- pr_err("%s: failed to map secure SRAM\n", __func__);
|
|
|
- goto err_unmap_release_cpucfg;
|
|
|
+ if (is_a83t) {
|
|
|
+ r_cpucfg_base = of_io_request_and_map(nodes.r_cpucfg_node,
|
|
|
+ 0, "sunxi-mc-smp");
|
|
|
+ if (IS_ERR(r_cpucfg_base)) {
|
|
|
+ ret = PTR_ERR(r_cpucfg_base);
|
|
|
+ pr_err("%s: failed to map R-CPUCFG registers\n",
|
|
|
+ __func__);
|
|
|
+ goto err_unmap_release_cpucfg;
|
|
|
+ }
|
|
|
+ } else {
|
|
|
+ sram_b_smp_base = of_io_request_and_map(nodes.sram_node, 0,
|
|
|
+ "sunxi-mc-smp");
|
|
|
+ if (IS_ERR(sram_b_smp_base)) {
|
|
|
+ ret = PTR_ERR(sram_b_smp_base);
|
|
|
+ pr_err("%s: failed to map secure SRAM\n", __func__);
|
|
|
+ goto err_unmap_release_cpucfg;
|
|
|
+ }
|
|
|
}
|
|
|
|
|
|
/* Configure CCI-400 for boot cluster */
|
|
@@ -821,15 +864,18 @@ static int __init sunxi_mc_smp_init(void)
|
|
|
if (ret) {
|
|
|
pr_err("%s: failed to configure boot cluster: %d\n",
|
|
|
__func__, ret);
|
|
|
- goto err_unmap_release_secure_sram;
|
|
|
+ goto err_unmap_release_sram_rcpucfg;
|
|
|
}
|
|
|
|
|
|
/* We don't need the device nodes anymore */
|
|
|
sunxi_mc_smp_put_nodes(&nodes);
|
|
|
|
|
|
/* Set the hardware entry point address */
|
|
|
- writel(__pa_symbol(sunxi_mc_smp_secondary_startup),
|
|
|
- prcm_base + PRCM_CPU_SOFT_ENTRY_REG);
|
|
|
+ if (is_a83t)
|
|
|
+ addr = r_cpucfg_base + R_CPUCFG_CPU_SOFT_ENTRY_REG;
|
|
|
+ else
|
|
|
+ addr = prcm_base + PRCM_CPU_SOFT_ENTRY_REG;
|
|
|
+ writel(__pa_symbol(sunxi_mc_smp_secondary_startup), addr);
|
|
|
|
|
|
/* Actually enable multi cluster SMP */
|
|
|
smp_set_ops(&sunxi_mc_smp_smp_ops);
|
|
@@ -838,9 +884,14 @@ static int __init sunxi_mc_smp_init(void)
|
|
|
|
|
|
return 0;
|
|
|
|
|
|
-err_unmap_release_secure_sram:
|
|
|
- iounmap(sram_b_smp_base);
|
|
|
- of_address_to_resource(nodes.sram_node, 0, &res);
|
|
|
+err_unmap_release_sram_rcpucfg:
|
|
|
+ if (is_a83t) {
|
|
|
+ iounmap(r_cpucfg_base);
|
|
|
+ of_address_to_resource(nodes.r_cpucfg_node, 0, &res);
|
|
|
+ } else {
|
|
|
+ iounmap(sram_b_smp_base);
|
|
|
+ of_address_to_resource(nodes.sram_node, 0, &res);
|
|
|
+ }
|
|
|
release_mem_region(res.start, resource_size(&res));
|
|
|
err_unmap_release_cpucfg:
|
|
|
iounmap(cpucfg_base);
|