@@ -51,12 +51,14 @@ static int dcscb_allcpus_mask[2];
 static int dcscb_power_up(unsigned int cpu, unsigned int cluster)
 {
 	unsigned int rst_hold, cpumask = (1 << cpu);
-	unsigned int all_mask = dcscb_allcpus_mask[cluster];
+	unsigned int all_mask;
 
 	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
 	if (cpu >= 4 || cluster >= 2)
 		return -EINVAL;
 
+	all_mask = dcscb_allcpus_mask[cluster];
+
 	/*
 	 * Since this is called with IRQs enabled, and no arch_spin_lock_irq
 	 * variant exists, we need to disable IRQs manually here.
@@ -101,11 +103,12 @@ static void dcscb_power_down(void)
 	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
 	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
 	cpumask = (1 << cpu);
-	all_mask = dcscb_allcpus_mask[cluster];
 
 	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
 	BUG_ON(cpu >= 4 || cluster >= 2);
 
+	all_mask = dcscb_allcpus_mask[cluster];
+
 	__mcpm_cpu_going_down(cpu, cluster);
 
 	arch_spin_lock(&dcscb_lock);