|
@@ -99,12 +99,28 @@ static inline void free_lppacas(void) { }
|
|
|
* 3 persistent SLBs are registered here. The buffer will be zero
|
|
|
 * initially, hence will all be invalid until we actually write them.
|
|
|
*/
|
|
|
-static struct slb_shadow slb_shadow[] __cacheline_aligned = {
|
|
|
- [0 ... (NR_CPUS-1)] = {
|
|
|
- .persistent = cpu_to_be32(SLB_NUM_BOLTED),
|
|
|
- .buffer_length = cpu_to_be32(sizeof(struct slb_shadow)),
|
|
|
- },
|
|
|
-};
|
|
|
+static struct slb_shadow *slb_shadow;
|
|
|
+
|
|
|
+static void __init allocate_slb_shadows(int nr_cpus, int limit)
|
|
|
+{
|
|
|
+ int size = PAGE_ALIGN(sizeof(struct slb_shadow) * nr_cpus);
|
|
|
+ slb_shadow = __va(memblock_alloc_base(size, PAGE_SIZE, limit));
|
|
|
+ memset(slb_shadow, 0, size);
|
|
|
+}
|
|
|
+
|
|
|
+static struct slb_shadow * __init init_slb_shadow(int cpu)
|
|
|
+{
|
|
|
+ struct slb_shadow *s = &slb_shadow[cpu];
|
|
|
+
|
|
|
+ s->persistent = cpu_to_be32(SLB_NUM_BOLTED);
|
|
|
+ s->buffer_length = cpu_to_be32(sizeof(*s));
|
|
|
+
|
|
|
+ return s;
|
|
|
+}
|
|
|
+
|
|
|
+#else /* CONFIG_PPC_STD_MMU_64 */
|
|
|
+
|
|
|
+static void __init allocate_slb_shadows(int nr_cpus, int limit) { }
|
|
|
|
|
|
#endif /* CONFIG_PPC_STD_MMU_64 */
|
|
|
|
|
@@ -142,7 +158,7 @@ void __init initialise_paca(struct paca_struct *new_paca, int cpu)
|
|
|
new_paca->__current = &init_task;
|
|
|
new_paca->data_offset = 0xfeeeeeeeeeeeeeeeULL;
|
|
|
#ifdef CONFIG_PPC_STD_MMU_64
|
|
|
- new_paca->slb_shadow_ptr = &slb_shadow[cpu];
|
|
|
+ new_paca->slb_shadow_ptr = init_slb_shadow(cpu);
|
|
|
#endif /* CONFIG_PPC_STD_MMU_64 */
|
|
|
}
|
|
|
|
|
@@ -190,6 +206,8 @@ void __init allocate_pacas(void)
|
|
|
|
|
|
allocate_lppacas(nr_cpu_ids, limit);
|
|
|
|
|
|
+ allocate_slb_shadows(nr_cpu_ids, limit);
|
|
|
+
|
|
|
/* Can't use for_each_*_cpu, as they aren't functional yet */
|
|
|
for (cpu = 0; cpu < nr_cpu_ids; cpu++)
|
|
|
initialise_paca(&paca[cpu], cpu);
|