@@ -565,25 +565,31 @@ void __init initialize_cache_info(void)
DBG(" <- initialize_cache_info()\n");
}
-/* This returns the limit below which memory accesses to the linear
- * mapping are guarnateed not to cause a TLB or SLB miss. This is
- * used to allocate interrupt or emergency stacks for which our
- * exception entry path doesn't deal with being interrupted.
+/*
+ * This returns the limit below which memory accesses to the linear
+ * mapping are guaranteed not to cause an architectural exception (e.g.,
+ * TLB or SLB miss fault).
+ *
+ * This is used to allocate PACAs and various interrupt stacks that
+ * are accessed early in interrupt handlers that must not cause
+ * re-entrant interrupts.
*/
-static __init u64 safe_stack_limit(void)
+__init u64 ppc64_bolted_size(void)
{
#ifdef CONFIG_PPC_BOOK3E
/* Freescale BookE bolts the entire linear mapping */
- if (mmu_has_feature(MMU_FTR_TYPE_FSL_E))
+ /* XXX: BookE ppc64_rma_limit setup seems to disagree? */
+ if (early_mmu_has_feature(MMU_FTR_TYPE_FSL_E))
return linear_map_top;
/* Other BookE, we assume the first GB is bolted */
return 1ul << 30;
#else
+ /* BookS radix, does not take faults on linear mapping */
if (early_radix_enabled())
return ULONG_MAX;
- /* BookS, the first segment is bolted */
- if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
+ /* BookS hash, the first segment is bolted */
+ if (early_mmu_has_feature(MMU_FTR_1T_SEGMENT))
return 1UL << SID_SHIFT_1T;
return 1UL << SID_SHIFT;
#endif
@@ -591,7 +597,7 @@ static __init u64 safe_stack_limit(void)
void __init irqstack_early_init(void)
{
- u64 limit = safe_stack_limit();
+ u64 limit = ppc64_bolted_size();
unsigned int i;
/*
@@ -676,7 +682,7 @@ void __init emergency_stack_init(void)
* initialized in kernel/irq.c. These are initialized here in order
* to have emergency stacks available as early as possible.
*/
- limit = min(safe_stack_limit(), ppc64_rma_size);
+ limit = min(ppc64_bolted_size(), ppc64_rma_size);
for_each_possible_cpu(i) {
struct thread_info *ti;
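
A minimal sketch of how a caller outside this file might use the newly
exported ppc64_bolted_size(); the allocation below is illustrative only
(the variable names and THREAD_SIZE sizing are assumptions, not part of
this patch). It follows the same pattern as emergency_stack_init():
clamp the upper bound to memory that is both bolted (so accesses never
take an SLB/TLB fault) and within the RMA (so it is usable in real
mode), then allocate beneath that limit.

	/* Illustrative sketch, not from this patch. */
	u64 limit = min(ppc64_bolted_size(), ppc64_rma_size);
	void *stack = __va(memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit));

memblock_alloc_base() returns a physical address and panics on failure,
which is acceptable this early in boot; __va() converts it to a
linear-mapping address that is safe to touch from the exception entry
paths this limit exists to protect.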