|
@@ -311,6 +311,7 @@ static struct uasm_label labels[128];
 static struct uasm_reloc relocs[128];
 
 static int check_for_high_segbits;
+static bool fill_includes_sw_bits;
 
 static unsigned int kscratch_used_mask;
 
|
@@ -630,8 +631,14 @@ static void build_tlb_write_entry(u32 **p, struct uasm_label **l,
 static __maybe_unused void build_convert_pte_to_entrylo(u32 **p,
 							unsigned int reg)
 {
-	if (cpu_has_rixi) {
-		UASM_i_ROTR(p, reg, reg, ilog2(_PAGE_GLOBAL));
+	if (cpu_has_rixi && _PAGE_NO_EXEC) {
+		if (fill_includes_sw_bits) {
+			UASM_i_ROTR(p, reg, reg, ilog2(_PAGE_GLOBAL));
+		} else {
+			UASM_i_SRL(p, reg, reg, ilog2(_PAGE_NO_EXEC));
+			UASM_i_ROTR(p, reg, reg,
+				    ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
+		}
 	} else {
 #ifdef CONFIG_PHYS_ADDR_T_64BIT
 		uasm_i_dsrl_safe(p, reg, reg, ilog2(_PAGE_GLOBAL));
|
@@ -2338,6 +2345,41 @@ static void config_xpa_params(void)
 #endif
 }
 
+static void check_pabits(void)
+{
+	unsigned long entry;
+	unsigned pabits, fillbits;
+
+	if (!cpu_has_rixi || !_PAGE_NO_EXEC) {
+		/*
+		 * We'll only be making use of the fact that we can rotate bits
+		 * into the fill if the CPU supports RIXI, so don't bother
+		 * probing this for CPUs which don't.
+		 */
+		return;
+	}
+
+	write_c0_entrylo0(~0ul);
+	back_to_back_c0_hazard();
+	entry = read_c0_entrylo0();
+
+	/* clear all non-PFN bits */
+	entry &= ~((1 << MIPS_ENTRYLO_PFN_SHIFT) - 1);
+	entry &= ~(MIPS_ENTRYLO_RI | MIPS_ENTRYLO_XI);
+
+	/* find a lower bound on PABITS, and upper bound on fill bits */
+	pabits = fls_long(entry) + 6;
+	fillbits = max_t(int, (int)BITS_PER_LONG - pabits, 0);
+
+	/* minus the RI & XI bits */
+	fillbits -= min_t(unsigned, fillbits, 2);
+
+	if (fillbits >= ilog2(_PAGE_NO_EXEC))
+		fill_includes_sw_bits = true;
+
+	pr_debug("Entry* registers contain %u fill bits\n", fillbits);
+}
+
 void build_tlb_refill_handler(void)
 {
 	/*
|
@@ -2348,6 +2390,7 @@ void build_tlb_refill_handler(void)
 	static int run_once = 0;
 
 	output_pgtable_bits_defines();
+	check_pabits();
 
 #ifdef CONFIG_64BIT
 	check_for_high_segbits = current_cpu_data.vmbits > (PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3);