@@ -40,11 +40,22 @@
 static bool boot_cpu_done;
 
 static int __read_mostly __pat_enabled = IS_ENABLED(CONFIG_X86_PAT);
+static void init_cache_modes(void);
 
-static inline void pat_disable(const char *reason)
+void pat_disable(const char *reason)
 {
+	if (!__pat_enabled)
+		return;
+
+	if (boot_cpu_done) {
+		WARN_ONCE(1, "x86/PAT: PAT cannot be disabled after initialization\n");
+		return;
+	}
+
 	__pat_enabled = 0;
 	pr_info("x86/PAT: %s\n", reason);
+
+	init_cache_modes();
 }
 
 static int __init nopat(char *str)
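
With pat_disable() exported (and now guarded), the intended calling convention is: early-boot code may turn PAT off at any point before pat_init() runs on the boot CPU; once boot_cpu_done is set, the call degrades to a WARN_ONCE() no-op. A minimal sketch of a hypothetical early caller follows; the function and helper names are illustrative, not part of this patch:

	/* Hypothetical early-boot caller; must run before pat_init(). */
	static void __init example_platform_setup(void)
	{
		if (!platform_supports_pat())	/* illustrative check */
			pat_disable("PAT disabled by platform setup.");
	}
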
@@ -181,7 +192,7 @@ static enum page_cache_mode pat_get_cache_mode(unsigned pat_val, char *msg)
  * configuration.
  * Using lower indices is preferred, so we start with highest index.
  */
-void pat_init_cache_modes(u64 pat)
+static void __init_cache_modes(u64 pat)
 {
 	enum page_cache_mode cache;
 	char pat_msg[33];
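
Since the function becomes static, the old global prototype presumably has to come out of arch/x86/include/asm/pat.h, with pat_disable() exported in its place. A sketch of the matching header change, assumed rather than shown in this excerpt:

	-void pat_init_cache_modes(u64 pat);
	+extern void pat_disable(const char *reason);
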
@@ -202,14 +213,11 @@ static void pat_bsp_init(u64 pat)
 {
 	u64 tmp_pat;
 
-	if (!cpu_has_pat) {
+	if (!boot_cpu_has(X86_FEATURE_PAT)) {
 		pat_disable("PAT not supported by CPU.");
 		return;
 	}
 
-	if (!pat_enabled())
-		goto done;
-
 	rdmsrl(MSR_IA32_CR_PAT, tmp_pat);
 	if (!tmp_pat) {
 		pat_disable("PAT MSR is 0, disabled.");
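
Taken together with the next hunk, the boot-CPU path loses its goto and always validates the MSR; pat_bsp_init() ends up reading roughly as below (the return and closing brace between the two hunks are inferred from the surrounding context):

	static void pat_bsp_init(u64 pat)
	{
		u64 tmp_pat;

		if (!boot_cpu_has(X86_FEATURE_PAT)) {
			pat_disable("PAT not supported by CPU.");
			return;
		}

		rdmsrl(MSR_IA32_CR_PAT, tmp_pat);
		if (!tmp_pat) {
			pat_disable("PAT MSR is 0, disabled.");
			return;
		}

		wrmsrl(MSR_IA32_CR_PAT, pat);
		__init_cache_modes(pat);
	}
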
@@ -218,16 +226,12 @@ static void pat_bsp_init(u64 pat)
 
 	wrmsrl(MSR_IA32_CR_PAT, pat);
 
-done:
-	pat_init_cache_modes(pat);
+	__init_cache_modes(pat);
 }
 
 static void pat_ap_init(u64 pat)
 {
-	if (!pat_enabled())
-		return;
-
-	if (!cpu_has_pat) {
+	if (!boot_cpu_has(X86_FEATURE_PAT)) {
 		/*
 		 * If this happens we are on a secondary CPU, but switched to
 		 * PAT on the boot CPU. We have no way to undo PAT.
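
For reference, the cpu_has_pat shorthand being replaced in these hunks was, as best I recall from <asm/cpufeature.h> of this vintage (worth verifying against the tree), defined as exactly the same test, so the conversion is purely mechanical:

	#define cpu_has_pat	boot_cpu_has(X86_FEATURE_PAT)
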
@@ -238,18 +242,32 @@ static void pat_ap_init(u64 pat)
 	wrmsrl(MSR_IA32_CR_PAT, pat);
 }
 
-void pat_init(void)
+static void init_cache_modes(void)
 {
-	u64 pat;
-	struct cpuinfo_x86 *c = &boot_cpu_data;
+	u64 pat = 0;
+	static int init_cm_done;
 
-	if (!pat_enabled()) {
+	if (init_cm_done)
+		return;
+
+	if (boot_cpu_has(X86_FEATURE_PAT)) {
+		/*
+		 * The CPU supports PAT. Set the PAT table to be consistent
+		 * with the PAT MSR. This covers the "nopat" boot option and
+		 * virtual machine environments that support PAT without
+		 * MTRRs; in particular, Xen sets up the PAT MSR uniquely.
+		 *
+		 * If the PAT MSR reads as 0, it is treated as invalid and
+		 * emulated as no PAT.
+		 */
+		rdmsrl(MSR_IA32_CR_PAT, pat);
+	}
+
+	if (!pat) {
 		/*
 		 * No PAT. Emulate the PAT table that corresponds to the two
-		 * cache bits, PWT (Write Through) and PCD (Cache Disable). This
-		 * setup is the same as the BIOS default setup when the system
-		 * has PAT but the "nopat" boot option has been specified. This
-		 * emulated PAT table is used when MSR_IA32_CR_PAT returns 0.
+		 * cache bits, PWT (Write Through) and PCD (Cache Disable).
+		 * This setup is also the same as the BIOS default setup.
 		 *
 		 * PTE encoding:
 		 *
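
Assembled from this hunk and the next, the new helper reads roughly as follows (the long PTE-encoding table comment is elided):

	static void init_cache_modes(void)
	{
		u64 pat = 0;
		static int init_cm_done;

		if (init_cm_done)
			return;

		if (boot_cpu_has(X86_FEATURE_PAT))
			rdmsrl(MSR_IA32_CR_PAT, pat);	/* 0 is treated as "no PAT" */

		if (!pat) {
			/* ... emulate the BIOS default table ... */
			pat = PAT(0, WB) | PAT(1, WT) | PAT(2, UC_MINUS) | PAT(3, UC) |
			      PAT(4, WB) | PAT(5, WT) | PAT(6, UC_MINUS) | PAT(7, UC);
		}

		__init_cache_modes(pat);
		init_cm_done = 1;
	}
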
@@ -266,10 +284,36 @@ void pat_init(void)
 		 */
 		pat = PAT(0, WB) | PAT(1, WT) | PAT(2, UC_MINUS) | PAT(3, UC) |
 		      PAT(4, WB) | PAT(5, WT) | PAT(6, UC_MINUS) | PAT(7, UC);
+	}
+
+	__init_cache_modes(pat);
+
+	init_cm_done = 1;
+}
+
+/**
+ * pat_init - Initialize the PAT MSR and PAT table
+ *
+ * This function initializes the PAT MSR and PAT table with an OS-defined
+ * value to enable the additional cache attributes, WC and WT.
+ *
+ * This function must be called on all CPUs using the specific sequence of
+ * operations defined in the Intel SDM. mtrr_rendezvous_handler() provides
+ * this procedure for PAT.
+ */
+void pat_init(void)
+{
+	u64 pat;
+	struct cpuinfo_x86 *c = &boot_cpu_data;
+
+	if (!pat_enabled()) {
+		init_cache_modes();
+		return;
+	}
 
-	} else if ((c->x86_vendor == X86_VENDOR_INTEL) &&
-		   (((c->x86 == 0x6) && (c->x86_model <= 0xd)) ||
-		    ((c->x86 == 0xf) && (c->x86_model <= 0x6)))) {
+	if ((c->x86_vendor == X86_VENDOR_INTEL) &&
+	    (((c->x86 == 0x6) && (c->x86_model <= 0xd)) ||
+	     ((c->x86 == 0xf) && (c->x86_model <= 0x6)))) {
 		/*
 		 * PAT support with the lower four entries. Intel Pentium 2,
 		 * 3, M, and 4 are affected by PAT errata, which makes the
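
The PAT() helper used above is defined earlier in pat.c; from memory (please double-check against the tree) it shifts a PAT_* encoding into the per-entry byte of the MSR value:

	#define PAT(x, y)	((u64)PAT_ ## y << ((x) * 8))

With PAT_UC = 0, PAT_WT = 4, PAT_WB = 6 and PAT_UC_MINUS = 7, the emulated no-PAT table works out to the familiar power-on default:

	WB WT UC- UC WB WT UC- UC  ==  0x0007040600070406
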
@@ -734,25 +778,6 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
 	if (file->f_flags & O_DSYNC)
 		pcm = _PAGE_CACHE_MODE_UC_MINUS;
 
-#ifdef CONFIG_X86_32
-	/*
-	 * On the PPro and successors, the MTRRs are used to set
-	 * memory types for physical addresses outside main memory,
-	 * so blindly setting UC or PWT on those pages is wrong.
-	 * For Pentiums and earlier, the surround logic should disable
-	 * caching for the high addresses through the KEN pin, but
-	 * we maintain the tradition of paranoia in this code.
-	 */
-	if (!pat_enabled() &&
-	    !(boot_cpu_has(X86_FEATURE_MTRR) ||
-	      boot_cpu_has(X86_FEATURE_K6_MTRR) ||
-	      boot_cpu_has(X86_FEATURE_CYRIX_ARR) ||
-	      boot_cpu_has(X86_FEATURE_CENTAUR_MCR)) &&
-	    (pfn << PAGE_SHIFT) >= __pa(high_memory)) {
-		pcm = _PAGE_CACHE_MODE_UC;
-	}
-#endif
-
 	*vma_prot = __pgprot((pgprot_val(*vma_prot) & ~_PAGE_CACHE_MASK) |
 			     cachemode2protval(pcm));
 	return 1;
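
With the 32-bit fallback removed, a caller that needs an uncached mapping of a high physical address still has a path through the O_DSYNC check retained above. A minimal, hypothetical userspace sketch (address and length are illustrative):

	#include <fcntl.h>
	#include <sys/mman.h>

	/* Opening /dev/mem with O_DSYNC yields a UC- mapping via the check above. */
	static void *map_uncached(off_t phys)
	{
		int fd = open("/dev/mem", O_RDWR | O_DSYNC);

		if (fd < 0)
			return MAP_FAILED;
		return mmap(NULL, 4096, PROT_READ | PROT_WRITE,
			    MAP_SHARED, fd, phys);
	}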