@@ -33,13 +33,16 @@
 #include "pat_internal.h"
 #include "mm_internal.h"
 
+#undef pr_fmt
+#define pr_fmt(fmt) "" fmt
+
 #ifdef CONFIG_X86_PAT
 int __read_mostly pat_enabled = 1;
 
 static inline void pat_disable(const char *reason)
 {
 	pat_enabled = 0;
-	printk(KERN_INFO "%s\n", reason);
+	pr_info("x86/PAT: %s\n", reason);
 }
 
 static int __init nopat(char *str)
@@ -188,7 +191,7 @@ void pat_init_cache_modes(void)
 					   pat_msg + 4 * i);
 		update_cache_mode_entry(i, cache);
 	}
-	pr_info("PAT configuration [0-7]: %s\n", pat_msg);
+	pr_info("x86/PAT: Configuration [0-7]: %s\n", pat_msg);
 }
 
 #define PAT(x, y) ((u64)PAT_ ## y << ((x)*8))
@@ -211,8 +214,7 @@ void pat_init(void)
 			 * switched to PAT on the boot CPU. We have no way to
 			 * undo PAT.
 			 */
-			printk(KERN_ERR "PAT enabled, "
-			       "but not supported by secondary CPU\n");
+			pr_err("x86/PAT: PAT enabled, but not supported by secondary CPU\n");
 			BUG();
 		}
 	}
@@ -347,7 +349,7 @@ static int reserve_ram_pages_type(u64 start, u64 end,
 		page = pfn_to_page(pfn);
 		type = get_page_memtype(page);
 		if (type != -1) {
-			pr_info("reserve_ram_pages_type failed [mem %#010Lx-%#010Lx], track 0x%x, req 0x%x\n",
+			pr_info("x86/PAT: reserve_ram_pages_type failed [mem %#010Lx-%#010Lx], track 0x%x, req 0x%x\n",
 				start, end - 1, type, req_type);
 			if (new_type)
 				*new_type = type;
@@ -451,9 +453,9 @@ int reserve_memtype(u64 start, u64 end, enum page_cache_mode req_type,
 
 	err = rbt_memtype_check_insert(new, new_type);
 	if (err) {
-		printk(KERN_INFO "reserve_memtype failed [mem %#010Lx-%#010Lx], track %s, req %s\n",
-			start, end - 1,
-			cattr_name(new->type), cattr_name(req_type));
+		pr_info("x86/PAT: reserve_memtype failed [mem %#010Lx-%#010Lx], track %s, req %s\n",
+			start, end - 1,
+			cattr_name(new->type), cattr_name(req_type));
 		kfree(new);
 		spin_unlock(&memtype_lock);
 
@@ -497,8 +499,8 @@ int free_memtype(u64 start, u64 end)
 	spin_unlock(&memtype_lock);
 
 	if (!entry) {
-		printk(KERN_INFO "%s:%d freeing invalid memtype [mem %#010Lx-%#010Lx]\n",
-			current->comm, current->pid, start, end - 1);
+		pr_info("x86/PAT: %s:%d freeing invalid memtype [mem %#010Lx-%#010Lx]\n",
+			current->comm, current->pid, start, end - 1);
 		return -EINVAL;
 	}
 
@@ -628,8 +630,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
 
 	while (cursor < to) {
 		if (!devmem_is_allowed(pfn)) {
-			printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx], PAT prevents it\n",
-				current->comm, from, to - 1);
+			pr_info("x86/PAT: Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx], PAT prevents it\n",
+				current->comm, from, to - 1);
 			return 0;
 		}
 		cursor += PAGE_SIZE;
@@ -698,8 +700,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size,
 				size;
 
 	if (ioremap_change_attr((unsigned long)__va(base), id_sz, pcm) < 0) {
-		printk(KERN_INFO "%s:%d ioremap_change_attr failed %s "
-			"for [mem %#010Lx-%#010Lx]\n",
+		pr_info("x86/PAT: %s:%d ioremap_change_attr failed %s for [mem %#010Lx-%#010Lx]\n",
 			current->comm, current->pid,
 			cattr_name(pcm),
 			base, (unsigned long long)(base + size-1));
@@ -734,7 +735,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
 
 		pcm = lookup_memtype(paddr);
 		if (want_pcm != pcm) {
-			printk(KERN_WARNING "%s:%d map pfn RAM range req %s for [mem %#010Lx-%#010Lx], got %s\n",
+			pr_warn("x86/PAT: %s:%d map pfn RAM range req %s for [mem %#010Lx-%#010Lx], got %s\n",
 				current->comm, current->pid,
 				cattr_name(want_pcm),
 				(unsigned long long)paddr,
@@ -755,13 +756,12 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
 		if (strict_prot ||
 		    !is_new_memtype_allowed(paddr, size, want_pcm, pcm)) {
 			free_memtype(paddr, paddr + size);
-			printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
-			       " for [mem %#010Lx-%#010Lx], got %s\n",
-			       current->comm, current->pid,
-			       cattr_name(want_pcm),
-			       (unsigned long long)paddr,
-			       (unsigned long long)(paddr + size - 1),
-			       cattr_name(pcm));
+			pr_err("x86/PAT: %s:%d map pfn expected mapping type %s for [mem %#010Lx-%#010Lx], got %s\n",
+			       current->comm, current->pid,
+			       cattr_name(want_pcm),
+			       (unsigned long long)paddr,
+			       (unsigned long long)(paddr + size - 1),
+			       cattr_name(pcm));
 			return -EINVAL;
 		}
 		/*