@@ -139,20 +139,21 @@ static DEFINE_SPINLOCK(memtype_lock);	/* protects memtype accesses */
  * The intersection is based on "Effective Memory Type" tables in IA-32
  * SDM vol 3a
  */
-static unsigned long pat_x_mtrr_type(u64 start, u64 end, unsigned long req_type)
+static unsigned long pat_x_mtrr_type(u64 start, u64 end,
+				     enum page_cache_mode req_type)
 {
 	/*
 	 * Look for MTRR hint to get the effective type in case where PAT
 	 * request is for WB.
 	 */
-	if (req_type == _PAGE_CACHE_WB) {
+	if (req_type == _PAGE_CACHE_MODE_WB) {
 		u8 mtrr_type;
 
 		mtrr_type = mtrr_type_lookup(start, end);
 		if (mtrr_type != MTRR_TYPE_WRBACK)
-			return _PAGE_CACHE_UC_MINUS;
+			return _PAGE_CACHE_MODE_UC_MINUS;
 
-		return _PAGE_CACHE_WB;
+		return _PAGE_CACHE_MODE_WB;
 	}
 
 	return req_type;
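
For context: these hunks build on the enum page_cache_mode type that earlier patches in this series added to <asm/pgtable_types.h>. A minimal sketch of the assumed definition (the authoritative values live in that header):

enum page_cache_mode {
	_PAGE_CACHE_MODE_WB       = 0,	/* write-back, the default */
	_PAGE_CACHE_MODE_WC       = 1,	/* write-combining */
	_PAGE_CACHE_MODE_UC_MINUS = 2,	/* uncached, MTRRs may override */
	_PAGE_CACHE_MODE_UC       = 3,	/* strong uncached */
	_PAGE_CACHE_MODE_WT       = 4,	/* write-through */
	_PAGE_CACHE_MODE_WP       = 5,	/* write-protected */
	_PAGE_CACHE_MODE_NUM      = 8
};

Unlike the old _PAGE_CACHE_* constants, these values are symbolic indices into translation tables, not raw PAT/PCD/PWT page-table bits.
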
@@ -207,25 +208,26 @@ static int pat_pagerange_is_ram(resource_size_t start, resource_size_t end)
  * - Find the memtype of all the pages in the range, look for any conflicts
  * - In case of no conflicts, set the new memtype for pages in the range
  */
-static int reserve_ram_pages_type(u64 start, u64 end, unsigned long req_type,
-				  unsigned long *new_type)
+static int reserve_ram_pages_type(u64 start, u64 end,
+				  enum page_cache_mode req_type,
+				  enum page_cache_mode *new_type)
 {
 	struct page *page;
 	u64 pfn;
 
-	if (req_type == _PAGE_CACHE_UC) {
+	if (req_type == _PAGE_CACHE_MODE_UC) {
 		/* We do not support strong UC */
 		WARN_ON_ONCE(1);
-		req_type = _PAGE_CACHE_UC_MINUS;
+		req_type = _PAGE_CACHE_MODE_UC_MINUS;
 	}
 
 	for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
-		unsigned long type;
+		enum page_cache_mode type;
 
 		page = pfn_to_page(pfn);
 		type = get_page_memtype(page);
 		if (type != -1) {
-			printk(KERN_INFO "reserve_ram_pages_type failed [mem %#010Lx-%#010Lx], track 0x%lx, req 0x%lx\n",
+			pr_info("reserve_ram_pages_type failed [mem %#010Lx-%#010Lx], track 0x%x, req 0x%x\n",
 				start, end - 1, type, req_type);
 			if (new_type)
 				*new_type = type;
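
A note on the type != -1 test surviving the switch to an enum: get_page_memtype() reports a page that was never reserved with the sentinel -1, and a C enum variable can hold that value, so the comparison stays valid. A hedged sketch of the assumed contract (error handling illustrative):

	enum page_cache_mode type;

	type = get_page_memtype(page);	/* -1 == untracked, default WB */
	if (type != -1) {
		/* conflict: the page already carries a memtype */
		if (new_type)
			*new_type = type;
		return -EBUSY;
	}
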
@@ -258,21 +260,21 @@ static int free_ram_pages_type(u64 start, u64 end)
 
 /*
  * req_type typically has one of the:
- * - _PAGE_CACHE_WB
- * - _PAGE_CACHE_WC
- * - _PAGE_CACHE_UC_MINUS
- * - _PAGE_CACHE_UC
+ * - _PAGE_CACHE_MODE_WB
+ * - _PAGE_CACHE_MODE_WC
+ * - _PAGE_CACHE_MODE_UC_MINUS
+ * - _PAGE_CACHE_MODE_UC
  *
  * If new_type is NULL, function will return an error if it cannot reserve the
  * region with req_type. If new_type is non-NULL, function will return
  * available type in new_type in case of no error. In case of any error
  * it will return a negative return value.
  */
-int reserve_memtype(u64 start, u64 end, unsigned long req_type,
-		    unsigned long *new_type)
+int reserve_memtype(u64 start, u64 end, enum page_cache_mode req_type,
+		    enum page_cache_mode *new_type)
 {
 	struct memtype *new;
-	unsigned long actual_type;
+	enum page_cache_mode actual_type;
 	int is_range_ram;
 	int err = 0;
 
@@ -281,10 +283,10 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
 	if (!pat_enabled) {
 		/* This is identical to page table setting without PAT */
 		if (new_type) {
-			if (req_type == _PAGE_CACHE_WC)
-				*new_type = _PAGE_CACHE_UC_MINUS;
+			if (req_type == _PAGE_CACHE_MODE_WC)
+				*new_type = _PAGE_CACHE_MODE_UC_MINUS;
 			else
-				*new_type = req_type & _PAGE_CACHE_MASK;
+				*new_type = req_type;
 		}
 		return 0;
 	}
@@ -292,7 +294,7 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
 	/* Low ISA region is always mapped WB in page table. No need to track */
 	if (x86_platform.is_untracked_pat_range(start, end)) {
 		if (new_type)
-			*new_type = _PAGE_CACHE_WB;
+			*new_type = _PAGE_CACHE_MODE_WB;
 		return 0;
 	}
 
@@ -302,7 +304,7 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
 	 * tools and ACPI tools). Use WB request for WB memory and use
 	 * UC_MINUS otherwise.
 	 */
-	actual_type = pat_x_mtrr_type(start, end, req_type & _PAGE_CACHE_MASK);
+	actual_type = pat_x_mtrr_type(start, end, req_type);
 
 	if (new_type)
 		*new_type = actual_type;
@@ -408,7 +410,7 @@ static enum page_cache_mode lookup_memtype(u64 paddr)
 	if (pat_pagerange_is_ram(paddr, paddr + PAGE_SIZE)) {
 		struct page *page;
 		page = pfn_to_page(paddr >> PAGE_SHIFT);
-		rettype = pgprot2cachemode(__pgprot(get_page_memtype(page)));
+		rettype = get_page_memtype(page);
 		/*
 		 * -1 from get_page_memtype() implies RAM page is in its
 		 * default state and not reserved, and hence of type WB
@@ -423,7 +425,7 @@ static enum page_cache_mode lookup_memtype(u64 paddr)
 
 	entry = rbt_memtype_lookup(paddr);
 	if (entry != NULL)
-		rettype = pgprot2cachemode(__pgprot(entry->type));
+		rettype = entry->type;
 	else
 		rettype = _PAGE_CACHE_MODE_UC_MINUS;
 
@@ -447,18 +449,14 @@ int io_reserve_memtype(resource_size_t start, resource_size_t end,
 	resource_size_t size = end - start;
 	enum page_cache_mode req_type = *type;
 	enum page_cache_mode new_type;
-	unsigned long new_prot;
 	int ret;
 
 	WARN_ON_ONCE(iomem_map_sanity_check(start, size));
 
-	ret = reserve_memtype(start, end, cachemode2protval(req_type),
-			      &new_prot);
+	ret = reserve_memtype(start, end, req_type, &new_type);
 	if (ret)
 		goto out_err;
 
-	new_type = pgprot2cachemode(__pgprot(new_prot));
-
 	if (!is_new_memtype_allowed(start, size, req_type, new_type))
 		goto out_free;
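
The deleted lines here were a full round trip through the raw PTE bit encoding: encode req_type into bits for reserve_memtype(), then decode new_prot back into a cache mode. For readers without the tree handy, a rough sketch of the two helpers involved -- in the kernel they are table-driven macros in <asm/pgtable_types.h>, so treat this as illustration only:

static inline unsigned long cachemode2protval(enum page_cache_mode pcm)
{
	/* cache mode -> _PAGE_PAT/_PAGE_PCD/_PAGE_PWT bits */
	return __cachemode2pte_tbl[pcm];
}

static inline enum page_cache_mode pgprot2cachemode(pgprot_t pgprot)
{
	/* PAT/PCD/PWT bits -> cache mode */
	return __pte2cachemode_tbl[__pte2cm_idx(pgprot_val(pgprot) &
						_PAGE_CACHE_MASK)];
}

With reserve_memtype() now speaking enum page_cache_mode natively, the new_prot temporary and both conversions simply disappear.
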
@@ -524,13 +522,13 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
 int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
 				unsigned long size, pgprot_t *vma_prot)
 {
-	unsigned long flags = _PAGE_CACHE_WB;
+	enum page_cache_mode pcm = _PAGE_CACHE_MODE_WB;
 
 	if (!range_is_allowed(pfn, size))
 		return 0;
 
 	if (file->f_flags & O_DSYNC)
-		flags = _PAGE_CACHE_UC_MINUS;
+		pcm = _PAGE_CACHE_MODE_UC_MINUS;
 
 #ifdef CONFIG_X86_32
 	/*
@@ -547,12 +545,12 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
 	      boot_cpu_has(X86_FEATURE_CYRIX_ARR) ||
 	      boot_cpu_has(X86_FEATURE_CENTAUR_MCR)) &&
 	    (pfn << PAGE_SHIFT) >= __pa(high_memory)) {
-		flags = _PAGE_CACHE_UC;
+		pcm = _PAGE_CACHE_MODE_UC;
 	}
 #endif
 
 	*vma_prot = __pgprot((pgprot_val(*vma_prot) & ~_PAGE_CACHE_MASK) |
-			     flags);
+			     cachemode2protval(pcm));
 	return 1;
 }
 
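
This function now shows the pattern the whole patch converges on: carry the memory type as an enum through the logic and convert to PTE bits exactly once, where the pgprot is rebuilt. Schematically (a sketch, not a quote from the tree):

	enum page_cache_mode pcm = _PAGE_CACHE_MODE_WB;

	/* ... decide on a different pcm where needed ... */

	*vma_prot = __pgprot((pgprot_val(*vma_prot) & ~_PAGE_CACHE_MASK) |
			     cachemode2protval(pcm));
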
@@ -583,7 +581,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size,
 		printk(KERN_INFO "%s:%d ioremap_change_attr failed %s "
 			"for [mem %#010Lx-%#010Lx]\n",
 			current->comm, current->pid,
-			cattr_name(cachemode2protval(pcm)),
+			cattr_name(pcm),
 			base, (unsigned long long)(base + size-1));
 		return -EINVAL;
 	}
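
cattr_name() itself is converted from taking raw flags to taking an enum page_cache_mode elsewhere in this patch (that hunk is not shown in this excerpt), which is why the cachemode2protval() wrapper can be dropped here. The assumed shape of the helper after conversion:

static char *cattr_name(enum page_cache_mode pcm)
{
	switch (pcm) {
	case _PAGE_CACHE_MODE_UC:	return "uncached";
	case _PAGE_CACHE_MODE_UC_MINUS:	return "uncached-minus";
	case _PAGE_CACHE_MODE_WB:	return "write-back";
	case _PAGE_CACHE_MODE_WC:	return "write-combining";
	default:			return "broken";
	}
}
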
@@ -600,8 +598,8 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
 {
 	int is_ram = 0;
 	int ret;
-	unsigned long want_flags = (pgprot_val(*vma_prot) & _PAGE_CACHE_MASK);
-	unsigned long flags = want_flags;
+	enum page_cache_mode want_pcm = pgprot2cachemode(*vma_prot);
+	enum page_cache_mode pcm = want_pcm;
 
 	is_ram = pat_pagerange_is_ram(paddr, paddr + size);
 
@@ -614,38 +612,36 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
 		if (!pat_enabled)
 			return 0;
 
-		flags = cachemode2protval(lookup_memtype(paddr));
-		if (want_flags != flags) {
+		pcm = lookup_memtype(paddr);
+		if (want_pcm != pcm) {
 			printk(KERN_WARNING "%s:%d map pfn RAM range req %s for [mem %#010Lx-%#010Lx], got %s\n",
 				current->comm, current->pid,
-				cattr_name(want_flags),
+				cattr_name(want_pcm),
 				(unsigned long long)paddr,
 				(unsigned long long)(paddr + size - 1),
-				cattr_name(flags));
+				cattr_name(pcm));
 			*vma_prot = __pgprot((pgprot_val(*vma_prot) &
-					      (~_PAGE_CACHE_MASK)) |
-					     flags);
+					     (~_PAGE_CACHE_MASK)) |
+					     cachemode2protval(pcm));
 		}
 		return 0;
 	}
 
-	ret = reserve_memtype(paddr, paddr + size, want_flags, &flags);
+	ret = reserve_memtype(paddr, paddr + size, want_pcm, &pcm);
 	if (ret)
 		return ret;
 
-	if (flags != want_flags) {
+	if (pcm != want_pcm) {
 		if (strict_prot ||
-		    !is_new_memtype_allowed(paddr, size,
-				pgprot2cachemode(__pgprot(want_flags)),
-				pgprot2cachemode(__pgprot(flags)))) {
+		    !is_new_memtype_allowed(paddr, size, want_pcm, pcm)) {
 			free_memtype(paddr, paddr + size);
 			printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
 				" for [mem %#010Lx-%#010Lx], got %s\n",
 				current->comm, current->pid,
-				cattr_name(want_flags),
+				cattr_name(want_pcm),
 				(unsigned long long)paddr,
 				(unsigned long long)(paddr + size - 1),
-				cattr_name(flags));
+				cattr_name(pcm));
 			return -EINVAL;
 		}
 		/*
@@ -654,11 +650,10 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
 		 */
 		*vma_prot = __pgprot((pgprot_val(*vma_prot) &
 				     (~_PAGE_CACHE_MASK)) |
-				     flags);
+				     cachemode2protval(pcm));
 	}
 
-	if (kernel_map_sync_memtype(paddr, size,
-				pgprot2cachemode(__pgprot(flags))) < 0) {
+	if (kernel_map_sync_memtype(paddr, size, pcm) < 0) {
 		free_memtype(paddr, paddr + size);
 		return -EINVAL;
 	}
@@ -799,7 +794,8 @@ void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
 pgprot_t pgprot_writecombine(pgprot_t prot)
 {
 	if (pat_enabled)
-		return __pgprot(pgprot_val(prot) | _PAGE_CACHE_WC);
+		return __pgprot(pgprot_val(prot) |
+				cachemode2protval(_PAGE_CACHE_MODE_WC));
 	else
 		return pgprot_noncached(prot);
 }
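
A closing observation: pgprot_writecombine() is one spot where the translation genuinely matters rather than being mechanical cleanup. The old _PAGE_CACHE_WC constant hard-coded one particular PWT/PCD/PAT encoding for write-combining, while cachemode2protval(_PAGE_CACHE_MODE_WC) fetches the encoding from the translation table, so a kernel whose PAT MSR is programmed with a non-default layout (the Xen scenario this series appears aimed at) still emits correct PTE bits. Illustrative use:

	/* request a WC kernel mapping; actual bits come from the table */
	pgprot_t wc_prot = pgprot_writecombine(PAGE_KERNEL);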