@@ -3,7 +3,7 @@
 #define _ASM_POWERPC_NOHASH_64_PGTABLE_H
 /*
  * This file contains the functions and defines necessary to modify and use
- * the ppc64 hashed page table.
+ * the ppc64 non-hashed page table.
  */
 
 #include <asm/nohash/64/pgtable-4k.h>
@@ -38,7 +38,7 @@
 
 /*
  * The vmalloc space starts at the beginning of that region, and
- * occupies half of it on hash CPUs and a quarter of it on Book3E
+ * occupies a quarter of it on Book3E
  * (we keep a quarter for the virtual memmap)
  */
 #define VMALLOC_START	KERN_VIRT_START
@@ -78,7 +78,7 @@
 
 /*
  * Defines the address of the vmemap area, in its own region on
- * hash table CPUs and after the vmalloc space on Book3E
+ * after the vmalloc space on Book3E
  */
 #define VMEMMAP_BASE		VMALLOC_END
 #define VMEMMAP_END		KERN_IO_START
@@ -248,14 +248,6 @@ static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
 	pte_update(mm, addr, ptep, _PAGE_RW, 0, 1);
 }
 
-/*
- * We currently remove entries from the hashtable regardless of whether
- * the entry was young or dirty. The generic routines only flush if the
- * entry was young or dirty which is not good enough.
- *
- * We should be more intelligent about this but for the moment we override
- * these functions and force a tlb flush unconditionally
- */
 #define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
 #define ptep_clear_flush_young(__vma, __address, __ptep) \
 ({ \
@@ -279,9 +271,7 @@ static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
 }
 
 
-/* Set the dirty and/or accessed bits atomically in a linux PTE, this
- * function doesn't need to flush the hash entry
- */
+/* Set the dirty and/or accessed bits atomically in a linux PTE */
 static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
 					    pte_t *ptep, pte_t entry,
 					    unsigned long address,