@@ -55,8 +55,6 @@ extern pmdval_t early_pmd_flags;
 #else  /* !CONFIG_PARAVIRT */
 #define set_pte(ptep, pte)              native_set_pte(ptep, pte)
 #define set_pte_at(mm, addr, ptep, pte) native_set_pte_at(mm, addr, ptep, pte)
-#define set_pmd_at(mm, addr, pmdp, pmd) native_set_pmd_at(mm, addr, pmdp, pmd)
-#define set_pud_at(mm, addr, pudp, pud) native_set_pud_at(mm, addr, pudp, pud)
 
 #define set_pte_atomic(ptep, pte)                                       \
         native_set_pte_atomic(ptep, pte)
@@ -87,8 +85,6 @@ extern pmdval_t early_pmd_flags;
 #define pte_clear(mm, addr, ptep)       native_pte_clear(mm, addr, ptep)
 #define pmd_clear(pmd)                  native_pmd_clear(pmd)
 
-#define pte_update(mm, addr, ptep)      do { } while (0)
-
 #define pgd_val(x)      native_pgd_val(x)
 #define __pgd(x)        native_make_pgd(x)
 
@@ -979,31 +975,18 @@ static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr,
         native_set_pte(ptep, pte);
 }
 
-static inline void native_set_pmd_at(struct mm_struct *mm, unsigned long addr,
-                                     pmd_t *pmdp , pmd_t pmd)
+static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
+                              pmd_t *pmdp, pmd_t pmd)
 {
         native_set_pmd(pmdp, pmd);
 }
 
-static inline void native_set_pud_at(struct mm_struct *mm, unsigned long addr,
-                                     pud_t *pudp, pud_t pud)
+static inline void set_pud_at(struct mm_struct *mm, unsigned long addr,
+                              pud_t *pudp, pud_t pud)
 {
         native_set_pud(pudp, pud);
 }
 
-#ifndef CONFIG_PARAVIRT
-/*
- * Rules for using pte_update - it must be called after any PTE update which
- * has not been done using the set_pte / clear_pte interfaces.  It is used by
- * shadow mode hypervisors to resynchronize the shadow page tables.  Kernel PTE
- * updates should either be sets, clears, or set_pte_atomic for P->P
- * transitions, which means this hook should only be called for user PTEs.
- * This hook implies a P->P protection or access change has taken place, which
- * requires a subsequent TLB flush.
- */
-#define pte_update(mm, addr, ptep)      do { } while (0)
-#endif
-
 /*
  * We only update the dirty/accessed state if we set
  * the dirty bit by hand in the kernel, since the hardware
@@ -1031,7 +1014,6 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
                                        pte_t *ptep)
 {
         pte_t pte = native_ptep_get_and_clear(ptep);
-        pte_update(mm, addr, ptep);
         return pte;
 }
 
@@ -1058,7 +1040,6 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm,
                                       unsigned long addr, pte_t *ptep)
 {
         clear_bit(_PAGE_BIT_RW, (unsigned long *)&ptep->pte);
-        pte_update(mm, addr, ptep);
 }
 
 #define flush_tlb_fix_spurious_fault(vma, address) do { } while (0)
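
For context, a minimal standalone sketch of the shape this patch leaves behind: with the paravirt macro indirection and the no-op pte_update() hook removed, set_pmd_at() is simply a static inline that forwards to native_set_pmd(). The types below are simplified stand-ins, not the kernel's real pmd_t/mm_struct definitions, and this is illustrative userspace code, not kernel code.

/*
 * Standalone illustration (simplified types) of the post-patch pattern:
 * set_pmd_at() calls the native setter directly, with no hook afterwards.
 */
#include <stdio.h>

typedef struct { unsigned long pmd; } pmd_t;   /* stand-in for the kernel's pmd_t */
struct mm_struct { int dummy; };               /* stand-in; fields irrelevant here */

static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
{
        pmdp->pmd = pmd.pmd;                   /* the kernel writes the hardware entry here */
}

/* After the patch: one definition, no CONFIG_PARAVIRT variant to dispatch to. */
static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
                              pmd_t *pmdp, pmd_t pmd)
{
        (void)mm; (void)addr;                  /* unused on native; kept for the common API */
        native_set_pmd(pmdp, pmd);
}

int main(void)
{
        struct mm_struct mm = { 0 };
        pmd_t entry = { 0x42 };
        pmd_t slot  = { 0 };

        set_pmd_at(&mm, 0x1000, &slot, entry);
        printf("pmd = %#lx\n", slot.pmd);      /* prints: pmd = 0x42 */
        return 0;
}

The unused mm and addr parameters are retained so that set_pmd_at() keeps the call signature that generic mm code expects across architectures.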