@@ -195,6 +195,7 @@ extern void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
 static inline unsigned long pte_update(struct mm_struct *mm,
 				       unsigned long addr,
 				       pte_t *ptep, unsigned long clr,
+				       unsigned long set,
 				       int huge)
 {
 #ifdef PTE_ATOMIC_UPDATES
@@ -205,14 +206,15 @@ static inline unsigned long pte_update(struct mm_struct *mm,
 	andi.	%1,%0,%6\n\
 	bne-	1b \n\
 	andc	%1,%0,%4 \n\
+	or	%1,%1,%7\n\
 	stdcx.	%1,0,%3 \n\
 	bne-	1b"
 	: "=&r" (old), "=&r" (tmp), "=m" (*ptep)
-	: "r" (ptep), "r" (clr), "m" (*ptep), "i" (_PAGE_BUSY)
+	: "r" (ptep), "r" (clr), "m" (*ptep), "i" (_PAGE_BUSY), "r" (set)
 	: "cc" );
 #else
 	unsigned long old = pte_val(*ptep);
-	*ptep = __pte(old & ~clr);
+	*ptep = __pte((old & ~clr) | set);
 #endif
 	/* huge pages use the old page table lock */
 	if (!huge)
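
The non-atomic fallback above is the easiest place to read off the new semantics: pte_update() now clears the bits in clr and then ORs in the bits in set, in a single update of the PTE; the ll/sc loop does the same via the added or instruction, with "r" (set) wired in as operand %7. Every caller touched below passes set = 0, so behaviour is unchanged for existing users. A minimal sketch of the net effect (illustrative only, ignoring the atomic update, the _PAGE_BUSY busy-wait and the flush bookkeeping done by the real function):

static inline unsigned long pte_update_effect(pte_t *ptep, unsigned long clr,
					      unsigned long set)
{
	/*
	 * Illustrative helper, not part of the patch: the net effect of
	 * pte_update(mm, addr, ptep, clr, set, huge) on the PTE value.
	 */
	unsigned long old = pte_val(*ptep);

	*ptep = __pte((old & ~clr) | set);	/* clear clr bits, then OR in set bits */
	return old;				/* callers get the previous value back */
}
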
@@ -231,9 +233,9 @@ static inline int __ptep_test_and_clear_young(struct mm_struct *mm,
 {
 	unsigned long old;
 
-	if ((pte_val(*ptep) & (_PAGE_ACCESSED | _PAGE_HASHPTE)) == 0)
+	if ((pte_val(*ptep) & (_PAGE_ACCESSED | _PAGE_HASHPTE)) == 0)
 		return 0;
-	old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0);
+	old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0, 0);
 	return (old & _PAGE_ACCESSED) != 0;
 }
 #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
@@ -252,7 +254,7 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
 	if ((pte_val(*ptep) & _PAGE_RW) == 0)
 		return;
 
-	pte_update(mm, addr, ptep, _PAGE_RW, 0);
+	pte_update(mm, addr, ptep, _PAGE_RW, 0, 0);
 }
 
 static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
@@ -261,7 +263,7 @@ static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
 	if ((pte_val(*ptep) & _PAGE_RW) == 0)
 		return;
 
-	pte_update(mm, addr, ptep, _PAGE_RW, 1);
+	pte_update(mm, addr, ptep, _PAGE_RW, 0, 1);
 }
 
 /*
@@ -284,14 +286,14 @@ static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
 static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
 				       unsigned long addr, pte_t *ptep)
 {
-	unsigned long old = pte_update(mm, addr, ptep, ~0UL, 0);
+	unsigned long old = pte_update(mm, addr, ptep, ~0UL, 0, 0);
 	return __pte(old);
 }
 
 static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
 			     pte_t * ptep)
 {
-	pte_update(mm, addr, ptep, ~0UL, 0);
+	pte_update(mm, addr, ptep, ~0UL, 0, 0);
 }
 
 
@@ -506,7 +508,9 @@ extern int pmdp_set_access_flags(struct vm_area_struct *vma,
 
 extern unsigned long pmd_hugepage_update(struct mm_struct *mm,
 					 unsigned long addr,
-					 pmd_t *pmdp, unsigned long clr);
+					 pmd_t *pmdp,
+					 unsigned long clr,
+					 unsigned long set);
 
 static inline int __pmdp_test_and_clear_young(struct mm_struct *mm,
 					      unsigned long addr, pmd_t *pmdp)
@@ -515,7 +519,7 @@ static inline int __pmdp_test_and_clear_young(struct mm_struct *mm,
 
 	if ((pmd_val(*pmdp) & (_PAGE_ACCESSED | _PAGE_HASHPTE)) == 0)
 		return 0;
-	old = pmd_hugepage_update(mm, addr, pmdp, _PAGE_ACCESSED);
+	old = pmd_hugepage_update(mm, addr, pmdp, _PAGE_ACCESSED, 0);
 	return ((old & _PAGE_ACCESSED) != 0);
 }
 
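
pmd_hugepage_update() grows the same clr/set pair, and its callers are likewise updated to pass set = 0. Its net effect on the huge-page PMD mirrors the PTE sketch above; the following is illustrative only, since the real out-of-line routine does more than this simplified form:

static inline unsigned long pmd_hugepage_update_effect(pmd_t *pmdp,
						       unsigned long clr,
						       unsigned long set)
{
	/*
	 * Illustrative helper, not part of the patch: the net effect of
	 * pmd_hugepage_update(mm, addr, pmdp, clr, set) on the PMD value.
	 */
	unsigned long old = pmd_val(*pmdp);

	*pmdp = __pmd((old & ~clr) | set);
	return old;
}
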
@@ -542,7 +546,7 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm, unsigned long addr,
 	if ((pmd_val(*pmdp) & _PAGE_RW) == 0)
 		return;
 
-	pmd_hugepage_update(mm, addr, pmdp, _PAGE_RW);
+	pmd_hugepage_update(mm, addr, pmdp, _PAGE_RW, 0);
 }
 
 #define __HAVE_ARCH_PMDP_SPLITTING_FLUSH
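
As a usage illustration (hypothetical, not something this diff adds), the extra argument lets a caller clear and set bits in one atomic pass instead of performing two separate updates:

/*
 * Hypothetical caller, for illustration only: write-protect a PTE and
 * tag it with another bit in a single pte_update() pass.
 * _PAGE_SPECIAL merely stands in for whichever bit a real user would set.
 */
static inline void example_wrprotect_and_tag(struct mm_struct *mm,
					     unsigned long addr, pte_t *ptep)
{
	pte_update(mm, addr, ptep, _PAGE_RW, _PAGE_SPECIAL, 0);
}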