@@ -139,7 +139,7 @@ static inline unsigned long radix__pte_update(struct mm_struct *mm,
 	unsigned long new_pte;
 
-	old_pte = __radix_pte_update(ptep, ~0, 0);
+	old_pte = __radix_pte_update(ptep, ~0ul, 0);
 	/*
 	 * new value of pte
 	 */
@@ -157,6 +157,27 @@ static inline unsigned long radix__pte_update(struct mm_struct *mm,
 	return old_pte;
 }
 
+static inline pte_t radix__ptep_get_and_clear_full(struct mm_struct *mm,
+						   unsigned long addr,
+						   pte_t *ptep, int full)
+{
+	unsigned long old_pte;
+
+	if (full) {
+		/*
+		 * If we are trying to clear the pte, we can skip
+		 * the DD1 pte update sequence and batch the tlb flush. The
+		 * tlb flush batching is done by mmu gather code. We
+		 * still keep the cmp_xchg update to make sure we get
+		 * correct R/C bit which might be updated via Nest MMU.
+		 */
+		old_pte = __radix_pte_update(ptep, ~0ul, 0);
+	} else
+		old_pte = radix__pte_update(mm, addr, ptep, ~0ul, 0, 0);
+
+	return __pte(old_pte);
+}
+
 /*
  * Set the dirty and/or accessed bits atomically in a linux PTE, this
  * function doesn't need to invalidate tlb.
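
Two notes on the patch above. First, the ~0 to ~0ul change is a type cleanup rather than a behaviour change: the plain int ~0 (that is, -1) was already being converted to ULONG_MAX when passed to the unsigned long clr parameter, so ~0ul simply spells the full mask with the right type. Second, the "cmp_xchg update" the new comment keeps refers to __radix_pte_update()'s compare-and-swap loop. The user-space sketch below models that pattern; pte_t, radix_pte_update() and the bit positions are illustrative stand-ins, not the kernel's definitions. It shows why a CAS-based full clear still returns any R/C (referenced/changed) bits set concurrently, for example by the Nest MMU, instead of losing them:

/*
 * Minimal user-space model of the cmpxchg-style pte update; pte_t,
 * radix_pte_update() and the bit positions below are illustrative
 * stand-ins, not the kernel's definitions.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

typedef _Atomic uint64_t pte_t;		/* stand-in for the kernel pte_t */

#define _PAGE_ACCESSED	(1ull << 8)	/* illustrative bit positions only */
#define _PAGE_DIRTY	(1ull << 7)

/* Models __radix_pte_update(): new = (old | set) & ~clr, done with CAS. */
static uint64_t radix_pte_update(pte_t *ptep, uint64_t clr, uint64_t set)
{
	uint64_t old_pte, new_pte;

	do {
		old_pte = atomic_load(ptep);
		new_pte = (old_pte | set) & ~clr;
		/* Retry if the pte changed under us, e.g. an R/C update. */
	} while (!atomic_compare_exchange_weak(ptep, &old_pte, new_pte));

	return old_pte;	/* the exact value the pte held when cleared */
}

int main(void)
{
	pte_t pte = 0x1000 | _PAGE_ACCESSED;

	/* clr = ~0ul clears every bit: a full clear, as in the patch. */
	uint64_t old = radix_pte_update(&pte, ~0ul, 0);

	printf("old pte %#llx (accessed=%d), pte now %#llx\n",
	       (unsigned long long)old, !!(old & _PAGE_ACCESSED),
	       (unsigned long long)atomic_load(&pte));
	return 0;
}

In the real patch, the returned old_pte is what lets radix__ptep_get_and_clear_full() hand accurate R/C state back to the caller while the mmu gather code batches the TLB flush.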