@@ -1091,8 +1091,12 @@ void radix__ptep_set_access_flags(struct vm_area_struct *vma, pte_t *ptep,
 	struct mm_struct *mm = vma->vm_mm;
 	unsigned long set = pte_val(entry) & (_PAGE_DIRTY | _PAGE_ACCESSED |
 					      _PAGE_RW | _PAGE_EXEC);
-
-	if (cpu_has_feature(CPU_FTR_POWER9_DD1)) {
+	/*
+	 * To avoid an NMMU hang while relaxing access, we need to mark
+	 * the pte invalid in between.
+	 */
+	if (cpu_has_feature(CPU_FTR_POWER9_DD1) ||
+	    atomic_read(&mm->context.copros) > 0) {
 		unsigned long old_pte, new_pte;
 
 		old_pte = __radix_pte_update(ptep, ~0, 0);
@@ -1100,9 +1104,11 @@ void radix__ptep_set_access_flags(struct vm_area_struct *vma, pte_t *ptep,
 		 * new value of pte
 		 */
 		new_pte = old_pte | set;
-		radix__flush_tlb_pte_p9_dd1(old_pte, mm, address);
+		radix__flush_tlb_page_psize(mm, address, psize);
 		__radix_pte_update(ptep, 0, new_pte);
-	} else
+	} else {
 		__radix_pte_update(ptep, 0, set);
+		radix__flush_tlb_page_psize(mm, address, psize);
+	}
 	asm volatile("ptesync" : : : "memory");
 }
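For reference, the ordering the patch enforces in the copros/DD1 path is a break-before-make sequence: atomically clear the PTE, flush the translation while the entry is invalid, then install the old bits plus the relaxed-access bits. The user-space sketch below models only that ordering; pte_t, PTE_VALID, PTE_RW, tlb_flush_page() and relax_access() are simplified stand-ins invented for illustration, not the kernel's types or radix__* helpers.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Toy PTE: a single atomic word standing in for the kernel's pte_t. */
typedef _Atomic uint64_t pte_t;

#define PTE_VALID	(1ULL << 0)
#define PTE_RW		(1ULL << 1)

static void tlb_flush_page(void)
{
	/* Stand-in for radix__flush_tlb_page_psize(): in the kernel this
	 * invalidates the translation so the nest MMU drops its cached copy. */
	printf("tlb flush\n");
}

/* Relax access bits with the PTE held invalid across the flush, so an
 * agent caching translations can never observe a stale-but-valid entry
 * while the new permissions are being installed. */
static void relax_access(pte_t *ptep, uint64_t set)
{
	/* Step 1: atomically clear the whole PTE (break). */
	uint64_t old_pte = atomic_exchange(ptep, 0);
	/* Step 2: flush cached translations while the PTE is invalid. */
	tlb_flush_page();
	/* Step 3: install the old bits plus the relaxed bits (make). */
	atomic_store(ptep, old_pte | set);
}

int main(void)
{
	pte_t pte = PTE_VALID;
	relax_access(&pte, PTE_RW);
	printf("pte = %#llx\n", (unsigned long long)atomic_load(&pte));
	return 0;
}

Flushing while the entry is invalid is the point of the patch: the nest MMU (used by coprocessors, hence the mm->context.copros check) must never cache a valid translation that straddles the old and new permission bits.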