@@ -538,7 +538,7 @@ unsigned long pmd_hugepage_update(struct mm_struct *mm, unsigned long addr,
 	*pmdp = __pmd((old & ~clr) | set);
 #endif
 	if (old & _PAGE_HASHPTE)
-		hpte_do_hugepage_flush(mm, addr, pmdp);
+		hpte_do_hugepage_flush(mm, addr, pmdp, old);
 	return old;
 }
 
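The point of the hunk above: pmd_hugepage_update() rewrites *pmdp before the
hash-PTE flush runs, so flags such as _PAGE_COMBO can no longer be recovered
from the entry itself; only the value captured in "old" still carries them,
which is why it is now forwarded to hpte_do_hugepage_flush(). A standalone
sketch of that capture-before-write pattern (names and flag values invented
for illustration; this is not kernel code):

#include <assert.h>

#define PAGE_HASHPTE	0x1UL	/* invented stand-ins for the real flags */
#define PAGE_COMBO	0x2UL

static unsigned long entry;	/* stands in for *pmdp */

/* Mirrors pmd_hugepage_update(): rewrite the entry, return the old value. */
static unsigned long update(unsigned long clr, unsigned long set)
{
	unsigned long old = entry;	/* capture before the write */

	entry = (old & ~clr) | set;
	return old;			/* callers must flush based on this */
}

int main(void)
{
	unsigned long old;

	entry = PAGE_HASHPTE | PAGE_COMBO;
	old = update(PAGE_HASHPTE | PAGE_COMBO, 0);

	assert(!(entry & PAGE_COMBO));	/* gone from the live entry */
	assert(old & PAGE_COMBO);	/* still visible in the captured value */
	return 0;
}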
@@ -645,7 +645,7 @@ void pmdp_splitting_flush(struct vm_area_struct *vma,
 	if (!(old & _PAGE_SPLITTING)) {
 		/* We need to flush the hpte */
 		if (old & _PAGE_HASHPTE)
-			hpte_do_hugepage_flush(vma->vm_mm, address, pmdp);
+			hpte_do_hugepage_flush(vma->vm_mm, address, pmdp, old);
 	}
 	/*
 	 * This ensures that generic code that rely on IRQ disabling
@@ -723,7 +723,7 @@ void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
 	 * neesd to be flushed.
 	 */
 void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr,
-			    pmd_t *pmdp)
+			    pmd_t *pmdp, unsigned long old_pmd)
{
 	int ssize, i;
 	unsigned long s_addr;
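With the extra old_pmd parameter, every caller is now obliged to hand over
the PMD value as it stood before the entry was modified. The prototype in
the corresponding header (not part of this excerpt) would presumably change
the same way:

void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr,
			    pmd_t *pmdp, unsigned long old_pmd);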
@@ -746,7 +746,15 @@ void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr,
 		return;
 
 	/* get the base page size,vsid and segment size */
+#ifdef CONFIG_DEBUG_VM
 	psize = get_slice_psize(mm, s_addr);
+	BUG_ON(psize == MMU_PAGE_16M);
+#endif
+	if (old_pmd & _PAGE_COMBO)
+		psize = MMU_PAGE_4K;
+	else
+		psize = MMU_PAGE_64K;
+
 	if (!is_kernel_addr(s_addr)) {
 		ssize = user_segment_size(s_addr);
 		vsid = get_vsid(mm->context.id, s_addr, ssize);
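This hunk carries the substance of the change: a THP hugepage on ppc64 is
hashed either with 64K base pages or, when _PAGE_COMBO is set, with 4K base
pages, so the invalidate must use whichever size the old PMD records. The
slice lookup via get_slice_psize() survives only as a CONFIG_DEBUG_VM sanity
check that the address does not sit in a 16M slice. A minimal standalone
illustration of the selection (the numeric values below are invented; only
the decision logic mirrors the hunk):

#include <stdio.h>

/* Invented values; the real definitions live in the powerpc headers. */
#define _PAGE_COMBO	0x10000000UL
#define MMU_PAGE_4K	0
#define MMU_PAGE_64K	2

/* Pick the base page size the hugepage was actually hashed with
 * (hypothetical helper name, not from the patch). */
static int thp_base_psize(unsigned long old_pmd)
{
	return (old_pmd & _PAGE_COMBO) ? MMU_PAGE_4K : MMU_PAGE_64K;
}

int main(void)
{
	printf("combo PMD   -> psize %d (4K)\n", thp_base_psize(_PAGE_COMBO));
	printf("regular PMD -> psize %d (64K)\n", thp_base_psize(0));
	return 0;
}

Selecting from old_pmd rather than rereading *pmdp also keeps the helper
correct when it is called after the entry has already been cleared.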