@@ -190,14 +190,15 @@ unsigned long *page_table_alloc(struct mm_struct *mm)
 		if (!list_empty(&mm->context.pgtable_list)) {
 			page = list_first_entry(&mm->context.pgtable_list,
 						struct page, lru);
-			mask = atomic_read(&page->_mapcount);
+			mask = atomic_read(&page->_refcount) >> 24;
 			mask = (mask | (mask >> 4)) & 3;
 			if (mask != 3) {
 				table = (unsigned long *) page_to_phys(page);
 				bit = mask & 1;		/* =1 -> second 2K */
 				if (bit)
 					table += PTRS_PER_PTE;
-				atomic_xor_bits(&page->_mapcount, 1U << bit);
+				atomic_xor_bits(&page->_refcount,
+						1U << (bit + 24));
 				list_del(&page->lru);
 			}
 		}
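A note on the shifts above: the 4+4 bit fragment mask that used to live in page->_mapcount now occupies the top byte of page->_refcount, leaving the low bits free for the page's ordinary reference count. The macros below are only an illustrative sketch of that layout; these names do not exist in the kernel source.

/* Illustrative only, not kernel code: layout of the top byte. */
#define PT_FRAG_ALLOC(b)   (1U << ((b) + 24))      /* 2K half b holds a live table  */
#define PT_FRAG_PENDING(b) (1U << ((b) + 4 + 24))  /* 2K half b awaits its RCU free */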
@@ -218,12 +219,12 @@ unsigned long *page_table_alloc(struct mm_struct *mm)
 	table = (unsigned long *) page_to_phys(page);
 	if (mm_alloc_pgste(mm)) {
 		/* Return 4K page table with PGSTEs */
-		atomic_set(&page->_mapcount, 3);
+		atomic_xor_bits(&page->_refcount, 3 << 24);
 		memset64((u64 *)table, _PAGE_INVALID, PTRS_PER_PTE);
 		memset64((u64 *)table + PTRS_PER_PTE, 0, PTRS_PER_PTE);
 	} else {
 		/* Return the first 2K fragment of the page */
-		atomic_set(&page->_mapcount, 1);
+		atomic_xor_bits(&page->_refcount, 1 << 24);
 		memset64((u64 *)table, _PAGE_INVALID, 2 * PTRS_PER_PTE);
 		spin_lock_bh(&mm->context.lock);
 		list_add(&page->lru, &mm->context.pgtable_list);
@@ -242,7 +243,8 @@ void page_table_free(struct mm_struct *mm, unsigned long *table)
 		/* Free 2K page table fragment of a 4K page */
 		bit = (__pa(table) & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t));
 		spin_lock_bh(&mm->context.lock);
-		mask = atomic_xor_bits(&page->_mapcount, 1U << bit);
+		mask = atomic_xor_bits(&page->_refcount, 1U << (bit + 24));
+		mask >>= 24;
 		if (mask & 3)
 			list_add(&page->lru, &mm->context.pgtable_list);
 		else
@@ -253,7 +255,6 @@ void page_table_free(struct mm_struct *mm, unsigned long *table)
 	}

 	pgtable_page_dtor(page);
-	atomic_set(&page->_mapcount, -1);
 	__free_page(page);
 }

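Both branches of the free path test the value returned by atomic_xor_bits() after shifting it back down by 24, which relies on the helper returning the post-xor value. The standalone C11 sketch below only illustrates that return-value contract; the real helper is defined earlier in arch/s390/mm/pgalloc.c.

#include <stdatomic.h>

/* Behavioural sketch: atomically toggle @bits in @v and return the
 * NEW value, so callers can test the mask state after the xor. */
static unsigned int atomic_xor_bits_sketch(atomic_uint *v, unsigned int bits)
{
	return atomic_fetch_xor(v, bits) ^ bits;
}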
@@ -274,7 +275,8 @@ void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table,
 	}
 	bit = (__pa(table) & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t));
 	spin_lock_bh(&mm->context.lock);
-	mask = atomic_xor_bits(&page->_mapcount, 0x11U << bit);
+	mask = atomic_xor_bits(&page->_refcount, 0x11U << (bit + 24));
+	mask >>= 24;
 	if (mask & 3)
 		list_add_tail(&page->lru, &mm->context.pgtable_list);
 	else
@@ -296,12 +298,13 @@ static void __tlb_remove_table(void *_table)
 		break;
 	case 1:		/* lower 2K of a 4K page table */
 	case 2:		/* higher 2K of a 4K page table */
-		if (atomic_xor_bits(&page->_mapcount, mask << 4) != 0)
+		mask = atomic_xor_bits(&page->_refcount, mask << (4 + 24));
+		mask >>= 24;
+		if (mask != 0)
 			break;
 		/* fallthrough */
 	case 3:		/* 4K page table with pgstes */
 		pgtable_page_dtor(page);
-		atomic_set(&page->_mapcount, -1);
 		__free_page(page);
 		break;
 	}
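One way to sanity-check the combined effect of page_table_free_rcu() and __tlb_remove_table() is to trace the top byte by hand. The userspace toy program below (illustrative values only, not kernel code) walks the same transitions: 0x03 with both 2K halves live, 0x12 after the lower half is queued for RCU free, 0x02 once the grace period completes. The page itself is freed only when the byte reaches zero, which is also why the old atomic_set(&page->_mapcount, -1) resets can be dropped.

#include <stdio.h>

/* Toy model of the fragment mask in the top byte of _refcount. */
static unsigned int refcount;

static unsigned int xor_bits(unsigned int bits)
{
	refcount ^= bits;		/* the kernel does this atomically */
	return refcount;
}

int main(void)
{
	unsigned int mask;

	xor_bits(1U << 24);		/* alloc lower 2K half: top byte 0x01 */
	xor_bits(1U << (1 + 24));	/* alloc upper 2K half: top byte 0x03 */

	/* page_table_free_rcu() of the lower half (bit = 0): one xor
	 * clears allocated bit 0 and sets pending bit 4 -> 0x12 */
	mask = xor_bits(0x11U << 24) >> 24;
	printf("after free_rcu:     %#04x (page %s on pgtable_list)\n",
	       mask, (mask & 3) ? "stays" : "is dropped from");

	/* __tlb_remove_table(), case 1 (mask = 1): clear pending bit 4 */
	mask = xor_bits(1U << (4 + 24)) >> 24;
	printf("after remove_table: %#04x (%s)\n",
	       mask, mask ? "upper half still live" : "free the page");
	return 0;
}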