@@ -270,6 +270,8 @@ static pmd_t *__alloc_for_pmdcache(struct mm_struct *mm)
 		return NULL;
 	}
 
+	atomic_set(&page->pt_frag_refcount, 1);
+
 	ret = page_address(page);
 	/*
 	 * if we support only one fragment just return the
@@ -285,7 +287,7 @@ static pmd_t *__alloc_for_pmdcache(struct mm_struct *mm)
 	 * count.
 	 */
 	if (likely(!mm->context.pmd_frag)) {
-		set_page_count(page, PMD_FRAG_NR);
+		atomic_set(&page->pt_frag_refcount, PMD_FRAG_NR);
 		mm->context.pmd_frag = ret + PMD_FRAG_SIZE;
 	}
 	spin_unlock(&mm->page_table_lock);
@@ -308,9 +310,10 @@ void pmd_fragment_free(unsigned long *pmd)
 {
 	struct page *page = virt_to_page(pmd);
 
-	if (put_page_testzero(page)) {
+	BUG_ON(atomic_read(&page->pt_frag_refcount) <= 0);
+	if (atomic_dec_and_test(&page->pt_frag_refcount)) {
 		pgtable_pmd_page_dtor(page);
-		free_unref_page(page);
+		__free_page(page);
 	}
 }
 
@@ -352,6 +355,7 @@ static pte_t *__alloc_for_ptecache(struct mm_struct *mm, int kernel)
 		return NULL;
 	}
 
+	atomic_set(&page->pt_frag_refcount, 1);
 
 	ret = page_address(page);
 	/*
@@ -367,7 +371,7 @@ static pte_t *__alloc_for_ptecache(struct mm_struct *mm, int kernel)
 	 * count.
 	 */
 	if (likely(!mm->context.pte_frag)) {
-		set_page_count(page, PTE_FRAG_NR);
+		atomic_set(&page->pt_frag_refcount, PTE_FRAG_NR);
 		mm->context.pte_frag = ret + PTE_FRAG_SIZE;
 	}
 	spin_unlock(&mm->page_table_lock);
@@ -390,10 +394,11 @@ void pte_fragment_free(unsigned long *table, int kernel)
 {
 	struct page *page = virt_to_page(table);
 
-	if (put_page_testzero(page)) {
+	BUG_ON(atomic_read(&page->pt_frag_refcount) <= 0);
+	if (atomic_dec_and_test(&page->pt_frag_refcount)) {
 		if (!kernel)
 			pgtable_page_dtor(page);
-		free_unref_page(page);
+		__free_page(page);
 	}
 }
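
As context for the hunks above: a backing page now starts life with pt_frag_refcount set to 1, the count is raised to PMD_FRAG_NR/PTE_FRAG_NR once the page is handed out as a pool of fragments, and each *_fragment_free() call drops the count, releasing the page with __free_page() only when the last fragment is gone. The stand-alone sketch below is not kernel code; FRAG_NR, FRAG_SIZE, struct fake_page, alloc_backing_page(), publish_as_fragments() and fragment_free() are made-up stand-ins that mirror that lifecycle with a plain C11 atomic in user space.

/*
 * Minimal user-space sketch of the fragment refcounting scheme above:
 * one backing "page" is carved into FRAG_NR fragments, and a dedicated
 * atomic counter (standing in for page->pt_frag_refcount) decides when
 * the backing page may be freed.
 */
#include <stdatomic.h>
#include <stdlib.h>

#define FRAG_NR		4		/* stand-in for PTE_FRAG_NR / PMD_FRAG_NR */
#define FRAG_SIZE	1024		/* stand-in for PTE_FRAG_SIZE / PMD_FRAG_SIZE */

struct fake_page {
	atomic_int pt_frag_refcount;	/* mirrors page->pt_frag_refcount */
	char data[FRAG_NR * FRAG_SIZE];
};

static struct fake_page *alloc_backing_page(void)
{
	struct fake_page *page = malloc(sizeof(*page));

	if (!page)
		return NULL;
	/* a freshly allocated backing page owns a single reference */
	atomic_store(&page->pt_frag_refcount, 1);
	return page;
}

/*
 * Mirrors the likely(!mm->context.pte_frag) branch: once the page is
 * published as a source of FRAG_NR fragments, it holds FRAG_NR references,
 * one per fragment that will eventually be freed.
 */
static void publish_as_fragments(struct fake_page *page)
{
	atomic_store(&page->pt_frag_refcount, FRAG_NR);
}

static void fragment_free(struct fake_page *page)
{
	/* analog of BUG_ON(atomic_read(&page->pt_frag_refcount) <= 0) */
	if (atomic_load(&page->pt_frag_refcount) <= 0)
		abort();
	/* atomic_fetch_sub returns the old value: 1 means we hit zero,
	 * i.e. the last fragment is gone, so free the backing page
	 * (the __free_page() step in the patch) */
	if (atomic_fetch_sub(&page->pt_frag_refcount, 1) == 1)
		free(page);
}

int main(void)
{
	struct fake_page *page = alloc_backing_page();
	int i;

	if (!page)
		return 1;
	publish_as_fragments(page);
	for (i = 0; i < FRAG_NR; i++)
		fragment_free(page);	/* page is freed on the last call */
	return 0;
}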