@@ -154,23 +154,22 @@ unlock:
  * This must be called only on pages that have
  * been verified to be in the swap cache.
  */
-void __delete_from_swap_cache(struct page *page)
+void __delete_from_swap_cache(struct page *page, swp_entry_t entry)
 {
-	struct address_space *address_space;
+	struct address_space *address_space = swap_address_space(entry);
 	int i, nr = hpage_nr_pages(page);
-	swp_entry_t entry;
-	pgoff_t idx;
+	pgoff_t idx = swp_offset(entry);
+	XA_STATE(xas, &address_space->i_pages, idx);
 
 	VM_BUG_ON_PAGE(!PageLocked(page), page);
 	VM_BUG_ON_PAGE(!PageSwapCache(page), page);
 	VM_BUG_ON_PAGE(PageWriteback(page), page);
 
-	entry.val = page_private(page);
-	address_space = swap_address_space(entry);
-	idx = swp_offset(entry);
 	for (i = 0; i < nr; i++) {
-		radix_tree_delete(&address_space->i_pages, idx + i);
+		void *entry = xas_store(&xas, NULL);
+		VM_BUG_ON_PAGE(entry != page + i, entry);
 		set_page_private(page + i, 0);
+		xas_next(&xas);
 	}
 	ClearPageSwapCache(page);
 	address_space->nrpages -= nr;
@@ -243,14 +242,11 @@ fail:
  */
 void delete_from_swap_cache(struct page *page)
 {
-	swp_entry_t entry;
-	struct address_space *address_space;
-
-	entry.val = page_private(page);
+	swp_entry_t entry = { .val = page_private(page) };
+	struct address_space *address_space = swap_address_space(entry);
 
-	address_space = swap_address_space(entry);
 	xa_lock_irq(&address_space->i_pages);
-	__delete_from_swap_cache(page);
+	__delete_from_swap_cache(page, entry);
 	xa_unlock_irq(&address_space->i_pages);
 
 	put_swap_page(page, entry);