@@ -339,9 +339,9 @@ static void inode_switch_wbs_work_fn(struct work_struct *work)
 	struct address_space *mapping = inode->i_mapping;
 	struct bdi_writeback *old_wb = inode->i_wb;
 	struct bdi_writeback *new_wb = isw->new_wb;
-	struct radix_tree_iter iter;
+	XA_STATE(xas, &mapping->i_pages, 0);
+	struct page *page;
 	bool switched = false;
-	void **slot;
 
 	/*
 	 * By the time control reaches here, RCU grace period has passed
@@ -375,25 +375,18 @@ static void inode_switch_wbs_work_fn(struct work_struct *work)
 	 * to possibly dirty pages while PAGECACHE_TAG_WRITEBACK points to
 	 * pages actually under writeback.
 	 */
-	radix_tree_for_each_tagged(slot, &mapping->i_pages, &iter, 0,
-				   PAGECACHE_TAG_DIRTY) {
-		struct page *page = radix_tree_deref_slot_protected(slot,
-						&mapping->i_pages.xa_lock);
-		if (likely(page) && PageDirty(page)) {
+	xas_for_each_marked(&xas, page, ULONG_MAX, PAGECACHE_TAG_DIRTY) {
+		if (PageDirty(page)) {
 			dec_wb_stat(old_wb, WB_RECLAIMABLE);
 			inc_wb_stat(new_wb, WB_RECLAIMABLE);
 		}
 	}
 
-	radix_tree_for_each_tagged(slot, &mapping->i_pages, &iter, 0,
-				   PAGECACHE_TAG_WRITEBACK) {
-		struct page *page = radix_tree_deref_slot_protected(slot,
-						&mapping->i_pages.xa_lock);
-		if (likely(page)) {
-			WARN_ON_ONCE(!PageWriteback(page));
-			dec_wb_stat(old_wb, WB_WRITEBACK);
-			inc_wb_stat(new_wb, WB_WRITEBACK);
-		}
+	xas_set(&xas, 0);
+	xas_for_each_marked(&xas, page, ULONG_MAX, PAGECACHE_TAG_WRITEBACK) {
+		WARN_ON_ONCE(!PageWriteback(page));
+		dec_wb_stat(old_wb, WB_WRITEBACK);
+		inc_wb_stat(new_wb, WB_WRITEBACK);
 	}
 
 	wb_get(new_wb);
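For reference, the XArray pattern the new code relies on can be shown on its own. The sketch below is illustrative only and not part of the patch: the array `sketch`, the function `visit_marked`, and the use of XA_MARK_0/XA_MARK_1 (standing in for PAGECACHE_TAG_DIRTY/PAGECACHE_TAG_WRITEBACK) are hypothetical. XA_STATE declares an iteration cursor, xas_for_each_marked visits only entries carrying a given mark, and xas_set rewinds the cursor for a second pass:

	#include <linux/xarray.h>

	static DEFINE_XARRAY(sketch);		/* hypothetical array */

	static void visit_marked(void)
	{
		XA_STATE(xas, &sketch, 0);	/* cursor starting at index 0 */
		void *entry;

		xa_lock_irq(&sketch);
		/* First pass: entries up to ULONG_MAX carrying XA_MARK_0. */
		xas_for_each_marked(&xas, entry, ULONG_MAX, XA_MARK_0) {
			/* per-entry work, e.g. the WB_RECLAIMABLE accounting above */
		}

		/* Rewind the cursor and walk a second mark, as the patch
		 * does for the writeback pass. */
		xas_set(&xas, 0);
		xas_for_each_marked(&xas, entry, ULONG_MAX, XA_MARK_1) {
			/* per-entry work, e.g. the WB_WRITEBACK accounting above */
		}
		xa_unlock_irq(&sketch);
	}

Note that inode_switch_wbs_work_fn already holds the i_pages lock across these loops, as the removed radix_tree_deref_slot_protected(..., &mapping->i_pages.xa_lock) calls attest, so the hunk adds no locking of its own; the sketch takes xa_lock_irq explicitly only to stay self-contained.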