@@ -50,32 +50,22 @@ inline struct block_device *I_BDEV(struct inode *inode)
 EXPORT_SYMBOL(I_BDEV);
 
 /*
- * Move the inode from its current bdi to a new bdi. If the inode is dirty we
- * need to move it onto the dirty list of @dst so that the inode is always on
- * the right list.
+ * Move the inode from its current bdi to a new bdi. Make sure the inode
+ * is clean before moving so that it doesn't linger on the old bdi.
  */
 static void bdev_inode_switch_bdi(struct inode *inode,
 			struct backing_dev_info *dst)
 {
-	struct backing_dev_info *old = inode->i_data.backing_dev_info;
-	bool wakeup_bdi = false;
-
-	if (unlikely(dst == old))		/* deadlock avoidance */
-		return;
-	bdi_lock_two(&old->wb, &dst->wb);
-	spin_lock(&inode->i_lock);
-	inode->i_data.backing_dev_info = dst;
-	if (inode->i_state & I_DIRTY) {
-		if (bdi_cap_writeback_dirty(dst) && !wb_has_dirty_io(&dst->wb))
-			wakeup_bdi = true;
-		list_move(&inode->i_wb_list, &dst->wb.b_dirty);
+	while (true) {
+		spin_lock(&inode->i_lock);
+		if (!(inode->i_state & I_DIRTY)) {
+			inode->i_data.backing_dev_info = dst;
+			spin_unlock(&inode->i_lock);
+			return;
+		}
+		spin_unlock(&inode->i_lock);
+		WARN_ON_ONCE(write_inode_now(inode, true));
 	}
-	spin_unlock(&inode->i_lock);
-	spin_unlock(&old->wb.list_lock);
-	spin_unlock(&dst->wb.list_lock);
-
-	if (wakeup_bdi)
-		bdi_wakeup_thread_delayed(dst);
 }
 
 /* Kill _all_ buffers and pagecache , dirty or not.. */
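
Why the new shape: the bdi pointer may only be flipped once the inode has
been observed clean under i_lock, and writeback cannot be issued while that
spinlock is held, so the loop drops the lock, writes the inode out, and
re-checks; the inode may have been re-dirtied in between, hence the retry.
Below is a minimal userspace sketch of that flush-until-clean pattern. All
mock_* names are hypothetical stand-ins, not kernel APIs, and a pthread
mutex stands in for the i_lock spinlock; it illustrates the locking shape
only, not the real implementation.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct mock_bdi { const char *name; };

struct mock_inode {
	pthread_mutex_t lock;   /* stands in for inode->i_lock */
	bool dirty;             /* stands in for inode->i_state & I_DIRTY */
	struct mock_bdi *bdi;   /* stands in for i_data.backing_dev_info */
};

/* Pretend to write the inode back; it is clean afterwards. */
static void mock_write_inode_now(struct mock_inode *inode)
{
	pthread_mutex_lock(&inode->lock);
	inode->dirty = false;
	pthread_mutex_unlock(&inode->lock);
}

/* Same shape as the new bdev_inode_switch_bdi() above. */
static void mock_switch_bdi(struct mock_inode *inode, struct mock_bdi *dst)
{
	while (true) {
		pthread_mutex_lock(&inode->lock);
		if (!inode->dirty) {
			/* Observed clean under the lock: safe to switch. */
			inode->bdi = dst;
			pthread_mutex_unlock(&inode->lock);
			return;
		}
		/*
		 * Dirty: we must not flush while holding the lock, so
		 * drop it, write the inode out, and re-check from the
		 * top; it may have been re-dirtied in the meantime.
		 */
		pthread_mutex_unlock(&inode->lock);
		mock_write_inode_now(inode);
	}
}

int main(void)
{
	struct mock_bdi old_bdi = { "old" }, new_bdi = { "new" };
	struct mock_inode inode = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.dirty = true,
		.bdi = &old_bdi,
	};

	mock_switch_bdi(&inode, &new_bdi);
	printf("inode now on bdi \"%s\"\n", inode.bdi->name);
	return 0;
}

In the patch itself, write_inode_now(inode, true) plays the role of
mock_write_inode_now(): it runs without i_lock held, and WARN_ON_ONCE()
flags a writeback error without breaking out of the retry loop.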