@@ -575,6 +575,7 @@ static void dispose_list(struct list_head *head)
 		list_del_init(&inode->i_lru);
 
 		evict(inode);
+		cond_resched();
 	}
 }
 
@@ -592,6 +593,7 @@ void evict_inodes(struct super_block *sb)
 	struct inode *inode, *next;
 	LIST_HEAD(dispose);
 
+again:
 	spin_lock(&sb->s_inode_list_lock);
 	list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
 		if (atomic_read(&inode->i_count))
@@ -607,6 +609,18 @@ void evict_inodes(struct super_block *sb)
 		inode_lru_list_del(inode);
 		spin_unlock(&inode->i_lock);
 		list_add(&inode->i_lru, &dispose);
+
+		/*
+		 * We can have a ton of inodes to evict at unmount time given
+		 * enough memory, check to see if we need to go to sleep for a
+		 * bit so we don't livelock.
+		 */
+		if (need_resched()) {
+			spin_unlock(&sb->s_inode_list_lock);
+			cond_resched();
+			dispose_list(&dispose);
+			goto again;
+		}
 	}
 	spin_unlock(&sb->s_inode_list_lock);
 