@@ -2367,6 +2367,7 @@ static int qgroup_reserve(struct btrfs_root *root, u64 num_bytes, bool enforce)
 	struct btrfs_fs_info *fs_info = root->fs_info;
 	u64 ref_root = root->root_key.objectid;
 	int ret = 0;
+	int retried = 0;
 	struct ulist_node *unode;
 	struct ulist_iterator uiter;
 
@@ -2375,7 +2376,7 @@ static int qgroup_reserve(struct btrfs_root *root, u64 num_bytes, bool enforce)
 
 	if (num_bytes == 0)
 		return 0;
-
+retry:
 	spin_lock(&fs_info->qgroup_lock);
 	quota_root = fs_info->quota_root;
 	if (!quota_root)
@@ -2402,6 +2403,27 @@ static int qgroup_reserve(struct btrfs_root *root, u64 num_bytes, bool enforce)
 		qg = unode_aux_to_qgroup(unode);
 
 		if (enforce && !qgroup_check_limits(qg, num_bytes)) {
+			/*
+			 * Commit the tree and retry, since we may have
+			 * deletions which would free up space.
+			 */
+			if (!retried && qg->reserved > 0) {
+				struct btrfs_trans_handle *trans;
+
+				spin_unlock(&fs_info->qgroup_lock);
+				ret = btrfs_start_delalloc_inodes(root, 0);
+				if (ret)
+					return ret;
+				btrfs_wait_ordered_extents(root, -1, 0, (u64)-1);
+				trans = btrfs_join_transaction(root);
+				if (IS_ERR(trans))
+					return PTR_ERR(trans);
+				ret = btrfs_commit_transaction(trans);
+				if (ret)
+					return ret;
+				retried++;
+				goto retry;
+			}
 			ret = -EDQUOT;
 			goto out;
 		}