|
@@ -1806,6 +1806,64 @@ static int qgroup_calc_new_refcnt(struct btrfs_fs_info *fs_info,
|
|
|
return 0;
|
|
|
}
|
|
|
|
|
|
#define UPDATE_NEW	0
#define UPDATE_OLD	1

/*
 * Walk all of the roots that point to the bytenr and adjust their refcnts.
 *
 * For every root id in @roots, look up its qgroup and bump the old or new
 * refcnt (selected by @update_old) at sequence number @seq on that qgroup
 * and on every ancestor qgroup reachable through the ->groups parent list.
 * Each qgroup touched is also recorded in @qgroups so the caller knows the
 * full set affected by this extent.
 *
 * @fs_info:	filesystem info, used to look up qgroups by id
 * @roots:	ids of the roots referencing the extent; NULL is a no-op
 * @tmp:	caller-provided scratch ulist, reused as the traversal worklist
 * @qgroups:	accumulates every qgroup whose refcnt was adjusted
 * @seq:	sequence number passed through to the refcnt update helpers
 * @update_old:	nonzero (UPDATE_OLD) adjusts the old refcnt, else the new one
 *
 * Returns 0 on success or a negative errno from ulist_add() on allocation
 * failure.
 *
 * NOTE(review): allocations use GFP_ATOMIC, which suggests this runs in
 * atomic context (presumably under fs_info->qgroup_lock) — confirm against
 * the callers.
 */
static int qgroup_update_refcnt(struct btrfs_fs_info *fs_info,
				struct ulist *roots, struct ulist *tmp,
				struct ulist *qgroups, u64 seq, int update_old)
{
	struct ulist_node *unode;
	struct ulist_iterator uiter;
	struct ulist_node *tmp_unode;
	struct ulist_iterator tmp_uiter;
	struct btrfs_qgroup *qg;
	int ret = 0;

	if (!roots)
		return 0;
	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(roots, &uiter))) {
		qg = find_qgroup_rb(fs_info, unode->val);
		if (!qg)
			continue;	/* root has no qgroup; nothing to count */

		/*
		 * Breadth-first walk up the qgroup hierarchy starting from
		 * this root's qgroup: @tmp is the worklist, seeded with the
		 * qgroup itself, and parents are appended as they are found.
		 */
		ulist_reinit(tmp);
		ret = ulist_add(qgroups, qg->qgroupid, ptr_to_u64(qg),
				GFP_ATOMIC);
		if (ret < 0)
			return ret;
		ret = ulist_add(tmp, qg->qgroupid, ptr_to_u64(qg), GFP_ATOMIC);
		if (ret < 0)
			return ret;
		ULIST_ITER_INIT(&tmp_uiter);
		while ((tmp_unode = ulist_next(tmp, &tmp_uiter))) {
			struct btrfs_qgroup_list *glist;

			qg = u64_to_ptr(tmp_unode->aux);
			if (update_old)
				btrfs_qgroup_update_old_refcnt(qg, seq, 1);
			else
				btrfs_qgroup_update_new_refcnt(qg, seq, 1);
			/*
			 * Queue all parent qgroups.  This relies on
			 * ulist_add() not re-adding entries already present,
			 * so a shared ancestor is visited only once per root
			 * and the walk terminates — verify ulist semantics.
			 */
			list_for_each_entry(glist, &qg->groups, next_group) {
				ret = ulist_add(qgroups, glist->group->qgroupid,
						ptr_to_u64(glist->group),
						GFP_ATOMIC);
				if (ret < 0)
					return ret;
				ret = ulist_add(tmp, glist->group->qgroupid,
						ptr_to_u64(glist->group),
						GFP_ATOMIC);
				if (ret < 0)
					return ret;
			}
		}
	}
	return 0;
}

|
|
|
+
|
|
|
/*
|
|
|
* This adjusts the counters for all referenced qgroups if need be.
|
|
|
*/
|