@@ -212,7 +212,7 @@ static int __btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
 	set_bit(BTRFS_ORDERED_DIRECT, &entry->flags);
 
 	/* one ref for the tree */
-	atomic_set(&entry->refs, 1);
+	refcount_set(&entry->refs, 1);
 	init_waitqueue_head(&entry->wait);
 	INIT_LIST_HEAD(&entry->list);
 	INIT_LIST_HEAD(&entry->root_extent_list);
@@ -358,7 +358,7 @@ int btrfs_dec_test_first_ordered_pending(struct inode *inode,
 out:
 	if (!ret && cached && entry) {
 		*cached = entry;
-		atomic_inc(&entry->refs);
+		refcount_inc(&entry->refs);
 	}
 	spin_unlock_irqrestore(&tree->lock, flags);
 	return ret == 0;
@@ -425,7 +425,7 @@ have_entry:
 out:
 	if (!ret && cached && entry) {
 		*cached = entry;
-		atomic_inc(&entry->refs);
+		refcount_inc(&entry->refs);
 	}
 	spin_unlock_irqrestore(&tree->lock, flags);
 	return ret == 0;
@@ -456,7 +456,7 @@ void btrfs_get_logged_extents(struct btrfs_inode *inode,
 		if (test_and_set_bit(BTRFS_ORDERED_LOGGED, &ordered->flags))
 			continue;
 		list_add(&ordered->log_list, logged_list);
-		atomic_inc(&ordered->refs);
+		refcount_inc(&ordered->refs);
 	}
 	spin_unlock_irq(&tree->lock);
 }
@@ -565,7 +565,7 @@ void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
 
 	trace_btrfs_ordered_extent_put(entry->inode, entry);
 
-	if (atomic_dec_and_test(&entry->refs)) {
+	if (refcount_dec_and_test(&entry->refs)) {
 		ASSERT(list_empty(&entry->log_list));
 		ASSERT(list_empty(&entry->trans_list));
 		ASSERT(list_empty(&entry->root_extent_list));
@@ -690,7 +690,7 @@ int btrfs_wait_ordered_extents(struct btrfs_root *root, int nr,
 
 		list_move_tail(&ordered->root_extent_list,
 			       &root->ordered_extents);
-		atomic_inc(&ordered->refs);
+		refcount_inc(&ordered->refs);
 		spin_unlock(&root->ordered_extent_lock);
 
 		btrfs_init_work(&ordered->flush_work,
@@ -870,7 +870,7 @@ struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct inode *inode,
 	if (!offset_in_entry(entry, file_offset))
 		entry = NULL;
 	if (entry)
-		atomic_inc(&entry->refs);
+		refcount_inc(&entry->refs);
 out:
 	spin_unlock_irq(&tree->lock);
 	return entry;
@@ -911,7 +911,7 @@ struct btrfs_ordered_extent *btrfs_lookup_ordered_range(
 	}
 out:
 	if (entry)
-		atomic_inc(&entry->refs);
+		refcount_inc(&entry->refs);
 	spin_unlock_irq(&tree->lock);
 	return entry;
 }
@@ -948,7 +948,7 @@ btrfs_lookup_first_ordered_extent(struct inode *inode, u64 file_offset)
 		goto out;
 
 	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
-	atomic_inc(&entry->refs);
+	refcount_inc(&entry->refs);
 out:
 	spin_unlock_irq(&tree->lock);
 	return entry;