
btrfs: convert btrfs_ordered_extent.refs from atomic_t to refcount_t

The refcount_t type and its corresponding API should be used
instead of atomic_t when the variable is used as a reference
counter. This helps avoid accidental refcount overflows that
might lead to use-after-free situations.

Signed-off-by: Elena Reshetova <elena.reshetova@intel.com>
Signed-off-by: Hans Liljestrand <ishkamiel@gmail.com>
Signed-off-by: Kees Cook <keescook@chromium.org>
Signed-off-by: David Windsor <dwindsor@gmail.com>
Signed-off-by: David Sterba <dsterba@suse.com>
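
For background, refcount_t mirrors the atomic_t operations used for
reference counting, but it saturates (with a WARN) instead of wrapping
around on overflow, which is what closes the use-after-free window. A
minimal sketch of the init/get/put pattern this patch converts, using a
hypothetical example_object in place of btrfs_ordered_extent:

	#include <linux/refcount.h>
	#include <linux/slab.h>

	struct example_object {
		refcount_t refs;
	};

	static struct example_object *example_alloc(void)
	{
		struct example_object *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

		if (obj)
			refcount_set(&obj->refs, 1);	/* one ref for the creator */
		return obj;
	}

	static void example_get(struct example_object *obj)
	{
		/* saturates and WARNs on overflow instead of wrapping */
		refcount_inc(&obj->refs);
	}

	static void example_put(struct example_object *obj)
	{
		/* true only for the final put, so the free runs exactly once */
		if (refcount_dec_and_test(&obj->refs))
			kfree(obj);
	}

With that pattern in mind, the conversion below is mechanical: atomic_set,
atomic_inc, atomic_dec_and_test and atomic_read on the counter become their
refcount_* equivalents, with no change to locking or lifetime rules.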
Author: Elena Reshetova
Commit: e76edab7f0
3 changed files with 11 additions and 11 deletions:

  fs/btrfs/ordered-data.c       +9 -9
  fs/btrfs/ordered-data.h       +1 -1
  include/trace/events/btrfs.h  +1 -1

fs/btrfs/ordered-data.c  (+9 -9)

@@ -212,7 +212,7 @@ static int __btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
 		set_bit(BTRFS_ORDERED_DIRECT, &entry->flags);
 
 	/* one ref for the tree */
-	atomic_set(&entry->refs, 1);
+	refcount_set(&entry->refs, 1);
 	init_waitqueue_head(&entry->wait);
 	INIT_LIST_HEAD(&entry->list);
 	INIT_LIST_HEAD(&entry->root_extent_list);
@@ -358,7 +358,7 @@ int btrfs_dec_test_first_ordered_pending(struct inode *inode,
 out:
 	if (!ret && cached && entry) {
 		*cached = entry;
-		atomic_inc(&entry->refs);
+		refcount_inc(&entry->refs);
 	}
 	spin_unlock_irqrestore(&tree->lock, flags);
 	return ret == 0;
@@ -425,7 +425,7 @@ have_entry:
 out:
 	if (!ret && cached && entry) {
 		*cached = entry;
-		atomic_inc(&entry->refs);
+		refcount_inc(&entry->refs);
 	}
 	spin_unlock_irqrestore(&tree->lock, flags);
 	return ret == 0;
@@ -456,7 +456,7 @@ void btrfs_get_logged_extents(struct btrfs_inode *inode,
 		if (test_and_set_bit(BTRFS_ORDERED_LOGGED, &ordered->flags))
 			continue;
 		list_add(&ordered->log_list, logged_list);
-		atomic_inc(&ordered->refs);
+		refcount_inc(&ordered->refs);
 	}
 	spin_unlock_irq(&tree->lock);
 }
@@ -565,7 +565,7 @@ void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
 
 	trace_btrfs_ordered_extent_put(entry->inode, entry);
 
-	if (atomic_dec_and_test(&entry->refs)) {
+	if (refcount_dec_and_test(&entry->refs)) {
 		ASSERT(list_empty(&entry->log_list));
 		ASSERT(list_empty(&entry->trans_list));
 		ASSERT(list_empty(&entry->root_extent_list));
@@ -690,7 +690,7 @@ int btrfs_wait_ordered_extents(struct btrfs_root *root, int nr,
 
 		list_move_tail(&ordered->root_extent_list,
 			       &root->ordered_extents);
-		atomic_inc(&ordered->refs);
+		refcount_inc(&ordered->refs);
 		spin_unlock(&root->ordered_extent_lock);
 
 		btrfs_init_work(&ordered->flush_work,
@@ -870,7 +870,7 @@ struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct inode *inode,
 	if (!offset_in_entry(entry, file_offset))
 		entry = NULL;
 	if (entry)
-		atomic_inc(&entry->refs);
+		refcount_inc(&entry->refs);
 out:
 	spin_unlock_irq(&tree->lock);
 	return entry;
@@ -911,7 +911,7 @@ struct btrfs_ordered_extent *btrfs_lookup_ordered_range(
 	}
 out:
 	if (entry)
-		atomic_inc(&entry->refs);
+		refcount_inc(&entry->refs);
 	spin_unlock_irq(&tree->lock);
 	return entry;
 }
@@ -948,7 +948,7 @@ btrfs_lookup_first_ordered_extent(struct inode *inode, u64 file_offset)
 		goto out;
 
 	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
-	atomic_inc(&entry->refs);
+	refcount_inc(&entry->refs);
 out:
 	spin_unlock_irq(&tree->lock);
 	return entry;

fs/btrfs/ordered-data.h  (+1 -1)

@@ -113,7 +113,7 @@ struct btrfs_ordered_extent {
 	int compress_type;
 
 	/* reference count */
-	atomic_t refs;
+	refcount_t refs;
 
 	/* the inode we belong to */
 	struct inode *inode;

include/trace/events/btrfs.h  (+1 -1)

@@ -275,7 +275,7 @@ DECLARE_EVENT_CLASS(btrfs__ordered_extent,
 		__entry->bytes_left	= ordered->bytes_left;
 		__entry->flags		= ordered->flags;
 		__entry->compress_type	= ordered->compress_type;
-		__entry->refs		= atomic_read(&ordered->refs);
+		__entry->refs		= refcount_read(&ordered->refs);
 		__entry->root_objectid	=
 				BTRFS_I(inode)->root->root_key.objectid;
 		__entry->truncated_len	= ordered->truncated_len;
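
For callers, the lifetime rules are unchanged by this patch: the lookup
helpers above still take a reference before returning an entry, so every
successful lookup must be paired with a put. A sketch of the usage pattern,
where the inode and file_offset are assumed to come from the caller's
context:

	struct btrfs_ordered_extent *ordered;

	ordered = btrfs_lookup_ordered_extent(inode, file_offset);
	if (ordered) {
		/* ... inspect or wait on the ordered extent ... */
		btrfs_put_ordered_extent(ordered);	/* drop the lookup ref */
	}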