@@ -1081,13 +1081,8 @@ static void free_full_branch(struct inode *inode, u64 ind_block, int depth)
 		for (i = 0; i < uspi->s_apb; i++) {
 			void *p = ubh_get_data_ptr(uspi, ubh, i);
 			u64 block = ufs_data_ptr_to_cpu(sb, p);
-			if (block) {
-				write_seqlock(&UFS_I(inode)->meta_lock);
-				ufs_data_ptr_clear(uspi, p);
-				write_sequnlock(&UFS_I(inode)->meta_lock);
+			if (block)
 				free_full_branch(inode, block, depth);
-				ubh_mark_buffer_dirty(ubh);
-			}
 		}
 	} else {
 		struct to_free ctx = {.inode = inode};
@@ -1095,13 +1090,8 @@ static void free_full_branch(struct inode *inode, u64 ind_block, int depth)
 		for (i = 0; i < uspi->s_apb; i++) {
 			void *p = ubh_get_data_ptr(uspi, ubh, i);
 			u64 block = ufs_data_ptr_to_cpu(sb, p);
-			if (block) {
-				write_seqlock(&UFS_I(inode)->meta_lock);
-				ufs_data_ptr_clear(uspi, p);
-				write_sequnlock(&UFS_I(inode)->meta_lock);
-				ubh_mark_buffer_dirty(ubh);
+			if (block)
 				free_data(&ctx, block, uspi->s_fpb);
-			}
 		}
 		free_data(&ctx, 0, 0);
 	}