btrfs: avoid multiple i_size rounding in btrfs_truncate()

We have the inode locked, so no one can concurrently change its i_size and
we don't change it ourselves either, so there's no point in rounding it
again on every iteration of the while loop and setting it in the control
structure each time. That only causes confusion when reading the code.

So move all the i_size setup and rounding out of the loop and assert the
inode is locked.

Reviewed-by: Qu Wenruo <wqu@suse.com>
Reviewed-by: Anand Jain <asj@kernel.org>
Signed-off-by: Filipe Manana <fdmanana@suse.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
Author:    Filipe Manana
Date:      2025-10-12 10:39:08 +01:00
Committed: David Sterba
Parent:    b917a94a4c
Commit:    3b7c0c20b7

@@ -7656,6 +7656,7 @@ static int btrfs_truncate(struct btrfs_inode *inode, bool skip_writeback)
 		.ino = btrfs_ino(inode),
 		.min_type = BTRFS_EXTENT_DATA_KEY,
 		.clear_extent_range = true,
+		.new_size = inode->vfs_inode.i_size,
 	};
 	struct btrfs_root *root = inode->root;
 	struct btrfs_fs_info *fs_info = root->fs_info;
@@ -7663,12 +7664,14 @@ static int btrfs_truncate(struct btrfs_inode *inode, bool skip_writeback)
 	int ret;
 	struct btrfs_trans_handle *trans;
 	const u64 min_size = btrfs_calc_metadata_size(fs_info, 1);
+	const u64 lock_start = round_down(inode->vfs_inode.i_size, fs_info->sectorsize);
+	const u64 i_size_up = round_up(inode->vfs_inode.i_size, fs_info->sectorsize);
+
+	/* Our inode is locked and the i_size can't be changed concurrently. */
+	btrfs_assert_inode_locked(inode);
 
 	if (!skip_writeback) {
-		ret = btrfs_wait_ordered_range(inode,
-					       round_down(inode->vfs_inode.i_size,
-							  fs_info->sectorsize),
-					       (u64)-1);
+		ret = btrfs_wait_ordered_range(inode, lock_start, (u64)-1);
 		if (ret)
 			return ret;
 	}
@@ -7732,19 +7735,14 @@ static int btrfs_truncate(struct btrfs_inode *inode, bool skip_writeback)
 	while (1) {
 		struct extent_state *cached_state = NULL;
-		const u64 new_size = inode->vfs_inode.i_size;
-		const u64 lock_start = round_down(new_size, fs_info->sectorsize);
 
-		control.new_size = new_size;
 		btrfs_lock_extent(&inode->io_tree, lock_start, (u64)-1, &cached_state);
 		/*
 		 * We want to drop from the next block forward in case this new
 		 * size is not block aligned since we will be keeping the last
 		 * block of the extent just the way it is.
 		 */
-		btrfs_drop_extent_map_range(inode,
-					    round_up(new_size, fs_info->sectorsize),
-					    (u64)-1, false);
+		btrfs_drop_extent_map_range(inode, i_size_up, (u64)-1, false);
 
 		ret = btrfs_truncate_inode_items(trans, root, &control);
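
For reference, a minimal user-space sketch (not part of the patch) of what the two hoisted constants evaluate to. The 4096-byte sector size and the 10000-byte i_size are made-up example values, and the round_down()/round_up() macros below are simplified stand-ins for the kernel's power-of-two rounding helpers:

/*
 * Standalone sketch, not kernel code: shows the values that lock_start and
 * i_size_up take for an example (hypothetical) sector size and i_size.
 */
#include <stdio.h>
#include <stdint.h>

#define round_down(x, y) ((x) & ~((uint64_t)(y) - 1))
#define round_up(x, y)   ((((x) - 1) | ((uint64_t)(y) - 1)) + 1)

int main(void)
{
	const uint64_t sectorsize = 4096;   /* stand-in for fs_info->sectorsize */
	const uint64_t i_size = 10000;      /* stand-in for inode->vfs_inode.i_size */

	/* Start of the block that contains the EOF offset. */
	const uint64_t lock_start = round_down(i_size, sectorsize);
	/* First block boundary at or past EOF (start of the next block here). */
	const uint64_t i_size_up = round_up(i_size, sectorsize);

	/* Prints lock_start=8192 i_size_up=12288 for the values above. */
	printf("lock_start=%llu i_size_up=%llu\n",
	       (unsigned long long)lock_start,
	       (unsigned long long)i_size_up);
	return 0;
}

Since i_size cannot change while the inode is locked, both values are loop invariants, which is why the patch can compute them once up front instead of on every iteration.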