btrfs: annotate btrfs_is_testing() as unlikely and make it return bool

We can annotate btrfs_is_testing() as unlikely since not running the
self tests is by far the most common scenario, and it's desirable for
the compiler to optimize for that case. So add the annotation to
btrfs_is_testing() and, while at it, also make it return bool instead
of int.
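
For context, the kernel's unlikely() macro expands to
__builtin_expect(!!(x), 0), so putting it inside the helper's return
expression lets every caller that branches on the result inherit the
"rarely true" expectation. Below is a minimal, hedged sketch of the
pattern using stand-in names (my_unlikely(), is_testing(), do_work());
it is not the actual btrfs code:

  #include <stdbool.h>

  /* Stand-in for the kernel's unlikely(); same __builtin_expect() idea. */
  #define my_unlikely(x)  __builtin_expect(!!(x), 0)

  static inline bool is_testing(unsigned long state)
  {
          /* The hint is part of the return expression, so callers that
           * branch on the result pick it up automatically. */
          return my_unlikely(state & 1UL);
  }

  unsigned long do_work(unsigned long state)
  {
          if (is_testing(state))  /* cold path, self tests only */
                  return 0;
          return state * 2;       /* hot path, normal operation */
  }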

Also make two of the existing callers use btrfs_is_testing() directly
instead of storing its result in a local variable.
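
Calling the helper directly at each branch, rather than caching the
result in a local bool, also means the unlikely() hint is visible to
the compiler right at the branch instead of only where the local would
have been initialized. An illustrative sketch of the two shapes, with
the same hypothetical stand-ins as above (not the real btrfs code):

  #include <stdbool.h>

  #define my_unlikely(x)  __builtin_expect(!!(x), 0)

  static inline bool is_testing(unsigned long state)
  {
          return my_unlikely(state & 1UL);
  }

  /* Old shape: the hint applies where the local is set; the branch
   * below only tests an ordinary bool. */
  unsigned long destroy_cached(unsigned long state, unsigned long pin_bytes)
  {
          bool testing = is_testing(state);

          if (!testing && pin_bytes)
                  return pin_bytes;
          return 0;
  }

  /* New shape: the branch itself evaluates the hinted helper. */
  unsigned long destroy_direct(unsigned long state, unsigned long pin_bytes)
  {
          if (!is_testing(state) && pin_bytes)
                  return pin_bytes;
          return 0;
  }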

On x86_64 with Debian's gcc 14.2.0-19 this resulted in a very tiny object
code reduction.

Before this change:

  $ size fs/btrfs/btrfs.ko
     text	   data	    bss	    dec	    hex	filename
  1913263	 161567	  15592	2090422	 1fe5b6	fs/btrfs/btrfs.ko

After this change:

  $ size fs/btrfs/btrfs.ko
     text	   data	    bss	    dec	    hex	filename
  1913257	 161567	  15592	2090416	 1fe5b0	fs/btrfs/btrfs.ko

Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Reviewed-by: Qu Wenruo <wqu@suse.com>
Signed-off-by: Filipe Manana <fdmanana@suse.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
commit db524fd980 (parent f07575bab6)
Author:    Filipe Manana
Date:      2025-09-19 09:55:12 +01:00
Committer: David Sterba

3 files changed, 8 insertions(+), 10 deletions(-)


@@ -1251,7 +1251,6 @@ void btrfs_destroy_delayed_refs(struct btrfs_transaction *trans)
 {
 	struct btrfs_delayed_ref_root *delayed_refs = &trans->delayed_refs;
 	struct btrfs_fs_info *fs_info = trans->fs_info;
-	bool testing = btrfs_is_testing(fs_info);
 
 	spin_lock(&delayed_refs->lock);
 	while (true) {
@@ -1281,7 +1280,7 @@ void btrfs_destroy_delayed_refs(struct btrfs_transaction *trans)
 		spin_unlock(&delayed_refs->lock);
 		mutex_unlock(&head->mutex);
 
-		if (!testing && pin_bytes) {
+		if (!btrfs_is_testing(fs_info) && pin_bytes) {
 			struct btrfs_block_group *bg;
 
 			bg = btrfs_lookup_block_group(fs_info, head->bytenr);
@@ -1312,14 +1311,14 @@ void btrfs_destroy_delayed_refs(struct btrfs_transaction *trans)
 			btrfs_error_unpin_extent_range(fs_info, head->bytenr,
 				head->bytenr + head->num_bytes - 1);
 		}
-		if (!testing)
+		if (!btrfs_is_testing(fs_info))
 			btrfs_cleanup_ref_head_accounting(fs_info, delayed_refs, head);
 		btrfs_put_delayed_ref_head(head);
 		cond_resched();
 		spin_lock(&delayed_refs->lock);
 	}
 
-	if (!testing)
+	if (!btrfs_is_testing(fs_info))
 		btrfs_qgroup_destroy_extent_records(trans);
 
 	spin_unlock(&delayed_refs->lock);


@@ -639,7 +639,6 @@ static struct btrfs_root *btrfs_alloc_root(struct btrfs_fs_info *fs_info,
 					   u64 objectid, gfp_t flags)
 {
 	struct btrfs_root *root;
-	bool dummy = btrfs_is_testing(fs_info);
 
 	root = kzalloc(sizeof(*root), flags);
 	if (!root)
@@ -696,7 +695,7 @@ static struct btrfs_root *btrfs_alloc_root(struct btrfs_fs_info *fs_info,
 	root->log_transid_committed = -1;
 	btrfs_set_root_last_log_commit(root, 0);
 	root->anon_dev = 0;
-	if (!dummy) {
+	if (!btrfs_is_testing(fs_info)) {
 		btrfs_extent_io_tree_init(fs_info, &root->dirty_log_pages,
 					  IO_TREE_ROOT_DIRTY_LOG_PAGES);
 		btrfs_extent_io_tree_init(fs_info, &root->log_csum_range,


@@ -1126,9 +1126,9 @@ static inline void btrfs_wake_unfinished_drop(struct btrfs_fs_info *fs_info)
 
 #define EXPORT_FOR_TESTS
 
-static inline int btrfs_is_testing(const struct btrfs_fs_info *fs_info)
+static inline bool btrfs_is_testing(const struct btrfs_fs_info *fs_info)
 {
-	return test_bit(BTRFS_FS_STATE_DUMMY_FS_INFO, &fs_info->fs_state);
+	return unlikely(test_bit(BTRFS_FS_STATE_DUMMY_FS_INFO, &fs_info->fs_state));
 }
 
 void btrfs_test_destroy_inode(struct inode *inode);
@@ -1137,9 +1137,9 @@ void btrfs_test_destroy_inode(struct inode *inode);
 
 #define EXPORT_FOR_TESTS static
 
-static inline int btrfs_is_testing(const struct btrfs_fs_info *fs_info)
+static inline bool btrfs_is_testing(const struct btrfs_fs_info *fs_info)
 {
-	return 0;
+	return false;
 }
 
 #endif