Merge tag 'for-6.18-rc1-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux

Pull btrfs fixes from David Sterba:

 - fix extref bounds check in tree-checker

 - reorder send context structure to avoid
   -Wflex-array-member-not-at-end warning

 - fix extent readahead length for compressed extents

 - fix memory leaks on error paths (qgroup assign ioctl, zone loading
   with raid stripe tree enabled)

 - fix how device-specific mount options are applied; in particular, the
   'ssd' option could be set unexpectedly

 - fix tracking of relocation state when tasks are running and
   cancellation is attempted

 - adjust assertion condition for folios allocated for scrub

 - remove incorrect assertion checking for block group when populating
   free space tree

* tag 'for-6.18-rc1-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux:
  btrfs: send: fix -Wflex-array-member-not-at-end warning in struct send_ctx
  btrfs: tree-checker: fix bounds check in check_inode_extref()
  btrfs: fix memory leaks when rejecting a non SINGLE data profile without an RST
  btrfs: fix incorrect readahead expansion length
  btrfs: do not assert we found block group item when creating free space tree
  btrfs: do not use folio_test_partial_kmap() in ASSERT()s
  btrfs: only set the device specific options after devices are opened
  btrfs: fix memory leak on duplicated memory in the qgroup assign ioctl
  btrfs: fix clearing of BTRFS_FS_RELOC_RUNNING if relocation already running
Linus Torvalds
2025-10-16 10:22:38 -07:00
9 changed files with 25 additions and 22 deletions

fs/btrfs/extent_io.c

@@ -973,7 +973,7 @@ static void btrfs_readahead_expand(struct readahead_control *ractl,
 {
 	const u64 ra_pos = readahead_pos(ractl);
 	const u64 ra_end = ra_pos + readahead_length(ractl);
-	const u64 em_end = em->start + em->ram_bytes;
+	const u64 em_end = em->start + em->len;
 
 	/* No expansion for holes and inline extents. */
 	if (em->disk_bytenr > EXTENT_MAP_LAST_BYTE)
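
For compressed extents the two fields differ: em->ram_bytes tracks the uncompressed size of the whole extent, while em->len is the length of the file range this extent map covers, so expanding to em->start + em->ram_bytes can reach past the range the map actually describes. A worked example with hypothetical numbers (not taken from the patch):

	em->start     = 0
	em->len       = 64K	/* file range covered by this extent map */
	em->ram_bytes = 128K	/* uncompressed size of the whole compressed extent */

	em->start + em->ram_bytes = 128K	/* old em_end: expands readahead beyond the mapped range */
	em->start + em->len       = 64K		/* new em_end: stays within the mapped range */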

fs/btrfs/free-space-tree.c

@@ -1106,14 +1106,15 @@ static int populate_free_space_tree(struct btrfs_trans_handle *trans,
 	 * If ret is 1 (no key found), it means this is an empty block group,
 	 * without any extents allocated from it and there's no block group
 	 * item (key BTRFS_BLOCK_GROUP_ITEM_KEY) located in the extent tree
-	 * because we are using the block group tree feature, so block group
-	 * items are stored in the block group tree. It also means there are no
-	 * extents allocated for block groups with a start offset beyond this
-	 * block group's end offset (this is the last, highest, block group).
+	 * because we are using the block group tree feature (so block group
+	 * items are stored in the block group tree) or this is a new block
+	 * group created in the current transaction and its block group item
+	 * was not yet inserted in the extent tree (that happens in
+	 * btrfs_create_pending_block_groups() -> insert_block_group_item()).
+	 * It also means there are no extents allocated for block groups with a
+	 * start offset beyond this block group's end offset (this is the last,
+	 * highest, block group).
 	 */
-	if (!btrfs_fs_compat_ro(trans->fs_info, BLOCK_GROUP_TREE))
-		ASSERT(ret == 0);
-
 	start = block_group->start;
 	end = block_group->start + block_group->length;
 	while (ret == 0) {

fs/btrfs/ioctl.c

@@ -3740,7 +3740,7 @@ static long btrfs_ioctl_qgroup_assign(struct file *file, void __user *arg)
 		prealloc = kzalloc(sizeof(*prealloc), GFP_KERNEL);
 		if (!prealloc) {
 			ret = -ENOMEM;
-			goto drop_write;
+			goto out;
 		}
 	}
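
The leak happens because the error jump lands on a cleanup label placed after the kfree() of the memdup_user()'d argument, so that buffer is never freed. A self-contained userspace sketch of the same label-ordering rule, with invented names standing in for the ioctl's helpers:

	#include <stdlib.h>
	#include <string.h>

	/* Illustration only: always jump to the label that still frees everything allocated so far. */
	static int assign_demo(const char *arg)
	{
		char *sa = NULL;
		char *prealloc = NULL;
		int ret = 0;

		sa = strdup(arg);		/* stands in for memdup_user() */
		if (!sa) {
			ret = -1;
			goto drop_write;	/* nothing else to free yet */
		}

		prealloc = calloc(1, 64);
		if (!prealloc) {
			ret = -1;
			goto out;		/* 'out' frees sa; jumping to 'drop_write' would leak it */
		}

		/* ... work with sa and prealloc ... */
	out:
		free(prealloc);
		free(sa);
	drop_write:				/* stands in for mnt_drop_write_file() */
		return ret;
	}

	int main(void)
	{
		return assign_demo("demo") ? 1 : 0;
	}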

fs/btrfs/relocation.c

@@ -3780,6 +3780,7 @@ out:
 /*
  * Mark start of chunk relocation that is cancellable. Check if the cancellation
  * has been requested meanwhile and don't start in that case.
+ * NOTE: if this returns an error, reloc_chunk_end() must not be called.
  *
  * Return:
  *   0             success
@@ -3796,10 +3797,8 @@ static int reloc_chunk_start(struct btrfs_fs_info *fs_info)
 	if (atomic_read(&fs_info->reloc_cancel_req) > 0) {
 		btrfs_info(fs_info, "chunk relocation canceled on start");
-		/*
-		 * On cancel, clear all requests but let the caller mark
-		 * the end after cleanup operations.
-		 */
+		/* On cancel, clear all requests. */
+		clear_and_wake_up_bit(BTRFS_FS_RELOC_RUNNING, &fs_info->flags);
 		atomic_set(&fs_info->reloc_cancel_req, 0);
 		return -ECANCELED;
 	}
@@ -3808,9 +3807,11 @@ static int reloc_chunk_start(struct btrfs_fs_info *fs_info)
 /*
  * Mark end of chunk relocation that is cancellable and wake any waiters.
+ * NOTE: call only if a previous call to reloc_chunk_start() succeeded.
  */
 static void reloc_chunk_end(struct btrfs_fs_info *fs_info)
 {
+	ASSERT(test_bit(BTRFS_FS_RELOC_RUNNING, &fs_info->flags));
 	/* Requested after start, clear bit first so any waiters can continue */
 	if (atomic_read(&fs_info->reloc_cancel_req) > 0)
 		btrfs_info(fs_info, "chunk relocation canceled during operation");
@@ -4023,9 +4024,9 @@ out:
 	if (err && rw)
 		btrfs_dec_block_group_ro(rc->block_group);
 	iput(rc->data_inode);
+	reloc_chunk_end(fs_info);
 out_put_bg:
 	btrfs_put_block_group(bg);
-	reloc_chunk_end(fs_info);
 	free_reloc_control(rc);
 	return err;
 }
@@ -4208,8 +4209,8 @@ out_clean:
 		ret = ret2;
 out_unset:
 	unset_reloc_control(rc);
-out_end:
 	reloc_chunk_end(fs_info);
+out_end:
 	free_reloc_control(rc);
 out:
 	free_reloc_roots(&reloc_roots);
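
Taken together these hunks make the two calls strictly paired: reloc_chunk_start() now clears BTRFS_FS_RELOC_RUNNING itself when it bails out on cancellation, and the error paths reachable after a failed start no longer run reloc_chunk_end(). A short sketch of the caller contract implied by the new NOTE comments (do_relocation_work() is an invented placeholder):

	ret = reloc_chunk_start(fs_info);
	if (ret < 0)
		return ret;			/* start failed: reloc_chunk_end() must not be called */

	ret = do_relocation_work(fs_info);

	reloc_chunk_end(fs_info);		/* paired only with a successful start */
	return ret;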

fs/btrfs/scrub.c

@@ -694,7 +694,7 @@ static void *scrub_stripe_get_kaddr(struct scrub_stripe *stripe, int sector_nr)
 	/* stripe->folios[] is allocated by us and no highmem is allowed. */
 	ASSERT(folio);
-	ASSERT(!folio_test_partial_kmap(folio));
+	ASSERT(!folio_test_highmem(folio));
 	return folio_address(folio) + offset_in_folio(folio, offset);
 }
@@ -707,7 +707,7 @@ static phys_addr_t scrub_stripe_get_paddr(struct scrub_stripe *stripe, int secto
 	/* stripe->folios[] is allocated by us and no highmem is allowed. */
 	ASSERT(folio);
-	ASSERT(!folio_test_partial_kmap(folio));
+	ASSERT(!folio_test_highmem(folio));
 	/* And the range must be contained inside the folio. */
 	ASSERT(offset_in_folio(folio, offset) + fs_info->sectorsize <= folio_size(folio));
 	return page_to_phys(folio_page(folio, 0)) + offset_in_folio(folio, offset);

fs/btrfs/send.c

@@ -178,7 +178,6 @@ struct send_ctx {
 	u64 cur_inode_rdev;
 	u64 cur_inode_last_extent;
 	u64 cur_inode_next_write_offset;
-	struct fs_path cur_inode_path;
 	bool cur_inode_new;
 	bool cur_inode_new_gen;
 	bool cur_inode_deleted;
@@ -305,6 +304,9 @@ struct send_ctx {
 	struct btrfs_lru_cache dir_created_cache;
 	struct btrfs_lru_cache dir_utimes_cache;
+
+	/* Must be last as it ends in a flexible-array member. */
+	struct fs_path cur_inode_path;
 };
 
 struct pending_dir_move {
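
struct fs_path ends in a flexible-array member, and GCC 14's -Wflex-array-member-not-at-end fires when such a struct is embedded anywhere other than at the end of its container, which is why moving cur_inode_path to the tail of send_ctx silences the warning. A minimal standalone reproduction with invented types (not the btrfs definitions):

	/* Compile with GCC 14+: gcc -c -Wflex-array-member-not-at-end fam.c */
	struct path_buf {
		unsigned int len;
		char data[];			/* flexible array member */
	};

	struct ctx_bad {
		struct path_buf path;		/* warns: flexible-array member not at the end */
		int other_state;
	};

	struct ctx_good {
		int other_state;
		struct path_buf path;		/* fine: the flexible-array member ends the struct */
	};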

fs/btrfs/super.c

@@ -1900,8 +1900,6 @@ static int btrfs_get_tree_super(struct fs_context *fc)
 		return PTR_ERR(sb);
 	}
 
-	set_device_specific_options(fs_info);
-
 	if (sb->s_root) {
 		/*
 		 * Not the first mount of the fs thus got an existing super block.
@@ -1946,6 +1944,7 @@ static int btrfs_get_tree_super(struct fs_context *fc)
 			deactivate_locked_super(sb);
 			return -EACCES;
 		}
+		set_device_specific_options(fs_info);
 		bdev = fs_devices->latest_dev->bdev;
 		snprintf(sb->s_id, sizeof(sb->s_id), "%pg", bdev);
 		shrinker_debugfs_rename(sb->s_shrink, "sb-btrfs:%s", sb->s_id);

fs/btrfs/tree-checker.c

@@ -1797,7 +1797,7 @@ static int check_inode_extref(struct extent_buffer *leaf,
 	struct btrfs_inode_extref *extref = (struct btrfs_inode_extref *)ptr;
 	u16 namelen;
 
-	if (unlikely(ptr + sizeof(*extref)) > end) {
+	if (unlikely(ptr + sizeof(*extref) > end)) {
 		inode_ref_err(leaf, slot,
 			"inode extref overflow, ptr %lu end %lu inode_extref size %zu",
 			ptr, end, sizeof(*extref));
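
The misplaced parenthesis applied the "> end" comparison to the 0-or-1 result of unlikely() rather than to the pointer arithmetic, so the extref bounds check effectively never triggered. A self-contained demo of the two groupings with invented values:

	#include <stdio.h>

	/* Same shape as the kernel macro: __builtin_expect() just passes the value through. */
	#define unlikely(x)	__builtin_expect(!!(x), 0)

	int main(void)
	{
		unsigned long ptr = 100, end = 120;
		unsigned long size = 32;			/* stands in for sizeof(*extref) */

		int buggy = unlikely(ptr + size) > end;		/* (0 or 1) > end: never true for real addresses */
		int fixed = unlikely(ptr + size > end);		/* the whole overflow check inside unlikely() */

		printf("buggy=%d fixed=%d\n", buggy, fixed);	/* prints buggy=0 fixed=1 */
		return 0;
	}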

fs/btrfs/zoned.c

@@ -1753,7 +1753,7 @@ out:
 		    !fs_info->stripe_root) {
 			btrfs_err(fs_info, "zoned: data %s needs raid-stripe-tree",
 				  btrfs_bg_type_to_raid_name(map->type));
-			return -EINVAL;
+			ret = -EINVAL;
 		}
 
 		if (unlikely(cache->alloc_offset > cache->zone_capacity)) {