Merge tag 'for-6.18-rc1-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux

Pull btrfs fixes from David Sterba:

 - in tree-checker, fix the extref bounds check (a misplaced closing
   parenthesis made the overflow check ineffective)

 - reorder send context structure to avoid
   -Wflex-array-member-not-at-end warning

 - fix extent readahead length for compressed extents

 - fix memory leaks on error paths (qgroup assign ioctl, zone loading
   with raid stripe tree enabled)

 - fix how device-specific mount options are applied; in particular,
   the 'ssd' option could be set unexpectedly

 - fix tracking of the relocation state when a relocation task is
   already running and cancellation is attempted

 - adjust assertion condition for folios allocated for scrub

 - remove an incorrect assertion that a block group item was found when
   populating the free space tree

* tag 'for-6.18-rc1-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux:
  btrfs: send: fix -Wflex-array-member-not-at-end warning in struct send_ctx
  btrfs: tree-checker: fix bounds check in check_inode_extref()
  btrfs: fix memory leaks when rejecting a non SINGLE data profile without an RST
  btrfs: fix incorrect readahead expansion length
  btrfs: do not assert we found block group item when creating free space tree
  btrfs: do not use folio_test_partial_kmap() in ASSERT()s
  btrfs: only set the device specific options after devices are opened
  btrfs: fix memory leak on duplicated memory in the qgroup assign ioctl
  btrfs: fix clearing of BTRFS_FS_RELOC_RUNNING if relocation already running

Linus Torvalds
2025-10-16 10:22:38 -07:00

9 changed files with 25 additions and 22 deletions

fs/btrfs/extent_io.c

@@ -973,7 +973,7 @@ static void btrfs_readahead_expand(struct readahead_control *ractl,
 {
 	const u64 ra_pos = readahead_pos(ractl);
 	const u64 ra_end = ra_pos + readahead_length(ractl);
-	const u64 em_end = em->start + em->ram_bytes;
+	const u64 em_end = em->start + em->len;
 
 	/* No expansion for holes and inline extents. */
 	if (em->disk_bytenr > EXTENT_MAP_LAST_BYTE)

fs/btrfs/free-space-tree.c

@@ -1106,14 +1106,15 @@ static int populate_free_space_tree(struct btrfs_trans_handle *trans,
 	 * If ret is 1 (no key found), it means this is an empty block group,
 	 * without any extents allocated from it and there's no block group
 	 * item (key BTRFS_BLOCK_GROUP_ITEM_KEY) located in the extent tree
-	 * because we are using the block group tree feature, so block group
-	 * items are stored in the block group tree. It also means there are no
-	 * extents allocated for block groups with a start offset beyond this
-	 * block group's end offset (this is the last, highest, block group).
+	 * because we are using the block group tree feature (so block group
+	 * items are stored in the block group tree) or this is a new block
+	 * group created in the current transaction and its block group item
+	 * was not yet inserted in the extent tree (that happens in
+	 * btrfs_create_pending_block_groups() -> insert_block_group_item()).
+	 * It also means there are no extents allocated for block groups with a
+	 * start offset beyond this block group's end offset (this is the last,
+	 * highest, block group).
 	 */
-	if (!btrfs_fs_compat_ro(trans->fs_info, BLOCK_GROUP_TREE))
-		ASSERT(ret == 0);
-
 	start = block_group->start;
 	end = block_group->start + block_group->length;
 	while (ret == 0) {

fs/btrfs/ioctl.c

@@ -3740,7 +3740,7 @@ static long btrfs_ioctl_qgroup_assign(struct file *file, void __user *arg)
 		prealloc = kzalloc(sizeof(*prealloc), GFP_KERNEL);
 		if (!prealloc) {
 			ret = -ENOMEM;
-			goto drop_write;
+			goto out;
 		}
 	}
 
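
For context on the ioctl.c fix above: per the commit subject, the allocation failure path jumped to a label that skipped freeing the memory duplicated from user space earlier in the function, leaking it; routing the error through 'out' lets the normal unwind run. A generic, self-contained sketch of the goto-unwind pattern this relies on (hypothetical userspace code, not the btrfs ioctl itself):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Later failure points must jump to a label that still frees everything
 * allocated before them; jumping too far down the ladder leaks memory. */
static int do_assign(const char *user_args)
{
	int ret = 0;
	char *sa = NULL;		/* stands in for the duplicated user args */
	void *prealloc = NULL;

	sa = strdup(user_args);		/* stands in for memdup_user() */
	if (!sa)
		return -ENOMEM;

	prealloc = calloc(1, 64);
	if (!prealloc) {
		ret = -ENOMEM;
		goto out;		/* not a later label: 'sa' still needs freeing */
	}

	/* ... work with sa and prealloc ... */

out:
	free(prealloc);			/* free(NULL) is a no-op, so this is safe */
	free(sa);			/* the duplicated memory is always released */
	return ret;
}

int main(void)
{
	printf("do_assign() returned %d\n", do_assign("1:100"));
	return 0;
}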

fs/btrfs/relocation.c

@@ -3780,6 +3780,7 @@ out:
 /*
  * Mark start of chunk relocation that is cancellable. Check if the cancellation
  * has been requested meanwhile and don't start in that case.
+ * NOTE: if this returns an error, reloc_chunk_end() must not be called.
  *
  * Return:
  * 0 success
@@ -3796,10 +3797,8 @@ static int reloc_chunk_start(struct btrfs_fs_info *fs_info)
 
 	if (atomic_read(&fs_info->reloc_cancel_req) > 0) {
 		btrfs_info(fs_info, "chunk relocation canceled on start");
-		/*
-		 * On cancel, clear all requests but let the caller mark
-		 * the end after cleanup operations.
-		 */
+		/* On cancel, clear all requests. */
+		clear_and_wake_up_bit(BTRFS_FS_RELOC_RUNNING, &fs_info->flags);
 		atomic_set(&fs_info->reloc_cancel_req, 0);
 		return -ECANCELED;
 	}
@@ -3808,9 +3807,11 @@ static int reloc_chunk_start(struct btrfs_fs_info *fs_info)
 
 /*
  * Mark end of chunk relocation that is cancellable and wake any waiters.
+ * NOTE: call only if a previous call to reloc_chunk_start() succeeded.
  */
 static void reloc_chunk_end(struct btrfs_fs_info *fs_info)
 {
+	ASSERT(test_bit(BTRFS_FS_RELOC_RUNNING, &fs_info->flags));
 	/* Requested after start, clear bit first so any waiters can continue */
 	if (atomic_read(&fs_info->reloc_cancel_req) > 0)
 		btrfs_info(fs_info, "chunk relocation canceled during operation");
@@ -4023,9 +4024,9 @@ out:
 	if (err && rw)
 		btrfs_dec_block_group_ro(rc->block_group);
 	iput(rc->data_inode);
+	reloc_chunk_end(fs_info);
 out_put_bg:
 	btrfs_put_block_group(bg);
-	reloc_chunk_end(fs_info);
 	free_reloc_control(rc);
 	return err;
 }
@@ -4208,8 +4209,8 @@ out_clean:
 		ret = ret2;
 out_unset:
 	unset_reloc_control(rc);
-out_end:
 	reloc_chunk_end(fs_info);
+out_end:
 	free_reloc_control(rc);
 out:
 	free_reloc_roots(&reloc_roots);

fs/btrfs/scrub.c

@@ -694,7 +694,7 @@ static void *scrub_stripe_get_kaddr(struct scrub_stripe *stripe, int sector_nr)
 
 	/* stripe->folios[] is allocated by us and no highmem is allowed. */
 	ASSERT(folio);
-	ASSERT(!folio_test_partial_kmap(folio));
+	ASSERT(!folio_test_highmem(folio));
 	return folio_address(folio) + offset_in_folio(folio, offset);
 }
 
@@ -707,7 +707,7 @@ static phys_addr_t scrub_stripe_get_paddr(struct scrub_stripe *stripe, int secto
 
 	/* stripe->folios[] is allocated by us and no highmem is allowed. */
 	ASSERT(folio);
-	ASSERT(!folio_test_partial_kmap(folio));
+	ASSERT(!folio_test_highmem(folio));
 	/* And the range must be contained inside the folio. */
 	ASSERT(offset_in_folio(folio, offset) + fs_info->sectorsize <= folio_size(folio));
 	return page_to_phys(folio_page(folio, 0)) + offset_in_folio(folio, offset);

fs/btrfs/send.c

@@ -178,7 +178,6 @@ struct send_ctx {
 	u64 cur_inode_rdev;
 	u64 cur_inode_last_extent;
 	u64 cur_inode_next_write_offset;
-	struct fs_path cur_inode_path;
 	bool cur_inode_new;
 	bool cur_inode_new_gen;
 	bool cur_inode_deleted;
@@ -305,6 +304,9 @@ struct send_ctx {
 
 	struct btrfs_lru_cache dir_created_cache;
 	struct btrfs_lru_cache dir_utimes_cache;
+
+	/* Must be last as it ends in a flexible-array member. */
+	struct fs_path cur_inode_path;
 };
 
 struct pending_dir_move {
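
For context on the send.c reordering above: GCC's -Wflex-array-member-not-at-end warns when a structure that ends in a flexible array member (which, per the new comment in the hunk, is the case for struct fs_path) is embedded anywhere other than as the last member of the enclosing structure, since anything placed after it would overlap the flexible array's storage. A minimal sketch of the pattern with made-up names (not the real fs_path/send_ctx layout):

struct name_buf {
	unsigned int len;
	char data[];			/* flexible array member */
};

struct bad_ctx {
	struct name_buf path;		/* not last: the member below would overlap
					 * path.data[], so the compiler warns with
					 * -Wflex-array-member-not-at-end */
	unsigned long long cur_ino;
};

struct good_ctx {
	unsigned long long cur_ino;
	/* Must be last as it ends in a flexible-array member. */
	struct name_buf path;
};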

fs/btrfs/super.c

@@ -1900,8 +1900,6 @@ static int btrfs_get_tree_super(struct fs_context *fc)
 		return PTR_ERR(sb);
 	}
 
-	set_device_specific_options(fs_info);
-
 	if (sb->s_root) {
 		/*
 		 * Not the first mount of the fs thus got an existing super block.
@@ -1946,6 +1944,7 @@ static int btrfs_get_tree_super(struct fs_context *fc)
 			deactivate_locked_super(sb);
 			return -EACCES;
 		}
+		set_device_specific_options(fs_info);
 		bdev = fs_devices->latest_dev->bdev;
 		snprintf(sb->s_id, sizeof(sb->s_id), "%pg", bdev);
 		shrinker_debugfs_rename(sb->s_shrink, "sb-btrfs:%s", sb->s_id);

fs/btrfs/tree-checker.c

@@ -1797,7 +1797,7 @@ static int check_inode_extref(struct extent_buffer *leaf,
 		struct btrfs_inode_extref *extref = (struct btrfs_inode_extref *)ptr;
 		u16 namelen;
 
-		if (unlikely(ptr + sizeof(*extref)) > end) {
+		if (unlikely(ptr + sizeof(*extref) > end)) {
 			inode_ref_err(leaf, slot,
 				"inode extref overflow, ptr %lu end %lu inode_extref size %zu",
 				ptr, end, sizeof(*extref));
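
For context on the tree-checker fix above: the kernel's unlikely(x) expands to __builtin_expect(!!(x), 0) and therefore evaluates to 0 or 1, so with the old parenthesization the '> end' comparison was applied to that 0/1 result rather than to the pointer arithmetic, and the overflow branch could effectively never be taken. A small self-contained sketch of the difference (plain userspace C with made-up values, not btrfs code):

#include <stdio.h>

/* Stand-in for the kernel's unlikely(): evaluates to 0 or 1. */
#define unlikely(x) __builtin_expect(!!(x), 0)

struct extref_like { unsigned long parent; unsigned short namelen; };

int main(void)
{
	unsigned long ptr = 1000;	/* hypothetical start of the item data */
	unsigned long end = 1004;	/* hypothetical end of the item data */

	/* Old form: unlikely(...) collapses to 1, so this compares 1 > end and
	 * is false for any realistic 'end'. */
	if (unlikely(ptr + sizeof(struct extref_like)) > end)
		printf("old check fires\n");	/* never printed */

	/* Fixed form: the whole bounds check is inside unlikely(). */
	if (unlikely(ptr + sizeof(struct extref_like) > end))
		printf("new check fires\n");	/* printed: ptr + sizeof() exceeds end */

	return 0;
}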

fs/btrfs/zoned.c

@@ -1753,7 +1753,7 @@ out:
 	    !fs_info->stripe_root) {
 		btrfs_err(fs_info, "zoned: data %s needs raid-stripe-tree",
 			  btrfs_bg_type_to_raid_name(map->type));
-		return -EINVAL;
+		ret = -EINVAL;
 	}
 
 	if (unlikely(cache->alloc_offset > cache->zone_capacity)) {