Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs
Pull btrfs updates from Chris Mason:
"This pull is mostly cleanups and fixes:
- The raid5/6 cleanups from Zhao Lei fixup some long standing warts
in the code and add improvements on top of the scrubbing support
from 3.19.
- Josef has round one of our ENOSPC fixes coming from large btrfs
clusters here at FB.
- Dave Sterba continues a long series of cleanups (thanks Dave), and
Filipe continues hammering on corner cases in fsync and others.
This all was held up a little trying to track down a use-after-free in
btrfs raid5/6. It's not clear yet if this is just made easier to
trigger with this pull or if it's a new bug from the raid5/6 cleanups.
Dave Sterba is the only one to trigger it so far, but he has a
consistent way to reproduce, so we'll get it nailed shortly"
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs: (68 commits)
Btrfs: don't remove extents and xattrs when logging new names
Btrfs: fix fsync data loss after adding hard link to inode
Btrfs: fix BUG_ON in btrfs_orphan_add() when delete unused block group
Btrfs: account for large extents with enospc
Btrfs: don't set and clear delalloc for O_DIRECT writes
Btrfs: only adjust outstanding_extents when we do a short write
btrfs: Fix out-of-space bug
Btrfs: scrub, fix sleep in atomic context
Btrfs: fix scheduler warning when syncing log
Btrfs: Remove unnecessary placeholder in btrfs_err_code
btrfs: cleanup init for list in free-space-cache
btrfs: delete chunk allocation attempt when setting block group ro
btrfs: clear bio reference after submit_one_bio()
Btrfs: fix scrub race leading to use-after-free
Btrfs: add missing cleanup on sysfs init failure
Btrfs: fix race between transaction commit and empty block group removal
btrfs: add more checks to btrfs_read_sys_array
btrfs: cleanup, rename a few variables in btrfs_read_sys_array
btrfs: add checks for sys_chunk_array sizes
btrfs: more superblock checks, lower bounds on devices and sectorsize/nodesize
...
@@ -64,7 +64,7 @@ void btrfs_leak_debug_check(void)
 
 	while (!list_empty(&states)) {
 		state = list_entry(states.next, struct extent_state, leak_list);
-		pr_err("BTRFS: state leak: start %llu end %llu state %lu in tree %d refs %d\n",
+		pr_err("BTRFS: state leak: start %llu end %llu state %u in tree %d refs %d\n",
 		       state->start, state->end, state->state,
 		       extent_state_in_tree(state),
 		       atomic_read(&state->refs));
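Note: most of this diff is one mechanical change: the extent-state bit mask is narrowed from unsigned long to unsigned, since the bits defined for state->state fit comfortably in 32 bits. The pr_err() format above has to be narrowed in step, because a format specifier that disagrees with its argument's type is undefined behavior. A minimal userspace sketch of the same rule, with a hypothetical state struct standing in for struct extent_state:

    #include <stdio.h>

    struct state {
        unsigned state;   /* was: unsigned long state */
    };

    int main(void)
    {
        struct state s = { .state = 1u << 3 };
        /* %u matches unsigned; keeping %lu here would trip -Wformat
         * and is undefined behavior on LP64 targets. */
        printf("state %u\n", s.state);
        return 0;
    }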
@@ -396,21 +396,21 @@ static void merge_state(struct extent_io_tree *tree,
 }
 
 static void set_state_cb(struct extent_io_tree *tree,
-			 struct extent_state *state, unsigned long *bits)
+			 struct extent_state *state, unsigned *bits)
 {
 	if (tree->ops && tree->ops->set_bit_hook)
 		tree->ops->set_bit_hook(tree->mapping->host, state, bits);
 }
 
 static void clear_state_cb(struct extent_io_tree *tree,
-			   struct extent_state *state, unsigned long *bits)
+			   struct extent_state *state, unsigned *bits)
 {
 	if (tree->ops && tree->ops->clear_bit_hook)
 		tree->ops->clear_bit_hook(tree->mapping->host, state, bits);
 }
 
 static void set_state_bits(struct extent_io_tree *tree,
-			   struct extent_state *state, unsigned long *bits);
+			   struct extent_state *state, unsigned *bits);
 
 /*
  * insert an extent_state struct into the tree. 'bits' are set on the
@@ -426,7 +426,7 @@ static int insert_state(struct extent_io_tree *tree,
 			struct extent_state *state, u64 start, u64 end,
 			struct rb_node ***p,
 			struct rb_node **parent,
-			unsigned long *bits)
+			unsigned *bits)
 {
 	struct rb_node *node;
 
@@ -511,10 +511,10 @@ static struct extent_state *next_state(struct extent_state *state)
  */
 static struct extent_state *clear_state_bit(struct extent_io_tree *tree,
					    struct extent_state *state,
-					    unsigned long *bits, int wake)
+					    unsigned *bits, int wake)
 {
	struct extent_state *next;
-	unsigned long bits_to_clear = *bits & ~EXTENT_CTLBITS;
+	unsigned bits_to_clear = *bits & ~EXTENT_CTLBITS;
 
	if ((bits_to_clear & EXTENT_DIRTY) && (state->state & EXTENT_DIRTY)) {
		u64 range = state->end - state->start + 1;
@@ -570,7 +570,7 @@ static void extent_io_tree_panic(struct extent_io_tree *tree, int err)
  * This takes the tree lock, and returns 0 on success and < 0 on error.
  */
 int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
-		     unsigned long bits, int wake, int delete,
+		     unsigned bits, int wake, int delete,
		     struct extent_state **cached_state,
		     gfp_t mask)
 {
@@ -789,9 +789,9 @@ out:
 
 static void set_state_bits(struct extent_io_tree *tree,
			   struct extent_state *state,
-			   unsigned long *bits)
+			   unsigned *bits)
 {
-	unsigned long bits_to_set = *bits & ~EXTENT_CTLBITS;
+	unsigned bits_to_set = *bits & ~EXTENT_CTLBITS;
 
	set_state_cb(tree, state, bits);
	if ((bits_to_set & EXTENT_DIRTY) && !(state->state & EXTENT_DIRTY)) {
@@ -803,7 +803,7 @@ static void set_state_bits(struct extent_io_tree *tree,
 
 static void cache_state_if_flags(struct extent_state *state,
				 struct extent_state **cached_ptr,
-				 const u64 flags)
+				 unsigned flags)
 {
	if (cached_ptr && !(*cached_ptr)) {
		if (!flags || (state->state & flags)) {
@@ -833,7 +833,7 @@ static void cache_state(struct extent_state *state,
 
 static int __must_check
 __set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
-		 unsigned long bits, unsigned long exclusive_bits,
+		 unsigned bits, unsigned exclusive_bits,
		 u64 *failed_start, struct extent_state **cached_state,
		 gfp_t mask)
 {
@@ -1034,7 +1034,7 @@ search_again:
 }
 
 int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
-		   unsigned long bits, u64 * failed_start,
+		   unsigned bits, u64 * failed_start,
		   struct extent_state **cached_state, gfp_t mask)
 {
	return __set_extent_bit(tree, start, end, bits, 0, failed_start,
@@ -1060,7 +1060,7 @@ int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
  * boundary bits like LOCK.
  */
 int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
-		       unsigned long bits, unsigned long clear_bits,
+		       unsigned bits, unsigned clear_bits,
		       struct extent_state **cached_state, gfp_t mask)
 {
	struct extent_state *state;
@@ -1268,14 +1268,14 @@ int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
 }
 
 int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
-		    unsigned long bits, gfp_t mask)
+		    unsigned bits, gfp_t mask)
 {
	return set_extent_bit(tree, start, end, bits, NULL,
			      NULL, mask);
 }
 
 int clear_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
-		      unsigned long bits, gfp_t mask)
+		      unsigned bits, gfp_t mask)
 {
	return clear_extent_bit(tree, start, end, bits, 0, 0, NULL, mask);
 }
@@ -1330,10 +1330,11 @@ int clear_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
  * us if waiting is desired.
  */
 int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
-		     unsigned long bits, struct extent_state **cached_state)
+		     unsigned bits, struct extent_state **cached_state)
 {
	int err;
	u64 failed_start;
+
	while (1) {
		err = __set_extent_bit(tree, start, end, EXTENT_LOCKED | bits,
				       EXTENT_LOCKED, &failed_start,
@@ -1440,7 +1441,7 @@ static int set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end)
  */
 static struct extent_state *
 find_first_extent_bit_state(struct extent_io_tree *tree,
-			    u64 start, unsigned long bits)
+			    u64 start, unsigned bits)
 {
	struct rb_node *node;
	struct extent_state *state;
@@ -1474,7 +1475,7 @@ out:
  * If nothing was found, 1 is returned. If found something, return 0.
  */
 int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
-			  u64 *start_ret, u64 *end_ret, unsigned long bits,
+			  u64 *start_ret, u64 *end_ret, unsigned bits,
			  struct extent_state **cached_state)
 {
	struct extent_state *state;
@@ -1753,7 +1754,7 @@ out_failed:
 
 int extent_clear_unlock_delalloc(struct inode *inode, u64 start, u64 end,
				 struct page *locked_page,
-				 unsigned long clear_bits,
+				 unsigned clear_bits,
				 unsigned long page_ops)
 {
	struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
@@ -1810,7 +1811,7 @@ int extent_clear_unlock_delalloc(struct inode *inode, u64 start, u64 end,
  */
 u64 count_range_bits(struct extent_io_tree *tree,
		     u64 *start, u64 search_end, u64 max_bytes,
-		     unsigned long bits, int contig)
+		     unsigned bits, int contig)
 {
	struct rb_node *node;
	struct extent_state *state;
@@ -1928,7 +1929,7 @@ out:
  * range is found set.
  */
 int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
-		   unsigned long bits, int filled, struct extent_state *cached)
+		   unsigned bits, int filled, struct extent_state *cached)
 {
	struct extent_state *state = NULL;
	struct rb_node *node;
@@ -2057,7 +2058,7 @@ int repair_io_failure(struct inode *inode, u64 start, u64 length, u64 logical,
	sector = bbio->stripes[mirror_num-1].physical >> 9;
	bio->bi_iter.bi_sector = sector;
	dev = bbio->stripes[mirror_num-1].dev;
-	kfree(bbio);
+	btrfs_put_bbio(bbio);
	if (!dev || !dev->bdev || !dev->writeable) {
		bio_put(bio);
		return -EIO;
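Note: kfree(bbio) becoming btrfs_put_bbio(bbio) is part of the raid5/6 cleanup series mentioned in the pull message: btrfs_bio objects become reference counted, so several in-flight I/Os can share one mapping without any of them freeing it out from under the others, which is exactly the use-after-free class of bug discussed above. A rough userspace sketch of the get/put discipline, with hypothetical names (the kernel uses atomic_t and its own helpers):

    #include <stdatomic.h>
    #include <stdlib.h>

    struct bbio {
        atomic_int refs;
        /* ... stripe mapping shared by several bios ... */
    };

    static struct bbio *bbio_alloc(void)
    {
        struct bbio *b = calloc(1, sizeof(*b));
        if (b)
            atomic_init(&b->refs, 1);
        return b;
    }

    static void bbio_get(struct bbio *b)
    {
        atomic_fetch_add(&b->refs, 1);
    }

    static void bbio_put(struct bbio *b)
    {
        /* Only the last holder frees; an unconditional free() here
         * is exactly the bug the refcount removes. */
        if (b && atomic_fetch_sub(&b->refs, 1) == 1)
            free(b);
    }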
@@ -2816,8 +2817,10 @@ static int submit_extent_page(int rw, struct extent_io_tree *tree,
	    bio_add_page(bio, page, page_size, offset) < page_size) {
		ret = submit_one_bio(rw, bio, mirror_num,
				     prev_bio_flags);
-		if (ret < 0)
+		if (ret < 0) {
+			*bio_ret = NULL;
			return ret;
+		}
		bio = NULL;
	} else {
		return 0;
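Note: submit_one_bio() consumes the bio whether or not it fails, but the old error path returned with *bio_ret still pointing at it, so a caller that retried would touch freed memory. NULLing the out-parameter inside the new braces closes that window. The shape of the fix in isolation, as a compilable sketch with stand-in names:

    #include <stdlib.h>

    struct bio { int x; };

    /* Stand-in for submit_one_bio(): consumes the bio, success or failure. */
    static int submit_one(struct bio *bio)
    {
        free(bio);
        return -5;   /* pretend the submission failed with -EIO */
    }

    static int flush_cached(struct bio **bio_ret)
    {
        int ret = submit_one(*bio_ret);
        /* On failure too: the bio is already gone, so drop the cached
         * pointer before returning (this is what the hunk above adds). */
        *bio_ret = NULL;
        return ret;
    }

    int main(void)
    {
        struct bio *cached = malloc(sizeof(*cached));
        int ret = flush_cached(&cached);
        /* cached is NULL now, so no later path can reuse the freed bio. */
        return (ret < 0 && cached == NULL) ? 0 : 1;
    }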
@@ -3239,7 +3242,7 @@ static noinline_for_stack int writepage_delalloc(struct inode *inode,
					       page,
					       &delalloc_start,
					       &delalloc_end,
-					       128 * 1024 * 1024);
+					       BTRFS_MAX_EXTENT_SIZE);
		if (nr_delalloc == 0) {
			delalloc_start = delalloc_end + 1;
			continue;
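Note: the bare 128MB literal gives way to BTRFS_MAX_EXTENT_SIZE, introduced by the ENOSPC accounting work in this pull ("Btrfs: account for large extents with enospc" above) so that delalloc reservation and the writepage path agree on one maximum extent size instead of each hardcoding it. Assuming the definition as merged, in fs/btrfs/extent_io.h:

    #define BTRFS_MAX_EXTENT_SIZE (128 * 1024 * 1024)

The value is unchanged, so this hunk is purely a consistency cleanup.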
@@ -4598,11 +4601,11 @@ static inline void btrfs_release_extent_buffer(struct extent_buffer *eb)
 
 static struct extent_buffer *
 __alloc_extent_buffer(struct btrfs_fs_info *fs_info, u64 start,
-		      unsigned long len, gfp_t mask)
+		      unsigned long len)
 {
	struct extent_buffer *eb = NULL;
 
-	eb = kmem_cache_zalloc(extent_buffer_cache, mask);
+	eb = kmem_cache_zalloc(extent_buffer_cache, GFP_NOFS);
	if (eb == NULL)
		return NULL;
	eb->start = start;
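Note: the gfp_t parameter is dropped because every caller passed GFP_NOFS anyway; hardcoding it in __alloc_extent_buffer() also documents the invariant that extent buffers are allocated on filesystem paths, where a GFP_KERNEL allocation could recurse into fs reclaim and deadlock.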
@@ -4643,7 +4646,7 @@ struct extent_buffer *btrfs_clone_extent_buffer(struct extent_buffer *src)
	struct extent_buffer *new;
	unsigned long num_pages = num_extent_pages(src->start, src->len);
 
-	new = __alloc_extent_buffer(NULL, src->start, src->len, GFP_NOFS);
+	new = __alloc_extent_buffer(src->fs_info, src->start, src->len);
	if (new == NULL)
		return NULL;
 
@@ -4666,13 +4669,26 @@ struct extent_buffer *btrfs_clone_extent_buffer(struct extent_buffer *src)
	return new;
 }
 
-struct extent_buffer *alloc_dummy_extent_buffer(u64 start, unsigned long len)
+struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
+						u64 start)
 {
	struct extent_buffer *eb;
-	unsigned long num_pages = num_extent_pages(0, len);
+	unsigned long len;
+	unsigned long num_pages;
	unsigned long i;
 
-	eb = __alloc_extent_buffer(NULL, start, len, GFP_NOFS);
+	if (!fs_info) {
+		/*
+		 * Called only from tests that don't always have a fs_info
+		 * available, but we know that nodesize is 4096
+		 */
+		len = 4096;
+	} else {
+		len = fs_info->tree_root->nodesize;
+	}
+	num_pages = num_extent_pages(0, len);
+
+	eb = __alloc_extent_buffer(fs_info, start, len);
	if (!eb)
		return NULL;
 
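Note: alloc_dummy_extent_buffer() now takes fs_info and derives the buffer length from the tree root's nodesize itself, with 4096 as the fallback for sanity tests that run without a mounted filesystem; callers (see the alloc_test_extent_buffer() and alloc_extent_buffer() hunks below) stop passing a len they would have computed the same way. The derivation, reduced to a compilable sketch with a stand-in for btrfs_fs_info:

    struct fs_info_stub { unsigned nodesize; };   /* stand-in, not the real struct */

    static unsigned long buffer_len(const struct fs_info_stub *fs_info)
    {
        /* 4096 mirrors the test-only fallback in the hunk above. */
        return fs_info ? fs_info->nodesize : 4096;
    }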
@@ -4762,7 +4778,7 @@ struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info,
 
 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
 struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info,
-					       u64 start, unsigned long len)
+					       u64 start)
 {
	struct extent_buffer *eb, *exists = NULL;
	int ret;
@@ -4770,7 +4786,7 @@ struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info,
	eb = find_extent_buffer(fs_info, start);
	if (eb)
		return eb;
-	eb = alloc_dummy_extent_buffer(start, len);
+	eb = alloc_dummy_extent_buffer(fs_info, start);
	if (!eb)
		return NULL;
	eb->fs_info = fs_info;
@@ -4808,8 +4824,9 @@ free_eb:
 #endif
 
 struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
-					  u64 start, unsigned long len)
+					  u64 start)
 {
+	unsigned long len = fs_info->tree_root->nodesize;
	unsigned long num_pages = num_extent_pages(start, len);
	unsigned long i;
	unsigned long index = start >> PAGE_CACHE_SHIFT;
@@ -4824,7 +4841,7 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
	if (eb)
		return eb;
 
-	eb = __alloc_extent_buffer(fs_info, start, len, GFP_NOFS);
+	eb = __alloc_extent_buffer(fs_info, start, len);
	if (!eb)
		return NULL;
 