Merge patch series "filemap_* writeback interface cleanups v2"
Christoph Hellwig <hch@lst.de> says:

While looking at the filemap writeback code, I think adding filemap_fdatawrite_wbc ended up being a mistake, as all but the original btrfs caller should be using better high-level interfaces instead. This series removes all of these, switches btrfs to a more specific interface, and also cleans up another too-low-level interface. With this, the writeback_control passed to the writeback code is only initialized in three places, although there are a lot more places in file system code that never reach the common writeback code.

* patches from https://patch.msgid.link/20251024080431.324236-1-hch@lst.de:
  mm: rename filemap_fdatawrite_range_kick to filemap_flush_range
  mm: remove __filemap_fdatawrite_range
  mm: remove filemap_fdatawrite_wbc
  mm: remove __filemap_fdatawrite
  mm,btrfs: add a filemap_flush_nr helper
  btrfs: push struct writeback_control into start_delalloc_inodes
  btrfs: use the local tmp_inode variable in start_delalloc_inodes
  ocfs2: don't opencode filemap_fdatawrite_range in ocfs2_journal_submit_inode_data_buffers
  9p: don't opencode filemap_fdatawrite_range in v9fs_mmap_vm_close
  mm: don't opencode filemap_fdatawrite_range in filemap_invalidate_inode

Link: https://patch.msgid.link/20251024080431.324236-1-hch@lst.de
Signed-off-by: Christian Brauner <brauner@kernel.org>
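For the filesystem callers the conversion is mechanical: drop the open-coded writeback_control and call the high-level range helper, which builds an equivalent WB_SYNC_ALL control internally. A minimal before/after sketch (the example_sync_range_* wrappers are illustrative, not functions from the tree):

/* Before: open-coding a writeback_control just to write out one range. */
static int example_sync_range_old(struct address_space *mapping,
				  loff_t start, loff_t end)
{
	struct writeback_control wbc = {
		.sync_mode	= WB_SYNC_ALL,
		.nr_to_write	= LONG_MAX,
		.range_start	= start,
		.range_end	= end,
	};

	return filemap_fdatawrite_wbc(mapping, &wbc);
}

/* After: the helper sets up the same writeback_control itself. */
static int example_sync_range_new(struct address_space *mapping,
				  loff_t start, loff_t end)
{
	return filemap_fdatawrite_range(mapping, start, end);
}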
@@ -483,24 +483,15 @@ v9fs_vm_page_mkwrite(struct vm_fault *vmf)
 static void v9fs_mmap_vm_close(struct vm_area_struct *vma)
 {
-	struct inode *inode;
-
-	struct writeback_control wbc = {
-		.nr_to_write = LONG_MAX,
-		.sync_mode = WB_SYNC_ALL,
-		.range_start = (loff_t)vma->vm_pgoff * PAGE_SIZE,
-		 /* absolute end, byte at end included */
-		.range_end = (loff_t)vma->vm_pgoff * PAGE_SIZE +
-			(vma->vm_end - vma->vm_start - 1),
-	};
-
 	if (!(vma->vm_flags & VM_SHARED))
 		return;
 
 	p9_debug(P9_DEBUG_VFS, "9p VMA close, %p, flushing", vma);
 
-	inode = file_inode(vma->vm_file);
-	filemap_fdatawrite_wbc(inode->i_mapping, &wbc);
+	filemap_fdatawrite_range(file_inode(vma->vm_file)->i_mapping,
+			(loff_t)vma->vm_pgoff * PAGE_SIZE,
+			(loff_t)vma->vm_pgoff * PAGE_SIZE +
+				(vma->vm_end - vma->vm_start - 1));
 }
 
 static const struct vm_operations_struct v9fs_mmap_file_vm_ops = {
@@ -8709,15 +8709,13 @@ static struct btrfs_delalloc_work *btrfs_alloc_delalloc_work(struct inode *inode
  * some fairly slow code that needs optimization. This walks the list
  * of all the inodes with pending delalloc and forces them to disk.
  */
-static int start_delalloc_inodes(struct btrfs_root *root,
-				 struct writeback_control *wbc, bool snapshot,
-				 bool in_reclaim_context)
+static int start_delalloc_inodes(struct btrfs_root *root, long *nr_to_write,
+				 bool snapshot, bool in_reclaim_context)
 {
 	struct btrfs_delalloc_work *work, *next;
 	LIST_HEAD(works);
 	LIST_HEAD(splice);
 	int ret = 0;
-	bool full_flush = wbc->nr_to_write == LONG_MAX;
 
 	mutex_lock(&root->delalloc_mutex);
 	spin_lock(&root->delalloc_lock);
@@ -8743,10 +8741,10 @@ static int start_delalloc_inodes(struct btrfs_root *root,
 
 		if (snapshot)
 			set_bit(BTRFS_INODE_SNAPSHOT_FLUSH, &inode->runtime_flags);
-		if (full_flush) {
-			work = btrfs_alloc_delalloc_work(&inode->vfs_inode);
+		if (nr_to_write == NULL) {
+			work = btrfs_alloc_delalloc_work(tmp_inode);
 			if (!work) {
-				iput(&inode->vfs_inode);
+				iput(tmp_inode);
 				ret = -ENOMEM;
 				goto out;
 			}
@@ -8754,9 +8752,11 @@ static int start_delalloc_inodes(struct btrfs_root *root,
 			btrfs_queue_work(root->fs_info->flush_workers,
 					 &work->work);
 		} else {
-			ret = filemap_fdatawrite_wbc(inode->vfs_inode.i_mapping, wbc);
+			ret = filemap_flush_nr(tmp_inode->i_mapping,
+					       nr_to_write);
 			btrfs_add_delayed_iput(inode);
-			if (ret || wbc->nr_to_write <= 0)
+
+			if (ret || *nr_to_write <= 0)
 				goto out;
 		}
 		cond_resched();
@@ -8782,29 +8782,17 @@ out:
 
 int btrfs_start_delalloc_snapshot(struct btrfs_root *root, bool in_reclaim_context)
 {
-	struct writeback_control wbc = {
-		.nr_to_write = LONG_MAX,
-		.sync_mode = WB_SYNC_NONE,
-		.range_start = 0,
-		.range_end = LLONG_MAX,
-	};
 	struct btrfs_fs_info *fs_info = root->fs_info;
 
 	if (BTRFS_FS_ERROR(fs_info))
 		return -EROFS;
 
-	return start_delalloc_inodes(root, &wbc, true, in_reclaim_context);
+	return start_delalloc_inodes(root, NULL, true, in_reclaim_context);
 }
 
 int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, long nr,
 			       bool in_reclaim_context)
 {
-	struct writeback_control wbc = {
-		.nr_to_write = nr,
-		.sync_mode = WB_SYNC_NONE,
-		.range_start = 0,
-		.range_end = LLONG_MAX,
-	};
+	long *nr_to_write = nr == LONG_MAX ? NULL : &nr;
 	struct btrfs_root *root;
 	LIST_HEAD(splice);
 	int ret;
@@ -8816,13 +8804,6 @@ int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, long nr,
 	spin_lock(&fs_info->delalloc_root_lock);
 	list_splice_init(&fs_info->delalloc_roots, &splice);
 	while (!list_empty(&splice)) {
-		/*
-		 * Reset nr_to_write here so we know that we're doing a full
-		 * flush.
-		 */
-		if (nr == LONG_MAX)
-			wbc.nr_to_write = LONG_MAX;
-
 		root = list_first_entry(&splice, struct btrfs_root,
 					delalloc_root);
 		root = btrfs_grab_root(root);
@@ -8831,9 +8812,10 @@ int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, long nr,
 				 &fs_info->delalloc_roots);
 		spin_unlock(&fs_info->delalloc_root_lock);
 
-		ret = start_delalloc_inodes(root, &wbc, false, in_reclaim_context);
+		ret = start_delalloc_inodes(root, nr_to_write, false,
+					    in_reclaim_context);
 		btrfs_put_root(root);
-		if (ret < 0 || wbc.nr_to_write <= 0)
+		if (ret < 0 || nr <= 0)
 			goto out;
 		spin_lock(&fs_info->delalloc_root_lock);
 	}
@@ -822,9 +822,9 @@ static void wbc_attach_and_unlock_inode(struct writeback_control *wbc,
  * @wbc: writeback_control of interest
  * @inode: target inode
  *
- * This function is to be used by __filemap_fdatawrite_range(), which is an
- * alternative entry point into writeback code, and first ensures @inode is
- * associated with a bdi_writeback and attaches it to @wbc.
+ * This function is to be used by filemap_writeback(), which is an alternative
+ * entry point into writeback code, and first ensures @inode is associated with
+ * a bdi_writeback and attaches it to @wbc.
  */
 void wbc_attach_fdatawrite_inode(struct writeback_control *wbc,
 				 struct inode *inode)
@@ -902,15 +902,8 @@ bail:
 
 static int ocfs2_journal_submit_inode_data_buffers(struct jbd2_inode *jinode)
 {
-	struct address_space *mapping = jinode->i_vfs_inode->i_mapping;
-	struct writeback_control wbc = {
-		.sync_mode = WB_SYNC_ALL,
-		.nr_to_write = mapping->nrpages * 2,
-		.range_start = jinode->i_dirty_start,
-		.range_end = jinode->i_dirty_end,
-	};
-
-	return filemap_fdatawrite_wbc(mapping, &wbc);
+	return filemap_fdatawrite_range(jinode->i_vfs_inode->i_mapping,
+			jinode->i_dirty_start, jinode->i_dirty_end);
 }
 
 int ocfs2_journal_init(struct ocfs2_super *osb, int *dirty)
fs/sync.c
@@ -280,14 +280,12 @@ int sync_file_range(struct file *file, loff_t offset, loff_t nbytes,
 	}
 
 	if (flags & SYNC_FILE_RANGE_WRITE) {
-		int sync_mode = WB_SYNC_NONE;
-
 		if ((flags & SYNC_FILE_RANGE_WRITE_AND_WAIT) ==
 		    SYNC_FILE_RANGE_WRITE_AND_WAIT)
-			sync_mode = WB_SYNC_ALL;
-
-		ret = __filemap_fdatawrite_range(mapping, offset, endbyte,
-						 sync_mode);
+			ret = filemap_fdatawrite_range(mapping, offset,
+					endbyte);
+		else
+			ret = filemap_flush_range(mapping, offset, endbyte);
 		if (ret < 0)
 			goto out;
 	}
@@ -3014,7 +3014,7 @@ extern int __must_check file_fdatawait_range(struct file *file, loff_t lstart,
 extern int __must_check file_check_and_advance_wb_err(struct file *file);
 extern int __must_check file_write_and_wait_range(struct file *file,
 						   loff_t start, loff_t end);
-int filemap_fdatawrite_range_kick(struct address_space *mapping, loff_t start,
+int filemap_flush_range(struct address_space *mapping, loff_t start,
 		loff_t end);
 
 static inline int file_write_and_wait(struct file *file)
@@ -3051,8 +3051,8 @@ static inline ssize_t generic_write_sync(struct kiocb *iocb, ssize_t count)
 	} else if (iocb->ki_flags & IOCB_DONTCACHE) {
 		struct address_space *mapping = iocb->ki_filp->f_mapping;
 
-		filemap_fdatawrite_range_kick(mapping, iocb->ki_pos - count,
-					      iocb->ki_pos - 1);
+		filemap_flush_range(mapping, iocb->ki_pos - count,
+				    iocb->ki_pos - 1);
 	}
 
 	return count;
@@ -38,6 +38,7 @@ int filemap_invalidate_pages(struct address_space *mapping,
 int write_inode_now(struct inode *, int sync);
 int filemap_fdatawrite(struct address_space *);
 int filemap_flush(struct address_space *);
+int filemap_flush_nr(struct address_space *mapping, long *nr_to_write);
 int filemap_fdatawait_keep_errors(struct address_space *mapping);
 int filemap_fdatawait_range(struct address_space *, loff_t lstart, loff_t lend);
 int filemap_fdatawait_range_keep_errors(struct address_space *mapping,
@@ -53,14 +54,10 @@ static inline int filemap_fdatawait(struct address_space *mapping)
 bool filemap_range_has_page(struct address_space *, loff_t lstart, loff_t lend);
 int filemap_write_and_wait_range(struct address_space *mapping,
 				 loff_t lstart, loff_t lend);
-int __filemap_fdatawrite_range(struct address_space *mapping,
-			       loff_t start, loff_t end, int sync_mode);
 int filemap_fdatawrite_range(struct address_space *mapping,
 			     loff_t start, loff_t end);
 int filemap_check_errors(struct address_space *mapping);
 void __filemap_set_wb_err(struct address_space *mapping, int err);
-int filemap_fdatawrite_wbc(struct address_space *mapping,
-			   struct writeback_control *wbc);
 int kiocb_write_and_wait(struct kiocb *iocb, size_t count);
 
 static inline int filemap_write_and_wait(struct address_space *mapping)
@@ -111,8 +111,7 @@ int generic_fadvise(struct file *file, loff_t offset, loff_t len, int advice)
 		spin_unlock(&file->f_lock);
 		break;
 	case POSIX_FADV_DONTNEED:
-		__filemap_fdatawrite_range(mapping, offset, endbyte,
-					   WB_SYNC_NONE);
+		filemap_flush_range(mapping, offset, endbyte);
 
 		/*
 		 * First and last FULL page! Partial pages are deliberately
mm/filemap.c
@@ -366,83 +366,60 @@ static int filemap_check_and_keep_errors(struct address_space *mapping)
 	return 0;
 }
 
-/**
- * filemap_fdatawrite_wbc - start writeback on mapping dirty pages in range
- * @mapping: address space structure to write
- * @wbc: the writeback_control controlling the writeout
- *
- * Call writepages on the mapping using the provided wbc to control the
- * writeout.
- *
- * Return: %0 on success, negative error code otherwise.
- */
-int filemap_fdatawrite_wbc(struct address_space *mapping,
-			   struct writeback_control *wbc)
+static int filemap_writeback(struct address_space *mapping, loff_t start,
+		loff_t end, enum writeback_sync_modes sync_mode,
+		long *nr_to_write)
 {
+	struct writeback_control wbc = {
+		.sync_mode = sync_mode,
+		.nr_to_write = nr_to_write ? *nr_to_write : LONG_MAX,
+		.range_start = start,
+		.range_end = end,
+	};
 	int ret;
 
 	if (!mapping_can_writeback(mapping) ||
 	    !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
 		return 0;
 
-	wbc_attach_fdatawrite_inode(wbc, mapping->host);
-	ret = do_writepages(mapping, wbc);
-	wbc_detach_inode(wbc);
+	wbc_attach_fdatawrite_inode(&wbc, mapping->host);
+	ret = do_writepages(mapping, &wbc);
+	wbc_detach_inode(&wbc);
+
+	if (!ret && nr_to_write)
+		*nr_to_write = wbc.nr_to_write;
 	return ret;
 }
-EXPORT_SYMBOL(filemap_fdatawrite_wbc);
 
 /**
- * __filemap_fdatawrite_range - start writeback on mapping dirty pages in range
+ * filemap_fdatawrite_range - start writeback on mapping dirty pages in range
  * @mapping: address space structure to write
  * @start: offset in bytes where the range starts
  * @end: offset in bytes where the range ends (inclusive)
- * @sync_mode: enable synchronous operation
  *
  * Start writeback against all of a mapping's dirty pages that lie
  * within the byte offsets <start, end> inclusive.
  *
- * If sync_mode is WB_SYNC_ALL then this is a "data integrity" operation, as
- * opposed to a regular memory cleansing writeback. The difference between
- * these two operations is that if a dirty page/buffer is encountered, it must
- * be waited upon, and not just skipped over.
+ * This is a data integrity operation that waits upon dirty or in writeback
+ * pages.
  *
  * Return: %0 on success, negative error code otherwise.
  */
-int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
-				loff_t end, int sync_mode)
-{
-	struct writeback_control wbc = {
-		.sync_mode = sync_mode,
-		.nr_to_write = LONG_MAX,
-		.range_start = start,
-		.range_end = end,
-	};
-
-	return filemap_fdatawrite_wbc(mapping, &wbc);
-}
-
-static inline int __filemap_fdatawrite(struct address_space *mapping,
-	int sync_mode)
-{
-	return __filemap_fdatawrite_range(mapping, 0, LLONG_MAX, sync_mode);
-}
-
-int filemap_fdatawrite(struct address_space *mapping)
-{
-	return __filemap_fdatawrite(mapping, WB_SYNC_ALL);
-}
-EXPORT_SYMBOL(filemap_fdatawrite);
-
 int filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
-				loff_t end)
+		loff_t end)
 {
-	return __filemap_fdatawrite_range(mapping, start, end, WB_SYNC_ALL);
+	return filemap_writeback(mapping, start, end, WB_SYNC_ALL, NULL);
 }
 EXPORT_SYMBOL(filemap_fdatawrite_range);
 
+int filemap_fdatawrite(struct address_space *mapping)
+{
+	return filemap_fdatawrite_range(mapping, 0, LLONG_MAX);
+}
+EXPORT_SYMBOL(filemap_fdatawrite);
+
 /**
- * filemap_fdatawrite_range_kick - start writeback on a range
+ * filemap_flush_range - start writeback on a range
  * @mapping: target address_space
  * @start: index to start writeback on
  * @end: last (inclusive) index for writeback
@@ -452,12 +429,12 @@ EXPORT_SYMBOL(filemap_fdatawrite_range);
  *
  * Return: %0 on success, negative error code otherwise.
  */
-int filemap_fdatawrite_range_kick(struct address_space *mapping, loff_t start,
+int filemap_flush_range(struct address_space *mapping, loff_t start,
 		loff_t end)
 {
-	return __filemap_fdatawrite_range(mapping, start, end, WB_SYNC_NONE);
+	return filemap_writeback(mapping, start, end, WB_SYNC_NONE, NULL);
 }
-EXPORT_SYMBOL_GPL(filemap_fdatawrite_range_kick);
+EXPORT_SYMBOL_GPL(filemap_flush_range);
 
 /**
  * filemap_flush - mostly a non-blocking flush
@@ -470,10 +447,22 @@ EXPORT_SYMBOL_GPL(filemap_fdatawrite_range_kick);
  */
 int filemap_flush(struct address_space *mapping)
 {
-	return __filemap_fdatawrite(mapping, WB_SYNC_NONE);
+	return filemap_flush_range(mapping, 0, LLONG_MAX);
 }
 EXPORT_SYMBOL(filemap_flush);
 
+/*
+ * Start writeback on @nr_to_write pages from @mapping. No one but the existing
+ * btrfs caller should be using this. Talk to linux-mm if you think adding a
+ * new caller is a good idea.
+ */
+int filemap_flush_nr(struct address_space *mapping, long *nr_to_write)
+{
+	return filemap_writeback(mapping, 0, LLONG_MAX, WB_SYNC_NONE,
+			nr_to_write);
+}
+EXPORT_SYMBOL_FOR_MODULES(filemap_flush_nr, "btrfs");
+
 /**
  * filemap_range_has_page - check if a page exists in range.
  * @mapping: address space within which to check
@@ -691,8 +680,7 @@ int filemap_write_and_wait_range(struct address_space *mapping,
 		return 0;
 
 	if (mapping_needs_writeback(mapping)) {
-		err = __filemap_fdatawrite_range(mapping, lstart, lend,
-						 WB_SYNC_ALL);
+		err = filemap_fdatawrite_range(mapping, lstart, lend);
 		/*
 		 * Even if the above returned error, the pages may be
 		 * written partially (e.g. -ENOSPC), so we wait for it.
@@ -794,8 +782,7 @@ int file_write_and_wait_range(struct file *file, loff_t lstart, loff_t lend)
 		return 0;
 
 	if (mapping_needs_writeback(mapping)) {
-		err = __filemap_fdatawrite_range(mapping, lstart, lend,
-						 WB_SYNC_ALL);
+		err = filemap_fdatawrite_range(mapping, lstart, lend);
 		/* See comment of filemap_write_and_wait() */
 		if (err != -EIO)
 			__filemap_fdatawait_range(mapping, lstart, lend);
@@ -4457,16 +4444,8 @@ int filemap_invalidate_inode(struct inode *inode, bool flush,
 	unmap_mapping_pages(mapping, first, nr, false);
 
 	/* Write back the data if we're asked to. */
-	if (flush) {
-		struct writeback_control wbc = {
-			.sync_mode = WB_SYNC_ALL,
-			.nr_to_write = LONG_MAX,
-			.range_start = start,
-			.range_end = end,
-		};
-
-		filemap_fdatawrite_wbc(mapping, &wbc);
-	}
+	if (flush)
+		filemap_fdatawrite_range(mapping, start, end);
 
 	/* Wait for writeback to complete on all folios and discard. */
 	invalidate_inode_pages2_range(mapping, start / PAGE_SIZE, end / PAGE_SIZE);
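filemap_flush_nr() is the one remaining low-level entry point, kept for btrfs: it starts WB_SYNC_NONE writeback over the whole mapping and, on success, stores the remaining page budget back through the pointer. A hedged sketch of how a caller loops on it, modelled on the start_delalloc_inodes() change above (the flush_mappings() wrapper itself is illustrative, not code from the tree):

static int flush_mappings(struct address_space **mappings, int count,
			  long *nr_to_write)
{
	int i, ret = 0;

	for (i = 0; i < count; i++) {
		/*
		 * On success filemap_flush_nr() writes the still unused page
		 * budget back into *nr_to_write, so the loop can stop once
		 * enough pages have been queued for writeback.
		 */
		ret = filemap_flush_nr(mappings[i], nr_to_write);
		if (ret || *nr_to_write <= 0)
			break;
	}
	return ret;
}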