nfs: adapt to breakup of struct file_lock
Most of the existing APIs have remained the same, but subsystems that access
file_lock fields directly need to reach into struct file_lock_core now.

Signed-off-by: Jeff Layton <jlayton@kernel.org>
Link: https://lore.kernel.org/r/20240131-flsplit-v3-41-c6129007ee8d@kernel.org
Reviewed-by: NeilBrown <neilb@suse.de>
Signed-off-by: Christian Brauner <brauner@kernel.org>
committed by Christian Brauner
parent eb8ed7c6ab
commit dd1fac6ae6
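Background for the hunks that follow: the flsplit series moves the generic locking fields out of struct file_lock into an embedded struct file_lock_core member named c, so direct accesses such as fl->fl_flags, fl->fl_type and fl->fl_file become fl->c.flc_flags, fl->c.flc_type and fl->c.flc_file, while filesystem-private members like fl_u stay where they were. The sketch below is a minimal userspace illustration of that substitution, not the real kernel definitions from include/linux/filelock.h; only the member names c, flc_file, flc_flags and flc_type are taken from this diff, everything else is simplified for the example.

#include <fcntl.h>
#include <stdio.h>

/* Illustrative stand-ins; the real structs in include/linux/filelock.h
 * carry many more fields.  FL_POSIX's value here is for the example only. */
#define FL_POSIX 1

struct file;                            /* opaque in this sketch */

struct file_lock_core {
        struct file   *flc_file;        /* was file_lock::fl_file  */
        unsigned char  flc_flags;       /* was file_lock::fl_flags */
        unsigned char  flc_type;        /* was file_lock::fl_type  */
};

struct file_lock {
        struct file_lock_core c;        /* generic core, embedded first */
        /* filesystem-specific members (e.g. fl_u) remain here */
};

int main(void)
{
        struct file_lock fl = {
                .c.flc_flags = FL_POSIX,
                .c.flc_type  = F_RDLCK,
        };

        /* Before the split: fl.fl_flags, fl.fl_type.
         * After the split:  fl.c.flc_flags, fl.c.flc_type -- the same
         * substitution the hunks below perform in nfs4proc.c. */
        if (fl.c.flc_flags & FL_POSIX)
                printf("POSIX lock, type %d\n", fl.c.flc_type);

        return 0;
}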
@@ -6800,7 +6800,7 @@ static int _nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock
 	status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
 	switch (status) {
 	case 0:
-		request->fl_type = F_UNLCK;
+		request->c.flc_type = F_UNLCK;
 		break;
 	case -NFS4ERR_DENIED:
 		status = 0;
@@ -7018,8 +7018,8 @@ static struct rpc_task *nfs4_do_unlck(struct file_lock *fl,
 	/* Ensure this is an unlock - when canceling a lock, the
 	 * canceled lock is passed in, and it won't be an unlock.
 	 */
-	fl->fl_type = F_UNLCK;
-	if (fl->fl_flags & FL_CLOSE)
+	fl->c.flc_type = F_UNLCK;
+	if (fl->c.flc_flags & FL_CLOSE)
 		set_bit(NFS_CONTEXT_UNLOCK, &ctx->flags);
 
 	data = nfs4_alloc_unlockdata(fl, ctx, lsp, seqid);
@@ -7045,11 +7045,11 @@ static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *
 	struct rpc_task *task;
 	struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t);
 	int status = 0;
-	unsigned char saved_flags = request->fl_flags;
+	unsigned char saved_flags = request->c.flc_flags;
 
 	status = nfs4_set_lock_state(state, request);
 	/* Unlock _before_ we do the RPC call */
-	request->fl_flags |= FL_EXISTS;
+	request->c.flc_flags |= FL_EXISTS;
 	/* Exclude nfs_delegation_claim_locks() */
 	mutex_lock(&sp->so_delegreturn_mutex);
 	/* Exclude nfs4_reclaim_open_stateid() - note nesting! */
@@ -7073,14 +7073,16 @@ static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *
 	status = -ENOMEM;
 	if (IS_ERR(seqid))
 		goto out;
-	task = nfs4_do_unlck(request, nfs_file_open_context(request->fl_file), lsp, seqid);
+	task = nfs4_do_unlck(request,
+			     nfs_file_open_context(request->c.flc_file),
+			     lsp, seqid);
 	status = PTR_ERR(task);
 	if (IS_ERR(task))
 		goto out;
 	status = rpc_wait_for_completion_task(task);
 	rpc_put_task(task);
 out:
-	request->fl_flags = saved_flags;
+	request->c.flc_flags = saved_flags;
 	trace_nfs4_unlock(request, state, F_SETLK, status);
 	return status;
 }
@@ -7191,7 +7193,7 @@ static void nfs4_lock_done(struct rpc_task *task, void *calldata)
 		renew_lease(NFS_SERVER(d_inode(data->ctx->dentry)),
 				data->timestamp);
 		if (data->arg.new_lock && !data->cancelled) {
-			data->fl.fl_flags &= ~(FL_SLEEP | FL_ACCESS);
+			data->fl.c.flc_flags &= ~(FL_SLEEP | FL_ACCESS);
 			if (locks_lock_inode_wait(lsp->ls_state->inode, &data->fl) < 0)
 				goto out_restart;
 		}
@@ -7292,7 +7294,8 @@ static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *f
 	if (nfs_server_capable(state->inode, NFS_CAP_MOVEABLE))
 		task_setup_data.flags |= RPC_TASK_MOVEABLE;
 
-	data = nfs4_alloc_lockdata(fl, nfs_file_open_context(fl->fl_file),
+	data = nfs4_alloc_lockdata(fl,
+				   nfs_file_open_context(fl->c.flc_file),
 				   fl->fl_u.nfs4_fl.owner, GFP_KERNEL);
 	if (data == NULL)
 		return -ENOMEM;
@@ -7398,10 +7401,10 @@ static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock
 {
 	struct nfs_inode *nfsi = NFS_I(state->inode);
 	struct nfs4_state_owner *sp = state->owner;
-	unsigned char flags = request->fl_flags;
+	unsigned char flags = request->c.flc_flags;
 	int status;
 
-	request->fl_flags |= FL_ACCESS;
+	request->c.flc_flags |= FL_ACCESS;
 	status = locks_lock_inode_wait(state->inode, request);
 	if (status < 0)
 		goto out;
@@ -7410,7 +7413,7 @@ static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock
 	if (test_bit(NFS_DELEGATED_STATE, &state->flags)) {
 		/* Yes: cache locks! */
 		/* ...but avoid races with delegation recall... */
-		request->fl_flags = flags & ~FL_SLEEP;
+		request->c.flc_flags = flags & ~FL_SLEEP;
 		status = locks_lock_inode_wait(state->inode, request);
 		up_read(&nfsi->rwsem);
 		mutex_unlock(&sp->so_delegreturn_mutex);
@@ -7420,7 +7423,7 @@ static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock
 	mutex_unlock(&sp->so_delegreturn_mutex);
 	status = _nfs4_do_setlk(state, cmd, request, NFS_LOCK_NEW);
 out:
-	request->fl_flags = flags;
+	request->c.flc_flags = flags;
 	return status;
 }
 
@@ -7571,7 +7574,7 @@ nfs4_proc_lock(struct file *filp, int cmd, struct file_lock *request)
 	if (state == NULL)
 		return -ENOLCK;
 
-	if ((request->fl_flags & FL_POSIX) &&
+	if ((request->c.flc_flags & FL_POSIX) &&
 	    !test_bit(NFS_STATE_POSIX_LOCKS, &state->flags))
 		return -ENOLCK;
 
@@ -7579,7 +7582,7 @@ nfs4_proc_lock(struct file *filp, int cmd, struct file_lock *request)
 	 * Don't rely on the VFS having checked the file open mode,
 	 * since it won't do this for flock() locks.
 	 */
-	switch (request->fl_type) {
+	switch (request->c.flc_type) {
 	case F_RDLCK:
 		if (!(filp->f_mode & FMODE_READ))
 			return -EBADF;