From 6021d62c677f12f2f975bb1bf2389730e1d9f746 Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Wed, 31 Jan 2024 18:01:44 -0500 Subject: filelock: rename fl_pid variable in lock_get_status In later patches we're going to introduce some macros that will clash with the variable name here. Rename it. Signed-off-by: Jeff Layton Link: https://lore.kernel.org/r/20240131-flsplit-v3-3-c6129007ee8d@kernel.org Reviewed-by: NeilBrown Signed-off-by: Christian Brauner --- fs/locks.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/locks.c b/fs/locks.c index cc7c117ee192..1eceaa56e47f 100644 --- a/fs/locks.c +++ b/fs/locks.c @@ -2695,11 +2695,11 @@ static void lock_get_status(struct seq_file *f, struct file_lock *fl, loff_t id, char *pfx, int repeat) { struct inode *inode = NULL; - unsigned int fl_pid; + unsigned int pid; struct pid_namespace *proc_pidns = proc_pid_ns(file_inode(f->file)->i_sb); int type; - fl_pid = locks_translate_pid(fl, proc_pidns); + pid = locks_translate_pid(fl, proc_pidns); /* * If lock owner is dead (and pid is freed) or not visible in current * pidns, zero is shown as a pid value. Check lock info from @@ -2747,11 +2747,11 @@ static void lock_get_status(struct seq_file *f, struct file_lock *fl, (type == F_RDLCK) ? "READ" : "UNLCK"); if (inode) { /* userspace relies on this representation of dev_t */ - seq_printf(f, "%d %02x:%02x:%lu ", fl_pid, + seq_printf(f, "%d %02x:%02x:%lu ", pid, MAJOR(inode->i_sb->s_dev), MINOR(inode->i_sb->s_dev), inode->i_ino); } else { - seq_printf(f, "%d :0 ", fl_pid); + seq_printf(f, "%d :0 ", pid); } if (IS_POSIX(fl)) { if (fl->fl_end == OFFSET_MAX) -- cgit v1.2.3 From 75cabec0111b7ccb140d917cc9c481e845cc3498 Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Wed, 31 Jan 2024 18:01:45 -0500 Subject: filelock: add some new helper functions In later patches we're going to embed some common fields into a new structure inside struct file_lock. Smooth the transition by adding some new helper functions, and converting the core file locking code to use them. Signed-off-by: Jeff Layton Link: https://lore.kernel.org/r/20240131-flsplit-v3-4-c6129007ee8d@kernel.org Reviewed-by: NeilBrown Signed-off-by: Christian Brauner --- fs/locks.c | 18 +++++++++--------- include/linux/filelock.h | 44 ++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 53 insertions(+), 9 deletions(-) (limited to 'fs') diff --git a/fs/locks.c b/fs/locks.c index 1eceaa56e47f..149070fd3b66 100644 --- a/fs/locks.c +++ b/fs/locks.c @@ -674,7 +674,7 @@ static void __locks_wake_up_blocks(struct file_lock *blocker) if (waiter->fl_lmops && waiter->fl_lmops->lm_notify) waiter->fl_lmops->lm_notify(waiter); else - wake_up(&waiter->fl_wait); + locks_wake_up(waiter); /* * The setting of fl_blocker to NULL marks the "done" @@ -841,9 +841,9 @@ locks_delete_lock_ctx(struct file_lock *fl, struct list_head *dispose) static bool locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl) { - if (sys_fl->fl_type == F_WRLCK) + if (lock_is_write(sys_fl)) return true; - if (caller_fl->fl_type == F_WRLCK) + if (lock_is_write(caller_fl)) return true; return false; } @@ -874,7 +874,7 @@ static bool posix_test_locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl) { /* F_UNLCK checks any locks on the same fd. 
*/ - if (caller_fl->fl_type == F_UNLCK) { + if (lock_is_unlock(caller_fl)) { if (!posix_same_owner(caller_fl, sys_fl)) return false; return locks_overlap(caller_fl, sys_fl); @@ -1055,7 +1055,7 @@ static int flock_lock_inode(struct inode *inode, struct file_lock *request) break; } - if (request->fl_type == F_UNLCK) { + if (lock_is_unlock(request)) { if ((request->fl_flags & FL_EXISTS) && !found) error = -ENOENT; goto out; @@ -1107,7 +1107,7 @@ static int posix_lock_inode(struct inode *inode, struct file_lock *request, ctx = locks_get_lock_context(inode, request->fl_type); if (!ctx) - return (request->fl_type == F_UNLCK) ? 0 : -ENOMEM; + return lock_is_unlock(request) ? 0 : -ENOMEM; /* * We may need two file_lock structures for this operation, @@ -1228,7 +1228,7 @@ retry: continue; if (fl->fl_start > request->fl_end) break; - if (request->fl_type == F_UNLCK) + if (lock_is_unlock(request)) added = true; if (fl->fl_start < request->fl_start) left = fl; @@ -1279,7 +1279,7 @@ retry: error = 0; if (!added) { - if (request->fl_type == F_UNLCK) { + if (lock_is_unlock(request)) { if (request->fl_flags & FL_EXISTS) error = -ENOENT; goto out; @@ -1608,7 +1608,7 @@ void lease_get_mtime(struct inode *inode, struct timespec64 *time) spin_lock(&ctx->flc_lock); fl = list_first_entry_or_null(&ctx->flc_lease, struct file_lock, fl_list); - if (fl && (fl->fl_type == F_WRLCK)) + if (fl && lock_is_write(fl)) has_lease = true; spin_unlock(&ctx->flc_lock); } diff --git a/include/linux/filelock.h b/include/linux/filelock.h index 085ff6ba0653..a3cb59b7922a 100644 --- a/include/linux/filelock.h +++ b/include/linux/filelock.h @@ -147,6 +147,29 @@ int fcntl_setlk64(unsigned int, struct file *, unsigned int, int fcntl_setlease(unsigned int fd, struct file *filp, int arg); int fcntl_getlease(struct file *filp); +static inline bool lock_is_unlock(struct file_lock *fl) +{ + return fl->fl_type == F_UNLCK; +} + +static inline bool lock_is_read(struct file_lock *fl) +{ + return fl->fl_type == F_RDLCK; +} + +static inline bool lock_is_write(struct file_lock *fl) +{ + return fl->fl_type == F_WRLCK; +} + +static inline void locks_wake_up(struct file_lock *fl) +{ + wake_up(&fl->fl_wait); +} + +/* for walking lists of file_locks linked by fl_list */ +#define for_each_file_lock(_fl, _head) list_for_each_entry(_fl, _head, fl_list) + /* fs/locks.c */ void locks_free_lock_context(struct inode *inode); void locks_free_lock(struct file_lock *fl); @@ -223,6 +246,27 @@ static inline int fcntl_getlease(struct file *filp) return F_UNLCK; } +static inline bool lock_is_unlock(struct file_lock *fl) +{ + return false; +} + +static inline bool lock_is_read(struct file_lock *fl) +{ + return false; +} + +static inline bool lock_is_write(struct file_lock *fl) +{ + return false; +} + +static inline void locks_wake_up(struct file_lock *fl) +{ +} + +#define for_each_file_lock(_fl, _head) while(false) + static inline void locks_free_lock_context(struct inode *inode) { -- cgit v1.2.3 From 75a1bbe60a7425fcaa670bff58b8589b1f880810 Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Wed, 31 Jan 2024 18:01:46 -0500 Subject: 9p: rename fl_type variable in v9fs_file_do_lock In later patches, we're going to introduce some macros that conflict with the variable name here. Rename it. 
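Before the per-filesystem conversions that follow, a quick illustration of how the helpers added in the previous patch are meant to compose. This sketch is not part of the series; count_write_locks() is a hypothetical caller and assumes a populated struct file_lock_context:

static unsigned int count_write_locks(struct file_lock_context *ctx)
{
	struct file_lock *fl;
	unsigned int nr = 0;

	/* flc_posix is only walked under flc_lock, as in the patches below */
	spin_lock(&ctx->flc_lock);
	for_each_file_lock(fl, &ctx->flc_posix) {
		if (lock_is_write(fl))
			nr++;
	}
	spin_unlock(&ctx->flc_lock);
	return nr;
}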
Signed-off-by: Jeff Layton Link: https://lore.kernel.org/r/20240131-flsplit-v3-5-c6129007ee8d@kernel.org Reviewed-by: NeilBrown Signed-off-by: Christian Brauner --- fs/9p/vfs_file.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c index bae330c2f0cf..3df8aa1b5996 100644 --- a/fs/9p/vfs_file.c +++ b/fs/9p/vfs_file.c @@ -121,7 +121,6 @@ static int v9fs_file_do_lock(struct file *filp, int cmd, struct file_lock *fl) struct p9_fid *fid; uint8_t status = P9_LOCK_ERROR; int res = 0; - unsigned char fl_type; struct v9fs_session_info *v9ses; fid = filp->private_data; @@ -208,11 +207,12 @@ out_unlock: * it locally */ if (res < 0 && fl->fl_type != F_UNLCK) { - fl_type = fl->fl_type; + unsigned char type = fl->fl_type; + fl->fl_type = F_UNLCK; /* Even if this fails we want to return the remote error */ locks_lock_file_wait(filp, fl); - fl->fl_type = fl_type; + fl->fl_type = type; } if (flock.client_id != fid->clnt->name) kfree(flock.client_id); -- cgit v1.2.3 From 76698510f593daf8aa0582492090d0c2e484c3e8 Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Wed, 31 Jan 2024 18:01:47 -0500 Subject: afs: convert to using new filelock helpers Convert to using the new file locking helper functions. Also, in later patches we're going to introduce macros that conflict with the variable name in afs_next_locker. Rename it. Signed-off-by: Jeff Layton Link: https://lore.kernel.org/r/20240131-flsplit-v3-6-c6129007ee8d@kernel.org Reviewed-by: NeilBrown Signed-off-by: Christian Brauner --- fs/afs/flock.c | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) (limited to 'fs') diff --git a/fs/afs/flock.c b/fs/afs/flock.c index 9c6dea3139f5..4eee3d1ca5ad 100644 --- a/fs/afs/flock.c +++ b/fs/afs/flock.c @@ -93,13 +93,13 @@ static void afs_grant_locks(struct afs_vnode *vnode) bool exclusive = (vnode->lock_type == AFS_LOCK_WRITE); list_for_each_entry_safe(p, _p, &vnode->pending_locks, fl_u.afs.link) { - if (!exclusive && p->fl_type == F_WRLCK) + if (!exclusive && lock_is_write(p)) continue; list_move_tail(&p->fl_u.afs.link, &vnode->granted_locks); p->fl_u.afs.state = AFS_LOCK_GRANTED; trace_afs_flock_op(vnode, p, afs_flock_op_grant); - wake_up(&p->fl_wait); + locks_wake_up(p); } } @@ -112,25 +112,25 @@ static void afs_next_locker(struct afs_vnode *vnode, int error) { struct file_lock *p, *_p, *next = NULL; struct key *key = vnode->lock_key; - unsigned int fl_type = F_RDLCK; + unsigned int type = F_RDLCK; _enter(""); if (vnode->lock_type == AFS_LOCK_WRITE) - fl_type = F_WRLCK; + type = F_WRLCK; list_for_each_entry_safe(p, _p, &vnode->pending_locks, fl_u.afs.link) { if (error && - p->fl_type == fl_type && + p->fl_type == type && afs_file_key(p->fl_file) == key) { list_del_init(&p->fl_u.afs.link); p->fl_u.afs.state = error; - wake_up(&p->fl_wait); + locks_wake_up(p); } /* Select the next locker to hand off to. 
*/ if (next && - (next->fl_type == F_WRLCK || p->fl_type == F_RDLCK)) + (lock_is_write(next) || lock_is_read(p))) continue; next = p; } @@ -142,7 +142,7 @@ static void afs_next_locker(struct afs_vnode *vnode, int error) afs_set_lock_state(vnode, AFS_VNODE_LOCK_SETTING); next->fl_u.afs.state = AFS_LOCK_YOUR_TRY; trace_afs_flock_op(vnode, next, afs_flock_op_wake); - wake_up(&next->fl_wait); + locks_wake_up(next); } else { afs_set_lock_state(vnode, AFS_VNODE_LOCK_NONE); trace_afs_flock_ev(vnode, NULL, afs_flock_no_lockers, 0); @@ -166,7 +166,7 @@ static void afs_kill_lockers_enoent(struct afs_vnode *vnode) struct file_lock, fl_u.afs.link); list_del_init(&p->fl_u.afs.link); p->fl_u.afs.state = -ENOENT; - wake_up(&p->fl_wait); + locks_wake_up(p); } key_put(vnode->lock_key); @@ -471,7 +471,7 @@ static int afs_do_setlk(struct file *file, struct file_lock *fl) fl->fl_u.afs.state = AFS_LOCK_PENDING; partial = (fl->fl_start != 0 || fl->fl_end != OFFSET_MAX); - type = (fl->fl_type == F_RDLCK) ? AFS_LOCK_READ : AFS_LOCK_WRITE; + type = lock_is_read(fl) ? AFS_LOCK_READ : AFS_LOCK_WRITE; if (mode == afs_flock_mode_write && partial) type = AFS_LOCK_WRITE; @@ -734,7 +734,7 @@ static int afs_do_getlk(struct file *file, struct file_lock *fl) /* check local lock records first */ posix_test_lock(file, fl); - if (fl->fl_type == F_UNLCK) { + if (lock_is_unlock(fl)) { /* no local locks; consult the server */ ret = afs_fetch_status(vnode, key, false, NULL); if (ret < 0) @@ -778,7 +778,7 @@ int afs_lock(struct file *file, int cmd, struct file_lock *fl) fl->fl_u.afs.debug_id = atomic_inc_return(&afs_file_lock_debug_id); trace_afs_flock_op(vnode, fl, afs_flock_op_lock); - if (fl->fl_type == F_UNLCK) + if (lock_is_unlock(fl)) ret = afs_do_unlk(file, fl); else ret = afs_do_setlk(file, fl); @@ -820,7 +820,7 @@ int afs_flock(struct file *file, int cmd, struct file_lock *fl) trace_afs_flock_op(vnode, fl, afs_flock_op_flock); /* we're simulating flock() locks using posix locks on the server */ - if (fl->fl_type == F_UNLCK) + if (lock_is_unlock(fl)) ret = afs_do_unlk(file, fl); else ret = afs_do_setlk(file, fl); -- cgit v1.2.3 From 75e9570c93c70e8ad7be59cce82cea3872d8e8a7 Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Wed, 31 Jan 2024 18:01:48 -0500 Subject: ceph: convert to using new filelock helpers Convert to using the new file locking helper functions. 
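As in the afs hunks above, several conversions call lock_is_unlock() right after posix_test_lock(); that works because posix_test_lock() sets fl_type to F_UNLCK when it finds no conflicting lock. A minimal sketch of the pattern, illustrative only (range_is_locked() is hypothetical, and fl is assumed to already describe the range and type being probed):

static bool range_is_locked(struct file *filp, struct file_lock *fl)
{
	/* on no conflict, posix_test_lock() leaves fl_type set to F_UNLCK */
	posix_test_lock(filp, fl);
	return !lock_is_unlock(fl);
}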
Signed-off-by: Jeff Layton Link: https://lore.kernel.org/r/20240131-flsplit-v3-7-c6129007ee8d@kernel.org Reviewed-by: NeilBrown Signed-off-by: Christian Brauner --- fs/ceph/locks.c | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) (limited to 'fs') diff --git a/fs/ceph/locks.c b/fs/ceph/locks.c index e07ad29ff8b9..80ebe1d6c67d 100644 --- a/fs/ceph/locks.c +++ b/fs/ceph/locks.c @@ -273,19 +273,19 @@ int ceph_lock(struct file *file, int cmd, struct file_lock *fl) } spin_unlock(&ci->i_ceph_lock); if (err < 0) { - if (op == CEPH_MDS_OP_SETFILELOCK && F_UNLCK == fl->fl_type) + if (op == CEPH_MDS_OP_SETFILELOCK && lock_is_unlock(fl)) posix_lock_file(file, fl, NULL); return err; } - if (F_RDLCK == fl->fl_type) + if (lock_is_read(fl)) lock_cmd = CEPH_LOCK_SHARED; - else if (F_WRLCK == fl->fl_type) + else if (lock_is_write(fl)) lock_cmd = CEPH_LOCK_EXCL; else lock_cmd = CEPH_LOCK_UNLOCK; - if (op == CEPH_MDS_OP_SETFILELOCK && F_UNLCK == fl->fl_type) { + if (op == CEPH_MDS_OP_SETFILELOCK && lock_is_unlock(fl)) { err = try_unlock_file(file, fl); if (err <= 0) return err; @@ -333,7 +333,7 @@ int ceph_flock(struct file *file, int cmd, struct file_lock *fl) } spin_unlock(&ci->i_ceph_lock); if (err < 0) { - if (F_UNLCK == fl->fl_type) + if (lock_is_unlock(fl)) locks_lock_file_wait(file, fl); return err; } @@ -341,14 +341,14 @@ int ceph_flock(struct file *file, int cmd, struct file_lock *fl) if (IS_SETLKW(cmd)) wait = 1; - if (F_RDLCK == fl->fl_type) + if (lock_is_read(fl)) lock_cmd = CEPH_LOCK_SHARED; - else if (F_WRLCK == fl->fl_type) + else if (lock_is_write(fl)) lock_cmd = CEPH_LOCK_EXCL; else lock_cmd = CEPH_LOCK_UNLOCK; - if (F_UNLCK == fl->fl_type) { + if (lock_is_unlock(fl)) { err = try_unlock_file(file, fl); if (err <= 0) return err; @@ -385,9 +385,9 @@ void ceph_count_locks(struct inode *inode, int *fcntl_count, int *flock_count) ctx = locks_inode_context(inode); if (ctx) { spin_lock(&ctx->flc_lock); - list_for_each_entry(lock, &ctx->flc_posix, fl_list) + for_each_file_lock(lock, &ctx->flc_posix) ++(*fcntl_count); - list_for_each_entry(lock, &ctx->flc_flock, fl_list) + for_each_file_lock(lock, &ctx->flc_flock) ++(*flock_count); spin_unlock(&ctx->flc_lock); } @@ -453,7 +453,7 @@ int ceph_encode_locks_to_buffer(struct inode *inode, return 0; spin_lock(&ctx->flc_lock); - list_for_each_entry(lock, &ctx->flc_posix, fl_list) { + for_each_file_lock(lock, &ctx->flc_posix) { ++seen_fcntl; if (seen_fcntl > num_fcntl_locks) { err = -ENOSPC; @@ -464,7 +464,7 @@ int ceph_encode_locks_to_buffer(struct inode *inode, goto fail; ++l; } - list_for_each_entry(lock, &ctx->flc_flock, fl_list) { + for_each_file_lock(lock, &ctx->flc_flock) { ++seen_flock; if (seen_flock > num_flock_locks) { err = -ENOSPC; -- cgit v1.2.3 From 11ff73082f17e1adc1b717264d9b74661fc5b229 Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Wed, 31 Jan 2024 18:01:49 -0500 Subject: dlm: convert to using new filelock helpers Convert to using the new file locking helper functions. Also, in later patches we're going to introduce some temporary macros with names that clash with the variable name in dlm_posix_unlock. Rename it. 
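The clash these renames avoid is a preprocessor one. A hedged illustration (not taken from any patch; it assumes the temporary compat macro and the embedded file_lock_core added later in the series):

#define fl_flags c.flc_flags	/* temporary macro introduced later in the series */

static void why_locals_get_renamed(struct file_lock *fl)
{
	/*
	 * With the macro above visible, a local declared as
	 *	unsigned char fl_flags = fl->fl_flags;
	 * preprocesses to "unsigned char c.flc_flags = fl->c.flc_flags;",
	 * which is not valid C. Renaming the local sidesteps the clash:
	 */
	unsigned char saved_flags = fl->fl_flags;	/* expands to fl->c.flc_flags */

	(void)saved_flags;
}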
Signed-off-by: Jeff Layton Link: https://lore.kernel.org/r/20240131-flsplit-v3-8-c6129007ee8d@kernel.org Reviewed-by: NeilBrown Signed-off-by: Christian Brauner --- fs/dlm/plock.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'fs') diff --git a/fs/dlm/plock.c b/fs/dlm/plock.c index d814c5121367..42c596b900d4 100644 --- a/fs/dlm/plock.c +++ b/fs/dlm/plock.c @@ -139,7 +139,7 @@ int dlm_posix_lock(dlm_lockspace_t *lockspace, u64 number, struct file *file, op->info.optype = DLM_PLOCK_OP_LOCK; op->info.pid = fl->fl_pid; - op->info.ex = (fl->fl_type == F_WRLCK); + op->info.ex = (lock_is_write(fl)); op->info.wait = !!(fl->fl_flags & FL_SLEEP); op->info.fsid = ls->ls_global_id; op->info.number = number; @@ -291,7 +291,7 @@ int dlm_posix_unlock(dlm_lockspace_t *lockspace, u64 number, struct file *file, struct dlm_ls *ls; struct plock_op *op; int rv; - unsigned char fl_flags = fl->fl_flags; + unsigned char saved_flags = fl->fl_flags; ls = dlm_find_lockspace_local(lockspace); if (!ls) @@ -345,7 +345,7 @@ out_free: dlm_release_plock_op(op); out: dlm_put_lockspace(ls); - fl->fl_flags = fl_flags; + fl->fl_flags = saved_flags; return rv; } EXPORT_SYMBOL_GPL(dlm_posix_unlock); @@ -376,7 +376,7 @@ int dlm_posix_cancel(dlm_lockspace_t *lockspace, u64 number, struct file *file, memset(&info, 0, sizeof(info)); info.pid = fl->fl_pid; - info.ex = (fl->fl_type == F_WRLCK); + info.ex = (lock_is_write(fl)); info.fsid = ls->ls_global_id; dlm_put_lockspace(ls); info.number = number; @@ -438,7 +438,7 @@ int dlm_posix_get(dlm_lockspace_t *lockspace, u64 number, struct file *file, op->info.optype = DLM_PLOCK_OP_GET; op->info.pid = fl->fl_pid; - op->info.ex = (fl->fl_type == F_WRLCK); + op->info.ex = (lock_is_write(fl)); op->info.fsid = ls->ls_global_id; op->info.number = number; op->info.start = fl->fl_start; -- cgit v1.2.3 From b4c6d52d8a81c53a8759e382e8000597909f0615 Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Wed, 31 Jan 2024 18:01:50 -0500 Subject: gfs2: convert to using new filelock helpers Convert to using the new file locking helper functions. Signed-off-by: Jeff Layton Link: https://lore.kernel.org/r/20240131-flsplit-v3-9-c6129007ee8d@kernel.org Reviewed-by: NeilBrown Signed-off-by: Christian Brauner --- fs/gfs2/file.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c index 992ca4effb50..6c25aea30f1b 100644 --- a/fs/gfs2/file.c +++ b/fs/gfs2/file.c @@ -1443,7 +1443,7 @@ static int gfs2_lock(struct file *file, int cmd, struct file_lock *fl) if (!(fl->fl_flags & FL_POSIX)) return -ENOLCK; if (gfs2_withdrawing_or_withdrawn(sdp)) { - if (fl->fl_type == F_UNLCK) + if (lock_is_unlock(fl)) locks_lock_file_wait(file, fl); return -EIO; } @@ -1451,7 +1451,7 @@ static int gfs2_lock(struct file *file, int cmd, struct file_lock *fl) return dlm_posix_cancel(ls->ls_dlm, ip->i_no_addr, file, fl); else if (IS_GETLK(cmd)) return dlm_posix_get(ls->ls_dlm, ip->i_no_addr, file, fl); - else if (fl->fl_type == F_UNLCK) + else if (lock_is_unlock(fl)) return dlm_posix_unlock(ls->ls_dlm, ip->i_no_addr, file, fl); else return dlm_posix_lock(ls->ls_dlm, ip->i_no_addr, file, cmd, fl); @@ -1483,7 +1483,7 @@ static int do_flock(struct file *file, int cmd, struct file_lock *fl) int error = 0; int sleeptime; - state = (fl->fl_type == F_WRLCK) ? LM_ST_EXCLUSIVE : LM_ST_SHARED; + state = (lock_is_write(fl)) ? 
LM_ST_EXCLUSIVE : LM_ST_SHARED; flags = GL_EXACT | GL_NOPID; if (!IS_SETLKW(cmd)) flags |= LM_FLAG_TRY_1CB; @@ -1560,7 +1560,7 @@ static int gfs2_flock(struct file *file, int cmd, struct file_lock *fl) if (!(fl->fl_flags & FL_FLOCK)) return -ENOLCK; - if (fl->fl_type == F_UNLCK) { + if (lock_is_unlock(fl)) { do_unflock(file, fl); return 0; } else { -- cgit v1.2.3 From 872584f1bb983a688547509141b03b37bdb28840 Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Wed, 31 Jan 2024 18:01:51 -0500 Subject: lockd: convert to using new filelock helpers Convert to using the new file locking helper functions. Also in later patches we're going to introduce some macros with names that clash with the variable names in nlmclnt_lock. Rename them. Signed-off-by: Jeff Layton Link: https://lore.kernel.org/r/20240131-flsplit-v3-10-c6129007ee8d@kernel.org Reviewed-by: NeilBrown Signed-off-by: Christian Brauner --- fs/lockd/clntproc.c | 20 ++++++++++---------- fs/lockd/svcsubs.c | 6 +++--- 2 files changed, 13 insertions(+), 13 deletions(-) (limited to 'fs') diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c index fba6c7fa7474..cc596748e359 100644 --- a/fs/lockd/clntproc.c +++ b/fs/lockd/clntproc.c @@ -522,8 +522,8 @@ nlmclnt_lock(struct nlm_rqst *req, struct file_lock *fl) struct nlm_host *host = req->a_host; struct nlm_res *resp = &req->a_res; struct nlm_wait block; - unsigned char fl_flags = fl->fl_flags; - unsigned char fl_type; + unsigned char flags = fl->fl_flags; + unsigned char type; __be32 b_status; int status = -ENOLCK; @@ -533,7 +533,7 @@ nlmclnt_lock(struct nlm_rqst *req, struct file_lock *fl) fl->fl_flags |= FL_ACCESS; status = do_vfs_lock(fl); - fl->fl_flags = fl_flags; + fl->fl_flags = flags; if (status < 0) goto out; @@ -595,7 +595,7 @@ again: if (do_vfs_lock(fl) < 0) printk(KERN_WARNING "%s: VFS is out of sync with lock manager!\n", __func__); up_read(&host->h_rwsem); - fl->fl_flags = fl_flags; + fl->fl_flags = flags; status = 0; } if (status < 0) @@ -605,7 +605,7 @@ again: * cases NLM_LCK_DENIED is returned for a permanent error. So * turn it into an ENOLCK. 
*/ - if (resp->status == nlm_lck_denied && (fl_flags & FL_SLEEP)) + if (resp->status == nlm_lck_denied && (flags & FL_SLEEP)) status = -ENOLCK; else status = nlm_stat_to_errno(resp->status); @@ -622,13 +622,13 @@ out_unlock: req->a_host->h_addrlen, req->a_res.status); dprintk("lockd: lock attempt ended in fatal error.\n" " Attempting to unlock.\n"); - fl_type = fl->fl_type; + type = fl->fl_type; fl->fl_type = F_UNLCK; down_read(&host->h_rwsem); do_vfs_lock(fl); up_read(&host->h_rwsem); - fl->fl_type = fl_type; - fl->fl_flags = fl_flags; + fl->fl_type = type; + fl->fl_flags = flags; nlmclnt_async_call(cred, req, NLMPROC_UNLOCK, &nlmclnt_unlock_ops); return status; } @@ -683,7 +683,7 @@ nlmclnt_unlock(struct nlm_rqst *req, struct file_lock *fl) struct nlm_host *host = req->a_host; struct nlm_res *resp = &req->a_res; int status; - unsigned char fl_flags = fl->fl_flags; + unsigned char flags = fl->fl_flags; /* * Note: the server is supposed to either grant us the unlock @@ -694,7 +694,7 @@ nlmclnt_unlock(struct nlm_rqst *req, struct file_lock *fl) down_read(&host->h_rwsem); status = do_vfs_lock(fl); up_read(&host->h_rwsem); - fl->fl_flags = fl_flags; + fl->fl_flags = flags; if (status == -ENOENT) { status = 0; goto out; diff --git a/fs/lockd/svcsubs.c b/fs/lockd/svcsubs.c index e3b6229e7ae5..2f33c187b876 100644 --- a/fs/lockd/svcsubs.c +++ b/fs/lockd/svcsubs.c @@ -73,7 +73,7 @@ static inline unsigned int file_hash(struct nfs_fh *f) int lock_to_openmode(struct file_lock *lock) { - return (lock->fl_type == F_WRLCK) ? O_WRONLY : O_RDONLY; + return (lock_is_write(lock)) ? O_WRONLY : O_RDONLY; } /* @@ -218,7 +218,7 @@ nlm_traverse_locks(struct nlm_host *host, struct nlm_file *file, again: file->f_locks = 0; spin_lock(&flctx->flc_lock); - list_for_each_entry(fl, &flctx->flc_posix, fl_list) { + for_each_file_lock(fl, &flctx->flc_posix) { if (fl->fl_lmops != &nlmsvc_lock_operations) continue; @@ -272,7 +272,7 @@ nlm_file_inuse(struct nlm_file *file) if (flctx && !list_empty_careful(&flctx->flc_posix)) { spin_lock(&flctx->flc_lock); - list_for_each_entry(fl, &flctx->flc_posix, fl_list) { + for_each_file_lock(fl, &flctx->flc_posix) { if (fl->fl_lmops == &nlmsvc_lock_operations) { spin_unlock(&flctx->flc_lock); return 1; -- cgit v1.2.3 From d7c9616be0759c1cfb44a68ba838548d22b98484 Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Wed, 31 Jan 2024 18:01:52 -0500 Subject: nfs: convert to using new filelock helpers Convert to using the new file locking helper functions. Also, in later patches we're going to introduce some temporary macros with names that clash with the variable name in nfs4_proc_unlck. Rename it. 
Signed-off-by: Jeff Layton Link: https://lore.kernel.org/r/20240131-flsplit-v3-11-c6129007ee8d@kernel.org Reviewed-by: NeilBrown Signed-off-by: Christian Brauner --- fs/nfs/delegation.c | 2 +- fs/nfs/file.c | 4 ++-- fs/nfs/nfs4proc.c | 12 ++++++------ fs/nfs/nfs4state.c | 18 +++++++++--------- fs/nfs/nfs4xdr.c | 2 +- fs/nfs/write.c | 4 ++-- 6 files changed, 21 insertions(+), 21 deletions(-) (limited to 'fs') diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c index fa1a14def45c..ca6985001466 100644 --- a/fs/nfs/delegation.c +++ b/fs/nfs/delegation.c @@ -156,7 +156,7 @@ static int nfs_delegation_claim_locks(struct nfs4_state *state, const nfs4_state list = &flctx->flc_posix; spin_lock(&flctx->flc_lock); restart: - list_for_each_entry(fl, list, fl_list) { + for_each_file_lock(fl, list) { if (nfs_file_open_context(fl->fl_file)->state != state) continue; spin_unlock(&flctx->flc_lock); diff --git a/fs/nfs/file.c b/fs/nfs/file.c index 8577ccf621f5..1a7a76d6055b 100644 --- a/fs/nfs/file.c +++ b/fs/nfs/file.c @@ -851,7 +851,7 @@ int nfs_lock(struct file *filp, int cmd, struct file_lock *fl) if (IS_GETLK(cmd)) ret = do_getlk(filp, cmd, fl, is_local); - else if (fl->fl_type == F_UNLCK) + else if (lock_is_unlock(fl)) ret = do_unlk(filp, cmd, fl, is_local); else ret = do_setlk(filp, cmd, fl, is_local); @@ -878,7 +878,7 @@ int nfs_flock(struct file *filp, int cmd, struct file_lock *fl) is_local = 1; /* We're simulating flock() locks using posix locks on the server */ - if (fl->fl_type == F_UNLCK) + if (lock_is_unlock(fl)) return do_unlk(filp, cmd, fl, is_local); return do_setlk(filp, cmd, fl, is_local); } diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 23819a756508..df54fcd0fa08 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -7045,7 +7045,7 @@ static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock * struct rpc_task *task; struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t); int status = 0; - unsigned char fl_flags = request->fl_flags; + unsigned char saved_flags = request->fl_flags; status = nfs4_set_lock_state(state, request); /* Unlock _before_ we do the RPC call */ @@ -7080,7 +7080,7 @@ static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock * status = rpc_wait_for_completion_task(task); rpc_put_task(task); out: - request->fl_flags = fl_flags; + request->fl_flags = saved_flags; trace_nfs4_unlock(request, state, F_SETLK, status); return status; } @@ -7398,7 +7398,7 @@ static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock { struct nfs_inode *nfsi = NFS_I(state->inode); struct nfs4_state_owner *sp = state->owner; - unsigned char fl_flags = request->fl_flags; + unsigned char flags = request->fl_flags; int status; request->fl_flags |= FL_ACCESS; @@ -7410,7 +7410,7 @@ static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock if (test_bit(NFS_DELEGATED_STATE, &state->flags)) { /* Yes: cache locks! */ /* ...but avoid races with delegation recall... 
*/ - request->fl_flags = fl_flags & ~FL_SLEEP; + request->fl_flags = flags & ~FL_SLEEP; status = locks_lock_inode_wait(state->inode, request); up_read(&nfsi->rwsem); mutex_unlock(&sp->so_delegreturn_mutex); @@ -7420,7 +7420,7 @@ static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock mutex_unlock(&sp->so_delegreturn_mutex); status = _nfs4_do_setlk(state, cmd, request, NFS_LOCK_NEW); out: - request->fl_flags = fl_flags; + request->fl_flags = flags; return status; } @@ -7562,7 +7562,7 @@ nfs4_proc_lock(struct file *filp, int cmd, struct file_lock *request) if (!(IS_SETLK(cmd) || IS_SETLKW(cmd))) return -EINVAL; - if (request->fl_type == F_UNLCK) { + if (lock_is_unlock(request)) { if (state != NULL) return nfs4_proc_unlck(state, cmd, request); return 0; diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index 9a5d911a7edc..16b57735e26a 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c @@ -847,15 +847,15 @@ void nfs4_close_sync(struct nfs4_state *state, fmode_t fmode) */ static struct nfs4_lock_state * __nfs4_find_lock_state(struct nfs4_state *state, - fl_owner_t fl_owner, fl_owner_t fl_owner2) + fl_owner_t owner, fl_owner_t owner2) { struct nfs4_lock_state *pos, *ret = NULL; list_for_each_entry(pos, &state->lock_states, ls_locks) { - if (pos->ls_owner == fl_owner) { + if (pos->ls_owner == owner) { ret = pos; break; } - if (pos->ls_owner == fl_owner2) + if (pos->ls_owner == owner2) ret = pos; } if (ret) @@ -868,7 +868,7 @@ __nfs4_find_lock_state(struct nfs4_state *state, * exists, return an uninitialized one. * */ -static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, fl_owner_t fl_owner) +static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, fl_owner_t owner) { struct nfs4_lock_state *lsp; struct nfs_server *server = state->owner->so_server; @@ -879,7 +879,7 @@ static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, f nfs4_init_seqid_counter(&lsp->ls_seqid); refcount_set(&lsp->ls_count, 1); lsp->ls_state = state; - lsp->ls_owner = fl_owner; + lsp->ls_owner = owner; lsp->ls_seqid.owner_id = ida_alloc(&server->lockowner_id, GFP_KERNEL_ACCOUNT); if (lsp->ls_seqid.owner_id < 0) goto out_free; @@ -993,7 +993,7 @@ static int nfs4_copy_lock_stateid(nfs4_stateid *dst, const struct nfs_lock_context *l_ctx) { struct nfs4_lock_state *lsp; - fl_owner_t fl_owner, fl_flock_owner; + fl_owner_t owner, fl_flock_owner; int ret = -ENOENT; if (l_ctx == NULL) @@ -1002,11 +1002,11 @@ static int nfs4_copy_lock_stateid(nfs4_stateid *dst, if (test_bit(LK_STATE_IN_USE, &state->flags) == 0) goto out; - fl_owner = l_ctx->lockowner; + owner = l_ctx->lockowner; fl_flock_owner = l_ctx->open_context->flock_owner; spin_lock(&state->state_lock); - lsp = __nfs4_find_lock_state(state, fl_owner, fl_flock_owner); + lsp = __nfs4_find_lock_state(state, owner, fl_flock_owner); if (lsp && test_bit(NFS_LOCK_LOST, &lsp->ls_flags)) ret = -EIO; else if (lsp != NULL && test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) != 0) { @@ -1529,7 +1529,7 @@ static int nfs4_reclaim_locks(struct nfs4_state *state, const struct nfs4_state_ down_write(&nfsi->rwsem); spin_lock(&flctx->flc_lock); restart: - list_for_each_entry(fl, list, fl_list) { + for_each_file_lock(fl, list) { if (nfs_file_open_context(fl->fl_file)->state != state) continue; spin_unlock(&flctx->flc_lock); diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index 69406e60f391..6e309db5afe4 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c @@ -1305,7 +1305,7 @@ static void 
encode_link(struct xdr_stream *xdr, const struct qstr *name, struct static inline int nfs4_lock_type(struct file_lock *fl, int block) { - if (fl->fl_type == F_RDLCK) + if (lock_is_read(fl)) return block ? NFS4_READW_LT : NFS4_READ_LT; return block ? NFS4_WRITEW_LT : NFS4_WRITE_LT; } diff --git a/fs/nfs/write.c b/fs/nfs/write.c index bb79d3a886ae..d16f2b9d1765 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -1301,7 +1301,7 @@ static bool is_whole_file_wrlock(struct file_lock *fl) { return fl->fl_start == 0 && fl->fl_end == OFFSET_MAX && - fl->fl_type == F_WRLCK; + lock_is_write(fl); } /* If we know the page is up to date, and we're not using byte range locks (or @@ -1341,7 +1341,7 @@ static int nfs_can_extend_write(struct file *file, struct folio *folio, } else if (!list_empty(&flctx->flc_flock)) { fl = list_first_entry(&flctx->flc_flock, struct file_lock, fl_list); - if (fl->fl_type == F_WRLCK) + if (lock_is_write(fl)) ret = 1; } spin_unlock(&flctx->flc_lock); -- cgit v1.2.3 From 60f3154d196b5b1e3a819f0c1d4e6c94bcb73937 Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Wed, 31 Jan 2024 18:01:53 -0500 Subject: nfsd: convert to using new filelock helpers Convert to using the new file locking helper functions. Also, in later patches we're going to introduce some macros with names that clash with the variable names in nfsd4_lock. Rename them. Signed-off-by: Jeff Layton Link: https://lore.kernel.org/r/20240131-flsplit-v3-12-c6129007ee8d@kernel.org Reviewed-by: NeilBrown Signed-off-by: Christian Brauner --- fs/nfsd/nfs4state.c | 32 ++++++++++++++++---------------- 1 file changed, 16 insertions(+), 16 deletions(-) (limited to 'fs') diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index 2fa54cfd4882..dd83635d4adf 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -7493,8 +7493,8 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, int lkflg; int err; bool new = false; - unsigned char fl_type; - unsigned int fl_flags = FL_POSIX; + unsigned char type; + unsigned int flags = FL_POSIX; struct net *net = SVC_NET(rqstp); struct nfsd_net *nn = net_generic(net, nfsd_net_id); @@ -7557,14 +7557,14 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, goto out; if (lock->lk_reclaim) - fl_flags |= FL_RECLAIM; + flags |= FL_RECLAIM; fp = lock_stp->st_stid.sc_file; switch (lock->lk_type) { case NFS4_READW_LT: if (nfsd4_has_session(cstate) || exportfs_lock_op_is_async(sb->s_export_op)) - fl_flags |= FL_SLEEP; + flags |= FL_SLEEP; fallthrough; case NFS4_READ_LT: spin_lock(&fp->fi_lock); @@ -7572,12 +7572,12 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, if (nf) get_lock_access(lock_stp, NFS4_SHARE_ACCESS_READ); spin_unlock(&fp->fi_lock); - fl_type = F_RDLCK; + type = F_RDLCK; break; case NFS4_WRITEW_LT: if (nfsd4_has_session(cstate) || exportfs_lock_op_is_async(sb->s_export_op)) - fl_flags |= FL_SLEEP; + flags |= FL_SLEEP; fallthrough; case NFS4_WRITE_LT: spin_lock(&fp->fi_lock); @@ -7585,7 +7585,7 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, if (nf) get_lock_access(lock_stp, NFS4_SHARE_ACCESS_WRITE); spin_unlock(&fp->fi_lock); - fl_type = F_WRLCK; + type = F_WRLCK; break; default: status = nfserr_inval; @@ -7605,7 +7605,7 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, * on those filesystems: */ if (!exportfs_lock_op_is_async(sb->s_export_op)) - fl_flags &= ~FL_SLEEP; + flags &= ~FL_SLEEP; nbl = find_or_allocate_block(lock_sop, &fp->fi_fhandle, nn); if (!nbl) { @@ 
-7615,11 +7615,11 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, } file_lock = &nbl->nbl_lock; - file_lock->fl_type = fl_type; + file_lock->fl_type = type; file_lock->fl_owner = (fl_owner_t)lockowner(nfs4_get_stateowner(&lock_sop->lo_owner)); file_lock->fl_pid = current->tgid; file_lock->fl_file = nf->nf_file; - file_lock->fl_flags = fl_flags; + file_lock->fl_flags = flags; file_lock->fl_lmops = &nfsd_posix_mng_ops; file_lock->fl_start = lock->lk_offset; file_lock->fl_end = last_byte_offset(lock->lk_offset, lock->lk_length); @@ -7632,7 +7632,7 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, goto out; } - if (fl_flags & FL_SLEEP) { + if (flags & FL_SLEEP) { nbl->nbl_time = ktime_get_boottime_seconds(); spin_lock(&nn->blocked_locks_lock); list_add_tail(&nbl->nbl_list, &lock_sop->lo_blocked); @@ -7669,7 +7669,7 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, out: if (nbl) { /* dequeue it if we queued it before */ - if (fl_flags & FL_SLEEP) { + if (flags & FL_SLEEP) { spin_lock(&nn->blocked_locks_lock); if (!list_empty(&nbl->nbl_list) && !list_empty(&nbl->nbl_lru)) { @@ -7926,7 +7926,7 @@ check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner) if (flctx && !list_empty_careful(&flctx->flc_posix)) { spin_lock(&flctx->flc_lock); - list_for_each_entry(fl, &flctx->flc_posix, fl_list) { + for_each_file_lock(fl, &flctx->flc_posix) { if (fl->fl_owner == (fl_owner_t)lowner) { status = true; break; @@ -8455,7 +8455,7 @@ nfsd4_deleg_getattr_conflict(struct svc_rqst *rqstp, struct inode *inode) if (!ctx) return 0; spin_lock(&ctx->flc_lock); - list_for_each_entry(fl, &ctx->flc_lease, fl_list) { + for_each_file_lock(fl, &ctx->flc_lease) { if (fl->fl_flags == FL_LAYOUT) continue; if (fl->fl_lmops != &nfsd_lease_mng_ops) { @@ -8464,11 +8464,11 @@ nfsd4_deleg_getattr_conflict(struct svc_rqst *rqstp, struct inode *inode) * we are done; there isn't any write delegation * on this inode */ - if (fl->fl_type == F_RDLCK) + if (lock_is_read(fl)) break; goto break_lease; } - if (fl->fl_type == F_WRLCK) { + if (lock_is_write(fl)) { dp = fl->fl_owner; if (dp->dl_recall.cb_clp == *(rqstp->rq_lease_breaker)) { spin_unlock(&ctx->flc_lock); -- cgit v1.2.3 From 64f92a554f72cbfe8b971f93af9d07f87599bc88 Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Wed, 31 Jan 2024 18:01:54 -0500 Subject: ocfs2: convert to using new filelock helpers Convert to using the new file locking helper functions. 
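Note that only checks of the requested lock type are being converted; checks of the fcntl command itself, such as IS_GETLK(cmd) and IS_SETLKW(cmd) in the dlm, gfs2 and ocfs2 code, stay open-coded. An illustrative dispatcher showing the distinction (example_lock() and the two helpers it calls are hypothetical):

static int example_lock(struct file *file, int cmd, struct file_lock *fl)
{
	bool wait = IS_SETLKW(cmd);	/* blocking variant of the command? */

	if (lock_is_unlock(fl))		/* requested lock type, independent of cmd */
		return example_do_unlock(file, fl);	/* hypothetical */
	return example_do_lock(file, fl, wait);		/* hypothetical */
}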
Signed-off-by: Jeff Layton Link: https://lore.kernel.org/r/20240131-flsplit-v3-13-c6129007ee8d@kernel.org Reviewed-by: NeilBrown Signed-off-by: Christian Brauner --- fs/ocfs2/locks.c | 4 ++-- fs/ocfs2/stack_user.c | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/ocfs2/locks.c b/fs/ocfs2/locks.c index f37174e79fad..ef4fd91b586e 100644 --- a/fs/ocfs2/locks.c +++ b/fs/ocfs2/locks.c @@ -27,7 +27,7 @@ static int ocfs2_do_flock(struct file *file, struct inode *inode, struct ocfs2_file_private *fp = file->private_data; struct ocfs2_lock_res *lockres = &fp->fp_flock; - if (fl->fl_type == F_WRLCK) + if (lock_is_write(fl)) level = 1; if (!IS_SETLKW(cmd)) trylock = 1; @@ -107,7 +107,7 @@ int ocfs2_flock(struct file *file, int cmd, struct file_lock *fl) ocfs2_mount_local(osb)) return locks_lock_file_wait(file, fl); - if (fl->fl_type == F_UNLCK) + if (lock_is_unlock(fl)) return ocfs2_do_funlock(file, cmd, fl); else return ocfs2_do_flock(file, inode, cmd, fl); diff --git a/fs/ocfs2/stack_user.c b/fs/ocfs2/stack_user.c index 9b76ee66aeb2..c11406cd87a8 100644 --- a/fs/ocfs2/stack_user.c +++ b/fs/ocfs2/stack_user.c @@ -744,7 +744,7 @@ static int user_plock(struct ocfs2_cluster_connection *conn, return dlm_posix_cancel(conn->cc_lockspace, ino, file, fl); else if (IS_GETLK(cmd)) return dlm_posix_get(conn->cc_lockspace, ino, file, fl); - else if (fl->fl_type == F_UNLCK) + else if (lock_is_unlock(fl)) return dlm_posix_unlock(conn->cc_lockspace, ino, file, fl); else return dlm_posix_lock(conn->cc_lockspace, ino, file, cmd, fl); -- cgit v1.2.3 From 2cd114294d1dad6ed19217c909680f9fd30ee711 Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Wed, 31 Jan 2024 18:01:55 -0500 Subject: smb/client: convert to using new filelock helpers Convert to using the new file locking helper functions. Signed-off-by: Jeff Layton Link: https://lore.kernel.org/r/20240131-flsplit-v3-14-c6129007ee8d@kernel.org Reviewed-by: NeilBrown Signed-off-by: Christian Brauner --- fs/smb/client/file.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'fs') diff --git a/fs/smb/client/file.c b/fs/smb/client/file.c index 3a213432775b..c26f8ed14065 100644 --- a/fs/smb/client/file.c +++ b/fs/smb/client/file.c @@ -1409,7 +1409,7 @@ cifs_posix_lock_test(struct file *file, struct file_lock *flock) down_read(&cinode->lock_sem); posix_test_lock(file, flock); - if (flock->fl_type == F_UNLCK && !cinode->can_cache_brlcks) { + if (lock_is_unlock(flock) && !cinode->can_cache_brlcks) { flock->fl_type = saved_type; rc = 1; } @@ -1581,7 +1581,7 @@ cifs_push_posix_locks(struct cifsFileInfo *cfile) el = locks_to_send.next; spin_lock(&flctx->flc_lock); - list_for_each_entry(flock, &flctx->flc_posix, fl_list) { + for_each_file_lock(flock, &flctx->flc_posix) { if (el == &locks_to_send) { /* * The list ended. 
We don't have enough allocated @@ -1591,7 +1591,7 @@ cifs_push_posix_locks(struct cifsFileInfo *cfile) break; } length = cifs_flock_len(flock); - if (flock->fl_type == F_RDLCK || flock->fl_type == F_SHLCK) + if (lock_is_read(flock) || flock->fl_type == F_SHLCK) type = CIFS_RDLCK; else type = CIFS_WRLCK; @@ -1681,16 +1681,16 @@ cifs_read_flock(struct file_lock *flock, __u32 *type, int *lock, int *unlock, cifs_dbg(FYI, "Unknown lock flags 0x%x\n", flock->fl_flags); *type = server->vals->large_lock_type; - if (flock->fl_type == F_WRLCK) { + if (lock_is_write(flock)) { cifs_dbg(FYI, "F_WRLCK\n"); *type |= server->vals->exclusive_lock_type; *lock = 1; - } else if (flock->fl_type == F_UNLCK) { + } else if (lock_is_unlock(flock)) { cifs_dbg(FYI, "F_UNLCK\n"); *type |= server->vals->unlock_lock_type; *unlock = 1; /* Check if unlock includes more than one lock range */ - } else if (flock->fl_type == F_RDLCK) { + } else if (lock_is_read(flock)) { cifs_dbg(FYI, "F_RDLCK\n"); *type |= server->vals->shared_lock_type; *lock = 1; -- cgit v1.2.3 From 6a277077ac5189d7633f8c57e153e0a73fab39d2 Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Wed, 31 Jan 2024 18:01:56 -0500 Subject: smb/server: convert to using new filelock helpers Convert to using the new file locking helper functions. Signed-off-by: Jeff Layton Link: https://lore.kernel.org/r/20240131-flsplit-v3-15-c6129007ee8d@kernel.org Reviewed-by: NeilBrown Signed-off-by: Christian Brauner --- fs/smb/server/smb2pdu.c | 6 +++--- fs/smb/server/vfs.c | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) (limited to 'fs') diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c index ba7a72a6a4f4..e170b96d5ac0 100644 --- a/fs/smb/server/smb2pdu.c +++ b/fs/smb/server/smb2pdu.c @@ -6841,7 +6841,7 @@ static void smb2_remove_blocked_lock(void **argv) struct file_lock *flock = (struct file_lock *)argv[0]; ksmbd_vfs_posix_lock_unblock(flock); - wake_up(&flock->fl_wait); + locks_wake_up(flock); } static inline bool lock_defer_pending(struct file_lock *fl) @@ -6991,7 +6991,7 @@ int smb2_lock(struct ksmbd_work *work) file_inode(smb_lock->fl->fl_file)) continue; - if (smb_lock->fl->fl_type == F_UNLCK) { + if (lock_is_unlock(smb_lock->fl)) { if (cmp_lock->fl->fl_file == smb_lock->fl->fl_file && cmp_lock->start == smb_lock->start && cmp_lock->end == smb_lock->end && @@ -7051,7 +7051,7 @@ int smb2_lock(struct ksmbd_work *work) } up_read(&conn_list_lock); out_check_cl: - if (smb_lock->fl->fl_type == F_UNLCK && nolock) { + if (lock_is_unlock(smb_lock->fl) && nolock) { pr_err("Try to unlock nolocked range\n"); rsp->hdr.Status = STATUS_RANGE_NOT_LOCKED; goto out; diff --git a/fs/smb/server/vfs.c b/fs/smb/server/vfs.c index a6961bfe3e13..449cfa9ed31c 100644 --- a/fs/smb/server/vfs.c +++ b/fs/smb/server/vfs.c @@ -337,16 +337,16 @@ static int check_lock_range(struct file *filp, loff_t start, loff_t end, return 0; spin_lock(&ctx->flc_lock); - list_for_each_entry(flock, &ctx->flc_posix, fl_list) { + for_each_file_lock(flock, &ctx->flc_posix) { /* check conflict locks */ if (flock->fl_end >= start && end >= flock->fl_start) { - if (flock->fl_type == F_RDLCK) { + if (lock_is_read(flock)) { if (type == WRITE) { pr_err("not allow write by shared lock\n"); error = 1; goto out; } - } else if (flock->fl_type == F_WRLCK) { + } else if (lock_is_write(flock)) { /* check owner in lock */ if (flock->fl_file != filp) { error = 1; -- cgit v1.2.3 From 3d40f78169a0a954ff06e2b0f8e21463f7edb433 Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Wed, 31 Jan 2024 18:01:57 -0500 Subject: 
filelock: drop the IS_* macros These don't add a lot of value over just open-coding the flag check. Suggested-by: NeilBrown Signed-off-by: Jeff Layton Link: https://lore.kernel.org/r/20240131-flsplit-v3-16-c6129007ee8d@kernel.org Reviewed-by: NeilBrown Signed-off-by: Christian Brauner --- fs/locks.c | 32 +++++++++++++++----------------- 1 file changed, 15 insertions(+), 17 deletions(-) (limited to 'fs') diff --git a/fs/locks.c b/fs/locks.c index 149070fd3b66..d685c3fdbea5 100644 --- a/fs/locks.c +++ b/fs/locks.c @@ -70,12 +70,6 @@ #include -#define IS_POSIX(fl) (fl->fl_flags & FL_POSIX) -#define IS_FLOCK(fl) (fl->fl_flags & FL_FLOCK) -#define IS_LEASE(fl) (fl->fl_flags & (FL_LEASE|FL_DELEG|FL_LAYOUT)) -#define IS_OFDLCK(fl) (fl->fl_flags & FL_OFDLCK) -#define IS_REMOTELCK(fl) (fl->fl_pid <= 0) - static bool lease_breaking(struct file_lock *fl) { return fl->fl_flags & (FL_UNLOCK_PENDING | FL_DOWNGRADE_PENDING); @@ -767,7 +761,7 @@ new_blocker: } waiter->fl_blocker = blocker; list_add_tail(&waiter->fl_blocked_member, &blocker->fl_blocked_requests); - if (IS_POSIX(blocker) && !IS_OFDLCK(blocker)) + if ((blocker->fl_flags & (FL_POSIX|FL_OFDLCK)) == FL_POSIX) locks_insert_global_blocked(waiter); /* The requests in waiter->fl_blocked are known to conflict with @@ -999,7 +993,7 @@ static int posix_locks_deadlock(struct file_lock *caller_fl, * This deadlock detector can't reasonably detect deadlocks with * FL_OFDLCK locks, since they aren't owned by a process, per-se. */ - if (IS_OFDLCK(caller_fl)) + if (caller_fl->fl_flags & FL_OFDLCK) return 0; while ((block_fl = what_owner_is_waiting_for(block_fl))) { @@ -2150,10 +2144,13 @@ static pid_t locks_translate_pid(struct file_lock *fl, struct pid_namespace *ns) pid_t vnr; struct pid *pid; - if (IS_OFDLCK(fl)) + if (fl->fl_flags & FL_OFDLCK) return -1; - if (IS_REMOTELCK(fl)) + + /* Remote locks report a negative pid value */ + if (fl->fl_pid <= 0) return fl->fl_pid; + /* * If the flock owner process is dead and its pid has been already * freed, the translation below won't work, but we still want to show @@ -2697,7 +2694,7 @@ static void lock_get_status(struct seq_file *f, struct file_lock *fl, struct inode *inode = NULL; unsigned int pid; struct pid_namespace *proc_pidns = proc_pid_ns(file_inode(f->file)->i_sb); - int type; + int type = fl->fl_type; pid = locks_translate_pid(fl, proc_pidns); /* @@ -2714,19 +2711,21 @@ static void lock_get_status(struct seq_file *f, struct file_lock *fl, if (repeat) seq_printf(f, "%*s", repeat - 1 + (int)strlen(pfx), pfx); - if (IS_POSIX(fl)) { + if (fl->fl_flags & FL_POSIX) { if (fl->fl_flags & FL_ACCESS) seq_puts(f, "ACCESS"); - else if (IS_OFDLCK(fl)) + else if (fl->fl_flags & FL_OFDLCK) seq_puts(f, "OFDLCK"); else seq_puts(f, "POSIX "); seq_printf(f, " %s ", (inode == NULL) ? "*NOINODE*" : "ADVISORY "); - } else if (IS_FLOCK(fl)) { + } else if (fl->fl_flags & FL_FLOCK) { seq_puts(f, "FLOCK ADVISORY "); - } else if (IS_LEASE(fl)) { + } else if (fl->fl_flags & (FL_LEASE|FL_DELEG|FL_LAYOUT)) { + type = target_leasetype(fl); + if (fl->fl_flags & FL_DELEG) seq_puts(f, "DELEG "); else @@ -2741,7 +2740,6 @@ static void lock_get_status(struct seq_file *f, struct file_lock *fl, } else { seq_puts(f, "UNKNOWN UNKNOWN "); } - type = IS_LEASE(fl) ? target_leasetype(fl) : fl->fl_type; seq_printf(f, "%s ", (type == F_WRLCK) ? "WRITE" : (type == F_RDLCK) ? 
"READ" : "UNLCK"); @@ -2753,7 +2751,7 @@ static void lock_get_status(struct seq_file *f, struct file_lock *fl, } else { seq_printf(f, "%d :0 ", pid); } - if (IS_POSIX(fl)) { + if (fl->fl_flags & FL_POSIX) { if (fl->fl_end == OFFSET_MAX) seq_printf(f, "%Ld EOF\n", fl->fl_start); else -- cgit v1.2.3 From a69ce85ec9af6bdc0b3511959a7dc1a324e5e16a Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Wed, 31 Jan 2024 18:01:58 -0500 Subject: filelock: split common fields into struct file_lock_core In a future patch, we're going to split file leases into their own structure. Since a lot of the underlying machinery uses the same fields move those into a new file_lock_core, and embed that inside struct file_lock. For now, add some macros to ensure that we can continue to build while the conversion is in progress. Signed-off-by: Jeff Layton Link: https://lore.kernel.org/r/20240131-flsplit-v3-17-c6129007ee8d@kernel.org Reviewed-by: NeilBrown Signed-off-by: Christian Brauner --- fs/9p/vfs_file.c | 1 + fs/afs/internal.h | 1 + fs/ceph/locks.c | 1 + fs/dlm/plock.c | 1 + fs/fuse/file.c | 1 + fs/gfs2/file.c | 1 + fs/lockd/clntproc.c | 1 + fs/locks.c | 1 + fs/nfs/file.c | 1 + fs/nfs/nfs4_fs.h | 1 + fs/nfs/write.c | 1 + fs/nfsd/netns.h | 1 + fs/ocfs2/locks.c | 1 + fs/ocfs2/stack_user.c | 1 + fs/open.c | 2 +- fs/posix_acl.c | 4 ++-- fs/smb/client/cifsglob.h | 1 + fs/smb/client/cifssmb.c | 1 + fs/smb/client/file.c | 3 ++- fs/smb/client/smb2file.c | 1 + fs/smb/server/smb2pdu.c | 1 + fs/smb/server/vfs.c | 1 + include/linux/filelock.h | 57 ++++++++++++++++++++++++++++++++--------------- include/linux/lockd/xdr.h | 3 ++- 24 files changed, 65 insertions(+), 23 deletions(-) (limited to 'fs') diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c index 3df8aa1b5996..a1dabcf73380 100644 --- a/fs/9p/vfs_file.c +++ b/fs/9p/vfs_file.c @@ -9,6 +9,7 @@ #include #include #include +#define _NEED_FILE_LOCK_FIELD_MACROS #include #include #include diff --git a/fs/afs/internal.h b/fs/afs/internal.h index 9c03fcf7ffaa..f5dd428e40f4 100644 --- a/fs/afs/internal.h +++ b/fs/afs/internal.h @@ -9,6 +9,7 @@ #include #include #include +#define _NEED_FILE_LOCK_FIELD_MACROS #include #include #include diff --git a/fs/ceph/locks.c b/fs/ceph/locks.c index 80ebe1d6c67d..ce773e9c0b79 100644 --- a/fs/ceph/locks.c +++ b/fs/ceph/locks.c @@ -7,6 +7,7 @@ #include "super.h" #include "mds_client.h" +#define _NEED_FILE_LOCK_FIELD_MACROS #include #include diff --git a/fs/dlm/plock.c b/fs/dlm/plock.c index 42c596b900d4..fdcddbb96d40 100644 --- a/fs/dlm/plock.c +++ b/fs/dlm/plock.c @@ -4,6 +4,7 @@ */ #include +#define _NEED_FILE_LOCK_FIELD_MACROS #include #include #include diff --git a/fs/fuse/file.c b/fs/fuse/file.c index 148a71b8b4d0..2757870ee6ac 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -18,6 +18,7 @@ #include #include #include +#define _NEED_FILE_LOCK_FIELD_MACROS #include #include diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c index 6c25aea30f1b..d06488de1b3b 100644 --- a/fs/gfs2/file.c +++ b/fs/gfs2/file.c @@ -15,6 +15,7 @@ #include #include #include +#define _NEED_FILE_LOCK_FIELD_MACROS #include #include #include diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c index cc596748e359..1f71260603b7 100644 --- a/fs/lockd/clntproc.c +++ b/fs/lockd/clntproc.c @@ -12,6 +12,7 @@ #include #include #include +#define _NEED_FILE_LOCK_FIELD_MACROS #include #include #include diff --git a/fs/locks.c b/fs/locks.c index d685c3fdbea5..097254ab35d3 100644 --- a/fs/locks.c +++ b/fs/locks.c @@ -48,6 +48,7 @@ * children. 
* */ +#define _NEED_FILE_LOCK_FIELD_MACROS #include #include diff --git a/fs/nfs/file.c b/fs/nfs/file.c index 1a7a76d6055b..0b6691e64d27 100644 --- a/fs/nfs/file.c +++ b/fs/nfs/file.c @@ -31,6 +31,7 @@ #include #include +#define _NEED_FILE_LOCK_FIELD_MACROS #include #include "delegation.h" diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h index 581698f1b7b2..752224a48f1c 100644 --- a/fs/nfs/nfs4_fs.h +++ b/fs/nfs/nfs4_fs.h @@ -23,6 +23,7 @@ #define NFS4_MAX_LOOP_ON_RECOVER (10) #include +#define _NEED_FILE_LOCK_FIELD_MACROS #include struct idmap; diff --git a/fs/nfs/write.c b/fs/nfs/write.c index d16f2b9d1765..13f2e10167ac 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -25,6 +25,7 @@ #include #include #include +#define _NEED_FILE_LOCK_FIELD_MACROS #include #include diff --git a/fs/nfsd/netns.h b/fs/nfsd/netns.h index 74b4360779a1..fd91125208be 100644 --- a/fs/nfsd/netns.h +++ b/fs/nfsd/netns.h @@ -10,6 +10,7 @@ #include #include +#define _NEED_FILE_LOCK_FIELD_MACROS #include #include #include diff --git a/fs/ocfs2/locks.c b/fs/ocfs2/locks.c index ef4fd91b586e..84ad403b5998 100644 --- a/fs/ocfs2/locks.c +++ b/fs/ocfs2/locks.c @@ -8,6 +8,7 @@ */ #include +#define _NEED_FILE_LOCK_FIELD_MACROS #include #include diff --git a/fs/ocfs2/stack_user.c b/fs/ocfs2/stack_user.c index c11406cd87a8..39b7e47a8618 100644 --- a/fs/ocfs2/stack_user.c +++ b/fs/ocfs2/stack_user.c @@ -9,6 +9,7 @@ #include #include +#define _NEED_FILE_LOCK_FIELD_MACROS #include #include #include diff --git a/fs/open.c b/fs/open.c index a84d21e55c39..0a73afe04d34 100644 --- a/fs/open.c +++ b/fs/open.c @@ -1364,7 +1364,7 @@ struct file *filp_open(const char *filename, int flags, umode_t mode) { struct filename *name = getname_kernel(filename); struct file *file = ERR_CAST(name); - + if (!IS_ERR(name)) { file = file_open_name(name, flags, mode); putname(name); diff --git a/fs/posix_acl.c b/fs/posix_acl.c index e1af20893ebe..6bf587d1a9b8 100644 --- a/fs/posix_acl.c +++ b/fs/posix_acl.c @@ -786,12 +786,12 @@ struct posix_acl *posix_acl_from_xattr(struct user_namespace *userns, return ERR_PTR(count); if (count == 0) return NULL; - + acl = posix_acl_alloc(count, GFP_NOFS); if (!acl) return ERR_PTR(-ENOMEM); acl_e = acl->a_entries; - + for (end = entry + count; entry != end; acl_e++, entry++) { acl_e->e_tag = le16_to_cpu(entry->e_tag); acl_e->e_perm = le16_to_cpu(entry->e_perm); diff --git a/fs/smb/client/cifsglob.h b/fs/smb/client/cifsglob.h index 20036fb16cec..fcda4c77c649 100644 --- a/fs/smb/client/cifsglob.h +++ b/fs/smb/client/cifsglob.h @@ -26,6 +26,7 @@ #include #include "../common/smb2pdu.h" #include "smb2pdu.h" +#define _NEED_FILE_LOCK_FIELD_MACROS #include #define SMB_PATH_MAX 260 diff --git a/fs/smb/client/cifssmb.c b/fs/smb/client/cifssmb.c index 01e89070df5a..e19ecf692c20 100644 --- a/fs/smb/client/cifssmb.c +++ b/fs/smb/client/cifssmb.c @@ -15,6 +15,7 @@ /* want to reuse a stale file handle and only the caller knows the file info */ #include +#define _NEED_FILE_LOCK_FIELD_MACROS #include #include #include diff --git a/fs/smb/client/file.c b/fs/smb/client/file.c index c26f8ed14065..d3f5969c3a86 100644 --- a/fs/smb/client/file.c +++ b/fs/smb/client/file.c @@ -9,6 +9,7 @@ * */ #include +#define _NEED_FILE_LOCK_FIELD_MACROS #include #include #include @@ -2951,7 +2952,7 @@ skip_write: continue; } - folio_batch_release(&fbatch); + folio_batch_release(&fbatch); cond_resched(); } while (wbc->nr_to_write > 0); diff --git a/fs/smb/client/smb2file.c b/fs/smb/client/smb2file.c index e0ee96d69d49..cd225d15a7c5 100644 --- 
a/fs/smb/client/smb2file.c +++ b/fs/smb/client/smb2file.c @@ -7,6 +7,7 @@ * */ #include +#define _NEED_FILE_LOCK_FIELD_MACROS #include #include #include diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c index e170b96d5ac0..11cc28719582 100644 --- a/fs/smb/server/smb2pdu.c +++ b/fs/smb/server/smb2pdu.c @@ -12,6 +12,7 @@ #include #include #include +#define _NEED_FILE_LOCK_FIELD_MACROS #include #include "glob.h" diff --git a/fs/smb/server/vfs.c b/fs/smb/server/vfs.c index 449cfa9ed31c..5dc87649400b 100644 --- a/fs/smb/server/vfs.c +++ b/fs/smb/server/vfs.c @@ -6,6 +6,7 @@ #include #include +#define _NEED_FILE_LOCK_FIELD_MACROS #include #include #include diff --git a/include/linux/filelock.h b/include/linux/filelock.h index a3cb59b7922a..d1fba98744a7 100644 --- a/include/linux/filelock.h +++ b/include/linux/filelock.h @@ -85,23 +85,28 @@ bool opens_in_grace(struct net *); * * Obviously, the last two criteria only matter for POSIX locks. */ -struct file_lock { - struct file_lock *fl_blocker; /* The lock, that is blocking us */ - struct list_head fl_list; /* link into file_lock_context */ - struct hlist_node fl_link; /* node in global lists */ - struct list_head fl_blocked_requests; /* list of requests with + +struct file_lock_core { + struct file_lock *flc_blocker; /* The lock that is blocking us */ + struct list_head flc_list; /* link into file_lock_context */ + struct hlist_node flc_link; /* node in global lists */ + struct list_head flc_blocked_requests; /* list of requests with * ->fl_blocker pointing here */ - struct list_head fl_blocked_member; /* node in + struct list_head flc_blocked_member; /* node in * ->fl_blocker->fl_blocked_requests */ - fl_owner_t fl_owner; - unsigned int fl_flags; - unsigned char fl_type; - pid_t fl_pid; - int fl_link_cpu; /* what cpu's list is this on? */ - wait_queue_head_t fl_wait; - struct file *fl_file; + fl_owner_t flc_owner; + unsigned int flc_flags; + unsigned char flc_type; + pid_t flc_pid; + int flc_link_cpu; /* what cpu's list is this on? 
*/ + wait_queue_head_t flc_wait; + struct file *flc_file; +}; + +struct file_lock { + struct file_lock_core c; loff_t fl_start; loff_t fl_end; @@ -126,6 +131,22 @@ struct file_lock { } fl_u; } __randomize_layout; +/* Temporary macros to allow building during coccinelle conversion */ +#ifdef _NEED_FILE_LOCK_FIELD_MACROS +#define fl_list c.flc_list +#define fl_blocker c.flc_blocker +#define fl_link c.flc_link +#define fl_blocked_requests c.flc_blocked_requests +#define fl_blocked_member c.flc_blocked_member +#define fl_owner c.flc_owner +#define fl_flags c.flc_flags +#define fl_type c.flc_type +#define fl_pid c.flc_pid +#define fl_link_cpu c.flc_link_cpu +#define fl_wait c.flc_wait +#define fl_file c.flc_file +#endif + struct file_lock_context { spinlock_t flc_lock; struct list_head flc_flock; @@ -149,26 +170,26 @@ int fcntl_getlease(struct file *filp); static inline bool lock_is_unlock(struct file_lock *fl) { - return fl->fl_type == F_UNLCK; + return fl->c.flc_type == F_UNLCK; } static inline bool lock_is_read(struct file_lock *fl) { - return fl->fl_type == F_RDLCK; + return fl->c.flc_type == F_RDLCK; } static inline bool lock_is_write(struct file_lock *fl) { - return fl->fl_type == F_WRLCK; + return fl->c.flc_type == F_WRLCK; } static inline void locks_wake_up(struct file_lock *fl) { - wake_up(&fl->fl_wait); + wake_up(&fl->c.flc_wait); } /* for walking lists of file_locks linked by fl_list */ -#define for_each_file_lock(_fl, _head) list_for_each_entry(_fl, _head, fl_list) +#define for_each_file_lock(_fl, _head) list_for_each_entry(_fl, _head, c.flc_list) /* fs/locks.c */ void locks_free_lock_context(struct inode *inode); diff --git a/include/linux/lockd/xdr.h b/include/linux/lockd/xdr.h index b60fbcd8cdfa..a3f068b0ca86 100644 --- a/include/linux/lockd/xdr.h +++ b/include/linux/lockd/xdr.h @@ -11,6 +11,7 @@ #define LOCKD_XDR_H #include +#define _NEED_FILE_LOCK_FIELD_MACROS #include #include #include @@ -52,7 +53,7 @@ struct nlm_lock { * FreeBSD uses 16, Apple Mac OS X 10.3 uses 20. Therefore we set it to * 32 bytes. */ - + struct nlm_cookie { unsigned char data[NLM_MAXCOOKIELEN]; -- cgit v1.2.3 From 4ca52f539865853e6ddbc393745cd0b305f0d810 Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Wed, 31 Jan 2024 18:01:59 -0500 Subject: filelock: have fs/locks.c deal with file_lock_core directly Convert fs/locks.c to access fl_core fields direcly rather than using the backward-compatibility macros. Most of this was done with coccinelle, with a few by-hand fixups. Signed-off-by: Jeff Layton Link: https://lore.kernel.org/r/20240131-flsplit-v3-18-c6129007ee8d@kernel.org Reviewed-by: NeilBrown Signed-off-by: Christian Brauner --- fs/locks.c | 467 ++++++++++++++++++++-------------------- include/trace/events/filelock.h | 32 +-- 2 files changed, 254 insertions(+), 245 deletions(-) (limited to 'fs') diff --git a/fs/locks.c b/fs/locks.c index 097254ab35d3..f418c6e31219 100644 --- a/fs/locks.c +++ b/fs/locks.c @@ -48,8 +48,6 @@ * children. 
* */ -#define _NEED_FILE_LOCK_FIELD_MACROS - #include #include #include @@ -73,16 +71,16 @@ static bool lease_breaking(struct file_lock *fl) { - return fl->fl_flags & (FL_UNLOCK_PENDING | FL_DOWNGRADE_PENDING); + return fl->c.flc_flags & (FL_UNLOCK_PENDING | FL_DOWNGRADE_PENDING); } static int target_leasetype(struct file_lock *fl) { - if (fl->fl_flags & FL_UNLOCK_PENDING) + if (fl->c.flc_flags & FL_UNLOCK_PENDING) return F_UNLCK; - if (fl->fl_flags & FL_DOWNGRADE_PENDING) + if (fl->c.flc_flags & FL_DOWNGRADE_PENDING) return F_RDLCK; - return fl->fl_type; + return fl->c.flc_type; } static int leases_enable = 1; @@ -201,8 +199,10 @@ locks_dump_ctx_list(struct list_head *list, char *list_type) { struct file_lock *fl; - list_for_each_entry(fl, list, fl_list) { - pr_warn("%s: fl_owner=%p fl_flags=0x%x fl_type=0x%x fl_pid=%u\n", list_type, fl->fl_owner, fl->fl_flags, fl->fl_type, fl->fl_pid); + list_for_each_entry(fl, list, c.flc_list) { + pr_warn("%s: fl_owner=%p fl_flags=0x%x fl_type=0x%x fl_pid=%u\n", list_type, + fl->c.flc_owner, fl->c.flc_flags, + fl->c.flc_type, fl->c.flc_pid); } } @@ -230,13 +230,14 @@ locks_check_ctx_file_list(struct file *filp, struct list_head *list, struct file_lock *fl; struct inode *inode = file_inode(filp); - list_for_each_entry(fl, list, fl_list) - if (fl->fl_file == filp) + list_for_each_entry(fl, list, c.flc_list) + if (fl->c.flc_file == filp) pr_warn("Leaked %s lock on dev=0x%x:0x%x ino=0x%lx " " fl_owner=%p fl_flags=0x%x fl_type=0x%x fl_pid=%u\n", list_type, MAJOR(inode->i_sb->s_dev), MINOR(inode->i_sb->s_dev), inode->i_ino, - fl->fl_owner, fl->fl_flags, fl->fl_type, fl->fl_pid); + fl->c.flc_owner, fl->c.flc_flags, + fl->c.flc_type, fl->c.flc_pid); } void @@ -250,13 +251,13 @@ locks_free_lock_context(struct inode *inode) } } -static void locks_init_lock_heads(struct file_lock *fl) +static void locks_init_lock_heads(struct file_lock_core *flc) { - INIT_HLIST_NODE(&fl->fl_link); - INIT_LIST_HEAD(&fl->fl_list); - INIT_LIST_HEAD(&fl->fl_blocked_requests); - INIT_LIST_HEAD(&fl->fl_blocked_member); - init_waitqueue_head(&fl->fl_wait); + INIT_HLIST_NODE(&flc->flc_link); + INIT_LIST_HEAD(&flc->flc_list); + INIT_LIST_HEAD(&flc->flc_blocked_requests); + INIT_LIST_HEAD(&flc->flc_blocked_member); + init_waitqueue_head(&flc->flc_wait); } /* Allocate an empty lock structure. 
*/ @@ -265,7 +266,7 @@ struct file_lock *locks_alloc_lock(void) struct file_lock *fl = kmem_cache_zalloc(filelock_cache, GFP_KERNEL); if (fl) - locks_init_lock_heads(fl); + locks_init_lock_heads(&fl->c); return fl; } @@ -273,11 +274,11 @@ EXPORT_SYMBOL_GPL(locks_alloc_lock); void locks_release_private(struct file_lock *fl) { - BUG_ON(waitqueue_active(&fl->fl_wait)); - BUG_ON(!list_empty(&fl->fl_list)); - BUG_ON(!list_empty(&fl->fl_blocked_requests)); - BUG_ON(!list_empty(&fl->fl_blocked_member)); - BUG_ON(!hlist_unhashed(&fl->fl_link)); + BUG_ON(waitqueue_active(&fl->c.flc_wait)); + BUG_ON(!list_empty(&fl->c.flc_list)); + BUG_ON(!list_empty(&fl->c.flc_blocked_requests)); + BUG_ON(!list_empty(&fl->c.flc_blocked_member)); + BUG_ON(!hlist_unhashed(&fl->c.flc_link)); if (fl->fl_ops) { if (fl->fl_ops->fl_release_private) @@ -287,8 +288,8 @@ void locks_release_private(struct file_lock *fl) if (fl->fl_lmops) { if (fl->fl_lmops->lm_put_owner) { - fl->fl_lmops->lm_put_owner(fl->fl_owner); - fl->fl_owner = NULL; + fl->fl_lmops->lm_put_owner(fl->c.flc_owner); + fl->c.flc_owner = NULL; } fl->fl_lmops = NULL; } @@ -310,10 +311,10 @@ bool locks_owner_has_blockers(struct file_lock_context *flctx, struct file_lock *fl; spin_lock(&flctx->flc_lock); - list_for_each_entry(fl, &flctx->flc_posix, fl_list) { - if (fl->fl_owner != owner) + list_for_each_entry(fl, &flctx->flc_posix, c.flc_list) { + if (fl->c.flc_owner != owner) continue; - if (!list_empty(&fl->fl_blocked_requests)) { + if (!list_empty(&fl->c.flc_blocked_requests)) { spin_unlock(&flctx->flc_lock); return true; } @@ -337,8 +338,8 @@ locks_dispose_list(struct list_head *dispose) struct file_lock *fl; while (!list_empty(dispose)) { - fl = list_first_entry(dispose, struct file_lock, fl_list); - list_del_init(&fl->fl_list); + fl = list_first_entry(dispose, struct file_lock, c.flc_list); + list_del_init(&fl->c.flc_list); locks_free_lock(fl); } } @@ -346,7 +347,7 @@ locks_dispose_list(struct list_head *dispose) void locks_init_lock(struct file_lock *fl) { memset(fl, 0, sizeof(struct file_lock)); - locks_init_lock_heads(fl); + locks_init_lock_heads(&fl->c); } EXPORT_SYMBOL(locks_init_lock); @@ -355,11 +356,11 @@ EXPORT_SYMBOL(locks_init_lock); */ void locks_copy_conflock(struct file_lock *new, struct file_lock *fl) { - new->fl_owner = fl->fl_owner; - new->fl_pid = fl->fl_pid; - new->fl_file = NULL; - new->fl_flags = fl->fl_flags; - new->fl_type = fl->fl_type; + new->c.flc_owner = fl->c.flc_owner; + new->c.flc_pid = fl->c.flc_pid; + new->c.flc_file = NULL; + new->c.flc_flags = fl->c.flc_flags; + new->c.flc_type = fl->c.flc_type; new->fl_start = fl->fl_start; new->fl_end = fl->fl_end; new->fl_lmops = fl->fl_lmops; @@ -367,7 +368,7 @@ void locks_copy_conflock(struct file_lock *new, struct file_lock *fl) if (fl->fl_lmops) { if (fl->fl_lmops->lm_get_owner) - fl->fl_lmops->lm_get_owner(fl->fl_owner); + fl->fl_lmops->lm_get_owner(fl->c.flc_owner); } } EXPORT_SYMBOL(locks_copy_conflock); @@ -379,7 +380,7 @@ void locks_copy_lock(struct file_lock *new, struct file_lock *fl) locks_copy_conflock(new, fl); - new->fl_file = fl->fl_file; + new->c.flc_file = fl->c.flc_file; new->fl_ops = fl->fl_ops; if (fl->fl_ops) { @@ -398,12 +399,14 @@ static void locks_move_blocks(struct file_lock *new, struct file_lock *fl) * ->fl_blocked_requests, so we don't need a lock to check if it * is empty. 
*/ - if (list_empty(&fl->fl_blocked_requests)) + if (list_empty(&fl->c.flc_blocked_requests)) return; spin_lock(&blocked_lock_lock); - list_splice_init(&fl->fl_blocked_requests, &new->fl_blocked_requests); - list_for_each_entry(f, &new->fl_blocked_requests, fl_blocked_member) - f->fl_blocker = new; + list_splice_init(&fl->c.flc_blocked_requests, + &new->c.flc_blocked_requests); + list_for_each_entry(f, &new->c.flc_blocked_requests, + c.flc_blocked_member) + f->c.flc_blocker = new; spin_unlock(&blocked_lock_lock); } @@ -424,11 +427,11 @@ static void flock_make_lock(struct file *filp, struct file_lock *fl, int type) { locks_init_lock(fl); - fl->fl_file = filp; - fl->fl_owner = filp; - fl->fl_pid = current->tgid; - fl->fl_flags = FL_FLOCK; - fl->fl_type = type; + fl->c.flc_file = filp; + fl->c.flc_owner = filp; + fl->c.flc_pid = current->tgid; + fl->c.flc_flags = FL_FLOCK; + fl->c.flc_type = type; fl->fl_end = OFFSET_MAX; } @@ -438,7 +441,7 @@ static int assign_type(struct file_lock *fl, int type) case F_RDLCK: case F_WRLCK: case F_UNLCK: - fl->fl_type = type; + fl->c.flc_type = type; break; default: return -EINVAL; @@ -483,10 +486,10 @@ static int flock64_to_posix_lock(struct file *filp, struct file_lock *fl, } else fl->fl_end = OFFSET_MAX; - fl->fl_owner = current->files; - fl->fl_pid = current->tgid; - fl->fl_file = filp; - fl->fl_flags = FL_POSIX; + fl->c.flc_owner = current->files; + fl->c.flc_pid = current->tgid; + fl->c.flc_file = filp; + fl->c.flc_flags = FL_POSIX; fl->fl_ops = NULL; fl->fl_lmops = NULL; @@ -520,7 +523,7 @@ lease_break_callback(struct file_lock *fl) static void lease_setup(struct file_lock *fl, void **priv) { - struct file *filp = fl->fl_file; + struct file *filp = fl->c.flc_file; struct fasync_struct *fa = *priv; /* @@ -548,11 +551,11 @@ static int lease_init(struct file *filp, int type, struct file_lock *fl) if (assign_type(fl, type) != 0) return -EINVAL; - fl->fl_owner = filp; - fl->fl_pid = current->tgid; + fl->c.flc_owner = filp; + fl->c.flc_pid = current->tgid; - fl->fl_file = filp; - fl->fl_flags = FL_LEASE; + fl->c.flc_file = filp; + fl->c.flc_flags = FL_LEASE; fl->fl_start = 0; fl->fl_end = OFFSET_MAX; fl->fl_ops = NULL; @@ -590,7 +593,7 @@ static inline int locks_overlap(struct file_lock *fl1, struct file_lock *fl2) */ static int posix_same_owner(struct file_lock *fl1, struct file_lock *fl2) { - return fl1->fl_owner == fl2->fl_owner; + return fl1->c.flc_owner == fl2->c.flc_owner; } /* Must be called with the flc_lock held! */ @@ -601,8 +604,8 @@ static void locks_insert_global_locks(struct file_lock *fl) percpu_rwsem_assert_held(&file_rwsem); spin_lock(&fll->lock); - fl->fl_link_cpu = smp_processor_id(); - hlist_add_head(&fl->fl_link, &fll->hlist); + fl->c.flc_link_cpu = smp_processor_id(); + hlist_add_head(&fl->c.flc_link, &fll->hlist); spin_unlock(&fll->lock); } @@ -618,33 +621,34 @@ static void locks_delete_global_locks(struct file_lock *fl) * is done while holding the flc_lock, and new insertions into the list * also require that it be held. 
*/ - if (hlist_unhashed(&fl->fl_link)) + if (hlist_unhashed(&fl->c.flc_link)) return; - fll = per_cpu_ptr(&file_lock_list, fl->fl_link_cpu); + fll = per_cpu_ptr(&file_lock_list, fl->c.flc_link_cpu); spin_lock(&fll->lock); - hlist_del_init(&fl->fl_link); + hlist_del_init(&fl->c.flc_link); spin_unlock(&fll->lock); } static unsigned long posix_owner_key(struct file_lock *fl) { - return (unsigned long)fl->fl_owner; + return (unsigned long) fl->c.flc_owner; } static void locks_insert_global_blocked(struct file_lock *waiter) { lockdep_assert_held(&blocked_lock_lock); - hash_add(blocked_hash, &waiter->fl_link, posix_owner_key(waiter)); + hash_add(blocked_hash, &waiter->c.flc_link, + posix_owner_key(waiter)); } static void locks_delete_global_blocked(struct file_lock *waiter) { lockdep_assert_held(&blocked_lock_lock); - hash_del(&waiter->fl_link); + hash_del(&waiter->c.flc_link); } /* Remove waiter from blocker's block list. @@ -655,16 +659,16 @@ static void locks_delete_global_blocked(struct file_lock *waiter) static void __locks_delete_block(struct file_lock *waiter) { locks_delete_global_blocked(waiter); - list_del_init(&waiter->fl_blocked_member); + list_del_init(&waiter->c.flc_blocked_member); } static void __locks_wake_up_blocks(struct file_lock *blocker) { - while (!list_empty(&blocker->fl_blocked_requests)) { + while (!list_empty(&blocker->c.flc_blocked_requests)) { struct file_lock *waiter; - waiter = list_first_entry(&blocker->fl_blocked_requests, - struct file_lock, fl_blocked_member); + waiter = list_first_entry(&blocker->c.flc_blocked_requests, + struct file_lock, c.flc_blocked_member); __locks_delete_block(waiter); if (waiter->fl_lmops && waiter->fl_lmops->lm_notify) waiter->fl_lmops->lm_notify(waiter); @@ -676,7 +680,7 @@ static void __locks_wake_up_blocks(struct file_lock *blocker) * point in deleting a block. Paired with acquire at the top * of locks_delete_block(). */ - smp_store_release(&waiter->fl_blocker, NULL); + smp_store_release(&waiter->c.flc_blocker, NULL); } } @@ -711,12 +715,12 @@ int locks_delete_block(struct file_lock *waiter) * no new locks can be inserted into its fl_blocked_requests list, and * can avoid doing anything further if the list is empty. */ - if (!smp_load_acquire(&waiter->fl_blocker) && - list_empty(&waiter->fl_blocked_requests)) + if (!smp_load_acquire(&waiter->c.flc_blocker) && + list_empty(&waiter->c.flc_blocked_requests)) return status; spin_lock(&blocked_lock_lock); - if (waiter->fl_blocker) + if (waiter->c.flc_blocker) status = 0; __locks_wake_up_blocks(waiter); __locks_delete_block(waiter); @@ -725,7 +729,7 @@ int locks_delete_block(struct file_lock *waiter) * The setting of fl_blocker to NULL marks the "done" point in deleting * a block. Paired with acquire at the top of this function. 
*/ - smp_store_release(&waiter->fl_blocker, NULL); + smp_store_release(&waiter->c.flc_blocker, NULL); spin_unlock(&blocked_lock_lock); return status; } @@ -752,17 +756,19 @@ static void __locks_insert_block(struct file_lock *blocker, struct file_lock *)) { struct file_lock *fl; - BUG_ON(!list_empty(&waiter->fl_blocked_member)); + BUG_ON(!list_empty(&waiter->c.flc_blocked_member)); new_blocker: - list_for_each_entry(fl, &blocker->fl_blocked_requests, fl_blocked_member) + list_for_each_entry(fl, &blocker->c.flc_blocked_requests, + c.flc_blocked_member) if (conflict(fl, waiter)) { blocker = fl; goto new_blocker; } - waiter->fl_blocker = blocker; - list_add_tail(&waiter->fl_blocked_member, &blocker->fl_blocked_requests); - if ((blocker->fl_flags & (FL_POSIX|FL_OFDLCK)) == FL_POSIX) + waiter->c.flc_blocker = blocker; + list_add_tail(&waiter->c.flc_blocked_member, + &blocker->c.flc_blocked_requests); + if ((blocker->c.flc_flags & (FL_POSIX|FL_OFDLCK)) == FL_POSIX) locks_insert_global_blocked(waiter); /* The requests in waiter->fl_blocked are known to conflict with @@ -797,7 +803,7 @@ static void locks_wake_up_blocks(struct file_lock *blocker) * fl_blocked_requests list does not require the flc_lock, so we must * recheck list_empty() after acquiring the blocked_lock_lock. */ - if (list_empty(&blocker->fl_blocked_requests)) + if (list_empty(&blocker->c.flc_blocked_requests)) return; spin_lock(&blocked_lock_lock); @@ -808,7 +814,7 @@ static void locks_wake_up_blocks(struct file_lock *blocker) static void locks_insert_lock_ctx(struct file_lock *fl, struct list_head *before) { - list_add_tail(&fl->fl_list, before); + list_add_tail(&fl->c.flc_list, before); locks_insert_global_locks(fl); } @@ -816,7 +822,7 @@ static void locks_unlink_lock_ctx(struct file_lock *fl) { locks_delete_global_locks(fl); - list_del_init(&fl->fl_list); + list_del_init(&fl->c.flc_list); locks_wake_up_blocks(fl); } @@ -825,7 +831,7 @@ locks_delete_lock_ctx(struct file_lock *fl, struct list_head *dispose) { locks_unlink_lock_ctx(fl); if (dispose) - list_add(&fl->fl_list, dispose); + list_add(&fl->c.flc_list, dispose); else locks_free_lock(fl); } @@ -886,7 +892,7 @@ static bool flock_locks_conflict(struct file_lock *caller_fl, /* FLOCK locks referring to the same filp do not conflict with * each other. 
*/ - if (caller_fl->fl_file == sys_fl->fl_file) + if (caller_fl->c.flc_file == sys_fl->c.flc_file) return false; return locks_conflict(caller_fl, sys_fl); @@ -903,13 +909,13 @@ posix_test_lock(struct file *filp, struct file_lock *fl) ctx = locks_inode_context(inode); if (!ctx || list_empty_careful(&ctx->flc_posix)) { - fl->fl_type = F_UNLCK; + fl->c.flc_type = F_UNLCK; return; } retry: spin_lock(&ctx->flc_lock); - list_for_each_entry(cfl, &ctx->flc_posix, fl_list) { + list_for_each_entry(cfl, &ctx->flc_posix, c.flc_list) { if (!posix_test_locks_conflict(fl, cfl)) continue; if (cfl->fl_lmops && cfl->fl_lmops->lm_lock_expirable @@ -925,7 +931,7 @@ retry: locks_copy_conflock(fl, cfl); goto out; } - fl->fl_type = F_UNLCK; + fl->c.flc_type = F_UNLCK; out: spin_unlock(&ctx->flc_lock); return; @@ -972,10 +978,10 @@ static struct file_lock *what_owner_is_waiting_for(struct file_lock *block_fl) { struct file_lock *fl; - hash_for_each_possible(blocked_hash, fl, fl_link, posix_owner_key(block_fl)) { + hash_for_each_possible(blocked_hash, fl, c.flc_link, posix_owner_key(block_fl)) { if (posix_same_owner(fl, block_fl)) { - while (fl->fl_blocker) - fl = fl->fl_blocker; + while (fl->c.flc_blocker) + fl = fl->c.flc_blocker; return fl; } } @@ -994,7 +1000,7 @@ static int posix_locks_deadlock(struct file_lock *caller_fl, * This deadlock detector can't reasonably detect deadlocks with * FL_OFDLCK locks, since they aren't owned by a process, per-se. */ - if (caller_fl->fl_flags & FL_OFDLCK) + if (caller_fl->c.flc_flags & FL_OFDLCK) return 0; while ((block_fl = what_owner_is_waiting_for(block_fl))) { @@ -1022,14 +1028,14 @@ static int flock_lock_inode(struct inode *inode, struct file_lock *request) bool found = false; LIST_HEAD(dispose); - ctx = locks_get_lock_context(inode, request->fl_type); + ctx = locks_get_lock_context(inode, request->c.flc_type); if (!ctx) { - if (request->fl_type != F_UNLCK) + if (request->c.flc_type != F_UNLCK) return -ENOMEM; - return (request->fl_flags & FL_EXISTS) ? -ENOENT : 0; + return (request->c.flc_flags & FL_EXISTS) ? 
-ENOENT : 0; } - if (!(request->fl_flags & FL_ACCESS) && (request->fl_type != F_UNLCK)) { + if (!(request->c.flc_flags & FL_ACCESS) && (request->c.flc_type != F_UNLCK)) { new_fl = locks_alloc_lock(); if (!new_fl) return -ENOMEM; @@ -1037,13 +1043,13 @@ static int flock_lock_inode(struct inode *inode, struct file_lock *request) percpu_down_read(&file_rwsem); spin_lock(&ctx->flc_lock); - if (request->fl_flags & FL_ACCESS) + if (request->c.flc_flags & FL_ACCESS) goto find_conflict; - list_for_each_entry(fl, &ctx->flc_flock, fl_list) { - if (request->fl_file != fl->fl_file) + list_for_each_entry(fl, &ctx->flc_flock, c.flc_list) { + if (request->c.flc_file != fl->c.flc_file) continue; - if (request->fl_type == fl->fl_type) + if (request->c.flc_type == fl->c.flc_type) goto out; found = true; locks_delete_lock_ctx(fl, &dispose); @@ -1051,23 +1057,23 @@ static int flock_lock_inode(struct inode *inode, struct file_lock *request) } if (lock_is_unlock(request)) { - if ((request->fl_flags & FL_EXISTS) && !found) + if ((request->c.flc_flags & FL_EXISTS) && !found) error = -ENOENT; goto out; } find_conflict: - list_for_each_entry(fl, &ctx->flc_flock, fl_list) { + list_for_each_entry(fl, &ctx->flc_flock, c.flc_list) { if (!flock_locks_conflict(request, fl)) continue; error = -EAGAIN; - if (!(request->fl_flags & FL_SLEEP)) + if (!(request->c.flc_flags & FL_SLEEP)) goto out; error = FILE_LOCK_DEFERRED; locks_insert_block(fl, request, flock_locks_conflict); goto out; } - if (request->fl_flags & FL_ACCESS) + if (request->c.flc_flags & FL_ACCESS) goto out; locks_copy_lock(new_fl, request); locks_move_blocks(new_fl, request); @@ -1100,7 +1106,7 @@ static int posix_lock_inode(struct inode *inode, struct file_lock *request, void *owner; void (*func)(void); - ctx = locks_get_lock_context(inode, request->fl_type); + ctx = locks_get_lock_context(inode, request->c.flc_type); if (!ctx) return lock_is_unlock(request) ? 0 : -ENOMEM; @@ -1110,8 +1116,8 @@ static int posix_lock_inode(struct inode *inode, struct file_lock *request, * * In some cases we can be sure, that no new locks will be needed */ - if (!(request->fl_flags & FL_ACCESS) && - (request->fl_type != F_UNLCK || + if (!(request->c.flc_flags & FL_ACCESS) && + (request->c.flc_type != F_UNLCK || request->fl_start != 0 || request->fl_end != OFFSET_MAX)) { new_fl = locks_alloc_lock(); new_fl2 = locks_alloc_lock(); @@ -1125,8 +1131,8 @@ retry: * there are any, either return error or put the request on the * blocker's list of waiters and the global blocked_hash. */ - if (request->fl_type != F_UNLCK) { - list_for_each_entry(fl, &ctx->flc_posix, fl_list) { + if (request->c.flc_type != F_UNLCK) { + list_for_each_entry(fl, &ctx->flc_posix, c.flc_list) { if (!posix_locks_conflict(request, fl)) continue; if (fl->fl_lmops && fl->fl_lmops->lm_lock_expirable @@ -1143,7 +1149,7 @@ retry: if (conflock) locks_copy_conflock(conflock, fl); error = -EAGAIN; - if (!(request->fl_flags & FL_SLEEP)) + if (!(request->c.flc_flags & FL_SLEEP)) goto out; /* * Deadlock detection and insertion into the blocked @@ -1168,22 +1174,22 @@ retry: /* If we're just looking for a conflict, we're done. */ error = 0; - if (request->fl_flags & FL_ACCESS) + if (request->c.flc_flags & FL_ACCESS) goto out; /* Find the first old lock with the same owner as the new lock */ - list_for_each_entry(fl, &ctx->flc_posix, fl_list) { + list_for_each_entry(fl, &ctx->flc_posix, c.flc_list) { if (posix_same_owner(request, fl)) break; } /* Process locks with this owner. 
*/ - list_for_each_entry_safe_from(fl, tmp, &ctx->flc_posix, fl_list) { + list_for_each_entry_safe_from(fl, tmp, &ctx->flc_posix, c.flc_list) { if (!posix_same_owner(request, fl)) break; /* Detect adjacent or overlapping regions (if same lock type) */ - if (request->fl_type == fl->fl_type) { + if (request->c.flc_type == fl->c.flc_type) { /* In all comparisons of start vs end, use * "start - 1" rather than "end + 1". If end * is OFFSET_MAX, end + 1 will become negative. @@ -1256,7 +1262,8 @@ retry: locks_move_blocks(new_fl, request); request = new_fl; new_fl = NULL; - locks_insert_lock_ctx(request, &fl->fl_list); + locks_insert_lock_ctx(request, + &fl->c.flc_list); locks_delete_lock_ctx(fl, &dispose); added = true; } @@ -1275,7 +1282,7 @@ retry: error = 0; if (!added) { if (lock_is_unlock(request)) { - if (request->fl_flags & FL_EXISTS) + if (request->c.flc_flags & FL_EXISTS) error = -ENOENT; goto out; } @@ -1286,7 +1293,7 @@ retry: } locks_copy_lock(new_fl, request); locks_move_blocks(new_fl, request); - locks_insert_lock_ctx(new_fl, &fl->fl_list); + locks_insert_lock_ctx(new_fl, &fl->c.flc_list); fl = new_fl; new_fl = NULL; } @@ -1298,7 +1305,7 @@ retry: left = new_fl2; new_fl2 = NULL; locks_copy_lock(left, right); - locks_insert_lock_ctx(left, &fl->fl_list); + locks_insert_lock_ctx(left, &fl->c.flc_list); } right->fl_start = request->fl_end + 1; locks_wake_up_blocks(right); @@ -1359,8 +1366,8 @@ static int posix_lock_inode_wait(struct inode *inode, struct file_lock *fl) error = posix_lock_inode(inode, fl, NULL); if (error != FILE_LOCK_DEFERRED) break; - error = wait_event_interruptible(fl->fl_wait, - list_empty(&fl->fl_blocked_member)); + error = wait_event_interruptible(fl->c.flc_wait, + list_empty(&fl->c.flc_blocked_member)); if (error) break; } @@ -1372,10 +1379,10 @@ static void lease_clear_pending(struct file_lock *fl, int arg) { switch (arg) { case F_UNLCK: - fl->fl_flags &= ~FL_UNLOCK_PENDING; + fl->c.flc_flags &= ~FL_UNLOCK_PENDING; fallthrough; case F_RDLCK: - fl->fl_flags &= ~FL_DOWNGRADE_PENDING; + fl->c.flc_flags &= ~FL_DOWNGRADE_PENDING; } } @@ -1389,11 +1396,11 @@ int lease_modify(struct file_lock *fl, int arg, struct list_head *dispose) lease_clear_pending(fl, arg); locks_wake_up_blocks(fl); if (arg == F_UNLCK) { - struct file *filp = fl->fl_file; + struct file *filp = fl->c.flc_file; f_delown(filp); filp->f_owner.signum = 0; - fasync_helper(0, fl->fl_file, 0, &fl->fl_fasync); + fasync_helper(0, fl->c.flc_file, 0, &fl->fl_fasync); if (fl->fl_fasync != NULL) { printk(KERN_ERR "locks_delete_lock: fasync == %p\n", fl->fl_fasync); fl->fl_fasync = NULL; @@ -1419,7 +1426,7 @@ static void time_out_leases(struct inode *inode, struct list_head *dispose) lockdep_assert_held(&ctx->flc_lock); - list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list) { + list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, c.flc_list) { trace_time_out_leases(inode, fl); if (past_time(fl->fl_downgrade_time)) lease_modify(fl, F_RDLCK, dispose); @@ -1435,11 +1442,11 @@ static bool leases_conflict(struct file_lock *lease, struct file_lock *breaker) if (lease->fl_lmops->lm_breaker_owns_lease && lease->fl_lmops->lm_breaker_owns_lease(lease)) return false; - if ((breaker->fl_flags & FL_LAYOUT) != (lease->fl_flags & FL_LAYOUT)) { + if ((breaker->c.flc_flags & FL_LAYOUT) != (lease->c.flc_flags & FL_LAYOUT)) { rc = false; goto trace; } - if ((breaker->fl_flags & FL_DELEG) && (lease->fl_flags & FL_LEASE)) { + if ((breaker->c.flc_flags & FL_DELEG) && (lease->c.flc_flags & FL_LEASE)) { rc = false; goto trace; } 
@@ -1458,7 +1465,7 @@ any_leases_conflict(struct inode *inode, struct file_lock *breaker) lockdep_assert_held(&ctx->flc_lock); - list_for_each_entry(fl, &ctx->flc_lease, fl_list) { + list_for_each_entry(fl, &ctx->flc_lease, c.flc_list) { if (leases_conflict(fl, breaker)) return true; } @@ -1490,7 +1497,7 @@ int __break_lease(struct inode *inode, unsigned int mode, unsigned int type) new_fl = lease_alloc(NULL, want_write ? F_WRLCK : F_RDLCK); if (IS_ERR(new_fl)) return PTR_ERR(new_fl); - new_fl->fl_flags = type; + new_fl->c.flc_flags = type; /* typically we will check that ctx is non-NULL before calling */ ctx = locks_inode_context(inode); @@ -1514,18 +1521,18 @@ int __break_lease(struct inode *inode, unsigned int mode, unsigned int type) break_time++; /* so that 0 means no break time */ } - list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list) { + list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, c.flc_list) { if (!leases_conflict(fl, new_fl)) continue; if (want_write) { - if (fl->fl_flags & FL_UNLOCK_PENDING) + if (fl->c.flc_flags & FL_UNLOCK_PENDING) continue; - fl->fl_flags |= FL_UNLOCK_PENDING; + fl->c.flc_flags |= FL_UNLOCK_PENDING; fl->fl_break_time = break_time; } else { if (lease_breaking(fl)) continue; - fl->fl_flags |= FL_DOWNGRADE_PENDING; + fl->c.flc_flags |= FL_DOWNGRADE_PENDING; fl->fl_downgrade_time = break_time; } if (fl->fl_lmops->lm_break(fl)) @@ -1542,7 +1549,7 @@ int __break_lease(struct inode *inode, unsigned int mode, unsigned int type) } restart: - fl = list_first_entry(&ctx->flc_lease, struct file_lock, fl_list); + fl = list_first_entry(&ctx->flc_lease, struct file_lock, c.flc_list); break_time = fl->fl_break_time; if (break_time != 0) break_time -= jiffies; @@ -1554,9 +1561,9 @@ restart: percpu_up_read(&file_rwsem); locks_dispose_list(&dispose); - error = wait_event_interruptible_timeout(new_fl->fl_wait, - list_empty(&new_fl->fl_blocked_member), - break_time); + error = wait_event_interruptible_timeout(new_fl->c.flc_wait, + list_empty(&new_fl->c.flc_blocked_member), + break_time); percpu_down_read(&file_rwsem); spin_lock(&ctx->flc_lock); @@ -1602,7 +1609,7 @@ void lease_get_mtime(struct inode *inode, struct timespec64 *time) if (ctx && !list_empty_careful(&ctx->flc_lease)) { spin_lock(&ctx->flc_lock); fl = list_first_entry_or_null(&ctx->flc_lease, - struct file_lock, fl_list); + struct file_lock, c.flc_list); if (fl && lock_is_write(fl)) has_lease = true; spin_unlock(&ctx->flc_lock); @@ -1649,8 +1656,8 @@ int fcntl_getlease(struct file *filp) percpu_down_read(&file_rwsem); spin_lock(&ctx->flc_lock); time_out_leases(inode, &dispose); - list_for_each_entry(fl, &ctx->flc_lease, fl_list) { - if (fl->fl_file != filp) + list_for_each_entry(fl, &ctx->flc_lease, c.flc_list) { + if (fl->c.flc_file != filp) continue; type = target_leasetype(fl); break; @@ -1715,7 +1722,7 @@ generic_add_lease(struct file *filp, int arg, struct file_lock **flp, void **pri struct file_lock *fl, *my_fl = NULL, *lease; struct inode *inode = file_inode(filp); struct file_lock_context *ctx; - bool is_deleg = (*flp)->fl_flags & FL_DELEG; + bool is_deleg = (*flp)->c.flc_flags & FL_DELEG; int error; LIST_HEAD(dispose); @@ -1741,7 +1748,7 @@ generic_add_lease(struct file *filp, int arg, struct file_lock **flp, void **pri percpu_down_read(&file_rwsem); spin_lock(&ctx->flc_lock); time_out_leases(inode, &dispose); - error = check_conflicting_open(filp, arg, lease->fl_flags); + error = check_conflicting_open(filp, arg, lease->c.flc_flags); if (error) goto out; @@ -1754,9 +1761,9 @@ 
generic_add_lease(struct file *filp, int arg, struct file_lock **flp, void **pri * except for this filp. */ error = -EAGAIN; - list_for_each_entry(fl, &ctx->flc_lease, fl_list) { - if (fl->fl_file == filp && - fl->fl_owner == lease->fl_owner) { + list_for_each_entry(fl, &ctx->flc_lease, c.flc_list) { + if (fl->c.flc_file == filp && + fl->c.flc_owner == lease->c.flc_owner) { my_fl = fl; continue; } @@ -1771,7 +1778,7 @@ generic_add_lease(struct file *filp, int arg, struct file_lock **flp, void **pri * Modifying our existing lease is OK, but no getting a * new lease if someone else is opening for write: */ - if (fl->fl_flags & FL_UNLOCK_PENDING) + if (fl->c.flc_flags & FL_UNLOCK_PENDING) goto out; } @@ -1798,7 +1805,7 @@ generic_add_lease(struct file *filp, int arg, struct file_lock **flp, void **pri * precedes these checks. */ smp_mb(); - error = check_conflicting_open(filp, arg, lease->fl_flags); + error = check_conflicting_open(filp, arg, lease->c.flc_flags); if (error) { locks_unlink_lock_ctx(lease); goto out; @@ -1834,9 +1841,9 @@ static int generic_delete_lease(struct file *filp, void *owner) percpu_down_read(&file_rwsem); spin_lock(&ctx->flc_lock); - list_for_each_entry(fl, &ctx->flc_lease, fl_list) { - if (fl->fl_file == filp && - fl->fl_owner == owner) { + list_for_each_entry(fl, &ctx->flc_lease, c.flc_list) { + if (fl->c.flc_file == filp && + fl->c.flc_owner == owner) { victim = fl; break; } @@ -2012,8 +2019,8 @@ static int flock_lock_inode_wait(struct inode *inode, struct file_lock *fl) error = flock_lock_inode(inode, fl); if (error != FILE_LOCK_DEFERRED) break; - error = wait_event_interruptible(fl->fl_wait, - list_empty(&fl->fl_blocked_member)); + error = wait_event_interruptible(fl->c.flc_wait, + list_empty(&fl->c.flc_blocked_member)); if (error) break; } @@ -2031,7 +2038,7 @@ static int flock_lock_inode_wait(struct inode *inode, struct file_lock *fl) int locks_lock_inode_wait(struct inode *inode, struct file_lock *fl) { int res = 0; - switch (fl->fl_flags & (FL_POSIX|FL_FLOCK)) { + switch (fl->c.flc_flags & (FL_POSIX|FL_FLOCK)) { case FL_POSIX: res = posix_lock_inode_wait(inode, fl); break; @@ -2093,13 +2100,13 @@ SYSCALL_DEFINE2(flock, unsigned int, fd, unsigned int, cmd) flock_make_lock(f.file, &fl, type); - error = security_file_lock(f.file, fl.fl_type); + error = security_file_lock(f.file, fl.c.flc_type); if (error) goto out_putf; can_sleep = !(cmd & LOCK_NB); if (can_sleep) - fl.fl_flags |= FL_SLEEP; + fl.c.flc_flags |= FL_SLEEP; if (f.file->f_op->flock) error = f.file->f_op->flock(f.file, @@ -2125,7 +2132,7 @@ SYSCALL_DEFINE2(flock, unsigned int, fd, unsigned int, cmd) */ int vfs_test_lock(struct file *filp, struct file_lock *fl) { - WARN_ON_ONCE(filp != fl->fl_file); + WARN_ON_ONCE(filp != fl->c.flc_file); if (filp->f_op->lock) return filp->f_op->lock(filp, F_GETLK, fl); posix_test_lock(filp, fl); @@ -2145,12 +2152,12 @@ static pid_t locks_translate_pid(struct file_lock *fl, struct pid_namespace *ns) pid_t vnr; struct pid *pid; - if (fl->fl_flags & FL_OFDLCK) + if (fl->c.flc_flags & FL_OFDLCK) return -1; /* Remote locks report a negative pid value */ - if (fl->fl_pid <= 0) - return fl->fl_pid; + if (fl->c.flc_pid <= 0) + return fl->c.flc_pid; /* * If the flock owner process is dead and its pid has been already @@ -2158,10 +2165,10 @@ static pid_t locks_translate_pid(struct file_lock *fl, struct pid_namespace *ns) * flock owner pid number in init pidns. 
*/ if (ns == &init_pid_ns) - return (pid_t)fl->fl_pid; + return (pid_t) fl->c.flc_pid; rcu_read_lock(); - pid = find_pid_ns(fl->fl_pid, &init_pid_ns); + pid = find_pid_ns(fl->c.flc_pid, &init_pid_ns); vnr = pid_nr_ns(pid, ns); rcu_read_unlock(); return vnr; @@ -2184,7 +2191,7 @@ static int posix_lock_to_flock(struct flock *flock, struct file_lock *fl) flock->l_len = fl->fl_end == OFFSET_MAX ? 0 : fl->fl_end - fl->fl_start + 1; flock->l_whence = 0; - flock->l_type = fl->fl_type; + flock->l_type = fl->c.flc_type; return 0; } @@ -2196,7 +2203,7 @@ static void posix_lock_to_flock64(struct flock64 *flock, struct file_lock *fl) flock->l_len = fl->fl_end == OFFSET_MAX ? 0 : fl->fl_end - fl->fl_start + 1; flock->l_whence = 0; - flock->l_type = fl->fl_type; + flock->l_type = fl->c.flc_type; } #endif @@ -2225,16 +2232,16 @@ int fcntl_getlk(struct file *filp, unsigned int cmd, struct flock *flock) if (flock->l_pid != 0) goto out; - fl->fl_flags |= FL_OFDLCK; - fl->fl_owner = filp; + fl->c.flc_flags |= FL_OFDLCK; + fl->c.flc_owner = filp; } error = vfs_test_lock(filp, fl); if (error) goto out; - flock->l_type = fl->fl_type; - if (fl->fl_type != F_UNLCK) { + flock->l_type = fl->c.flc_type; + if (fl->c.flc_type != F_UNLCK) { error = posix_lock_to_flock(flock, fl); if (error) goto out; @@ -2281,7 +2288,7 @@ out: */ int vfs_lock_file(struct file *filp, unsigned int cmd, struct file_lock *fl, struct file_lock *conf) { - WARN_ON_ONCE(filp != fl->fl_file); + WARN_ON_ONCE(filp != fl->c.flc_file); if (filp->f_op->lock) return filp->f_op->lock(filp, cmd, fl); else @@ -2294,7 +2301,7 @@ static int do_lock_file_wait(struct file *filp, unsigned int cmd, { int error; - error = security_file_lock(filp, fl->fl_type); + error = security_file_lock(filp, fl->c.flc_type); if (error) return error; @@ -2302,8 +2309,8 @@ static int do_lock_file_wait(struct file *filp, unsigned int cmd, error = vfs_lock_file(filp, cmd, fl, NULL); if (error != FILE_LOCK_DEFERRED) break; - error = wait_event_interruptible(fl->fl_wait, - list_empty(&fl->fl_blocked_member)); + error = wait_event_interruptible(fl->c.flc_wait, + list_empty(&fl->c.flc_blocked_member)); if (error) break; } @@ -2316,13 +2323,13 @@ static int do_lock_file_wait(struct file *filp, unsigned int cmd, static int check_fmode_for_setlk(struct file_lock *fl) { - switch (fl->fl_type) { + switch (fl->c.flc_type) { case F_RDLCK: - if (!(fl->fl_file->f_mode & FMODE_READ)) + if (!(fl->c.flc_file->f_mode & FMODE_READ)) return -EBADF; break; case F_WRLCK: - if (!(fl->fl_file->f_mode & FMODE_WRITE)) + if (!(fl->c.flc_file->f_mode & FMODE_WRITE)) return -EBADF; } return 0; @@ -2361,8 +2368,8 @@ int fcntl_setlk(unsigned int fd, struct file *filp, unsigned int cmd, goto out; cmd = F_SETLK; - file_lock->fl_flags |= FL_OFDLCK; - file_lock->fl_owner = filp; + file_lock->c.flc_flags |= FL_OFDLCK; + file_lock->c.flc_owner = filp; break; case F_OFD_SETLKW: error = -EINVAL; @@ -2370,11 +2377,11 @@ int fcntl_setlk(unsigned int fd, struct file *filp, unsigned int cmd, goto out; cmd = F_SETLKW; - file_lock->fl_flags |= FL_OFDLCK; - file_lock->fl_owner = filp; + file_lock->c.flc_flags |= FL_OFDLCK; + file_lock->c.flc_owner = filp; fallthrough; case F_SETLKW: - file_lock->fl_flags |= FL_SLEEP; + file_lock->c.flc_flags |= FL_SLEEP; } error = do_lock_file_wait(filp, cmd, file_lock); @@ -2384,8 +2391,8 @@ int fcntl_setlk(unsigned int fd, struct file *filp, unsigned int cmd, * lock that was just acquired. There is no need to do that when we're * unlocking though, or for OFD locks. 
*/ - if (!error && file_lock->fl_type != F_UNLCK && - !(file_lock->fl_flags & FL_OFDLCK)) { + if (!error && file_lock->c.flc_type != F_UNLCK && + !(file_lock->c.flc_flags & FL_OFDLCK)) { struct files_struct *files = current->files; /* * We need that spin_lock here - it prevents reordering between @@ -2396,7 +2403,7 @@ int fcntl_setlk(unsigned int fd, struct file *filp, unsigned int cmd, f = files_lookup_fd_locked(files, fd); spin_unlock(&files->file_lock); if (f != filp) { - file_lock->fl_type = F_UNLCK; + file_lock->c.flc_type = F_UNLCK; error = do_lock_file_wait(filp, cmd, file_lock); WARN_ON_ONCE(error); error = -EBADF; @@ -2435,16 +2442,16 @@ int fcntl_getlk64(struct file *filp, unsigned int cmd, struct flock64 *flock) if (flock->l_pid != 0) goto out; - fl->fl_flags |= FL_OFDLCK; - fl->fl_owner = filp; + fl->c.flc_flags |= FL_OFDLCK; + fl->c.flc_owner = filp; } error = vfs_test_lock(filp, fl); if (error) goto out; - flock->l_type = fl->fl_type; - if (fl->fl_type != F_UNLCK) + flock->l_type = fl->c.flc_type; + if (fl->c.flc_type != F_UNLCK) posix_lock_to_flock64(flock, fl); out: @@ -2484,8 +2491,8 @@ int fcntl_setlk64(unsigned int fd, struct file *filp, unsigned int cmd, goto out; cmd = F_SETLK64; - file_lock->fl_flags |= FL_OFDLCK; - file_lock->fl_owner = filp; + file_lock->c.flc_flags |= FL_OFDLCK; + file_lock->c.flc_owner = filp; break; case F_OFD_SETLKW: error = -EINVAL; @@ -2493,11 +2500,11 @@ int fcntl_setlk64(unsigned int fd, struct file *filp, unsigned int cmd, goto out; cmd = F_SETLKW64; - file_lock->fl_flags |= FL_OFDLCK; - file_lock->fl_owner = filp; + file_lock->c.flc_flags |= FL_OFDLCK; + file_lock->c.flc_owner = filp; fallthrough; case F_SETLKW64: - file_lock->fl_flags |= FL_SLEEP; + file_lock->c.flc_flags |= FL_SLEEP; } error = do_lock_file_wait(filp, cmd, file_lock); @@ -2507,8 +2514,8 @@ int fcntl_setlk64(unsigned int fd, struct file *filp, unsigned int cmd, * lock that was just acquired. There is no need to do that when we're * unlocking though, or for OFD locks. 
*/ - if (!error && file_lock->fl_type != F_UNLCK && - !(file_lock->fl_flags & FL_OFDLCK)) { + if (!error && file_lock->c.flc_type != F_UNLCK && + !(file_lock->c.flc_flags & FL_OFDLCK)) { struct files_struct *files = current->files; /* * We need that spin_lock here - it prevents reordering between @@ -2519,7 +2526,7 @@ int fcntl_setlk64(unsigned int fd, struct file *filp, unsigned int cmd, f = files_lookup_fd_locked(files, fd); spin_unlock(&files->file_lock); if (f != filp) { - file_lock->fl_type = F_UNLCK; + file_lock->c.flc_type = F_UNLCK; error = do_lock_file_wait(filp, cmd, file_lock); WARN_ON_ONCE(error); error = -EBADF; @@ -2553,13 +2560,13 @@ void locks_remove_posix(struct file *filp, fl_owner_t owner) return; locks_init_lock(&lock); - lock.fl_type = F_UNLCK; - lock.fl_flags = FL_POSIX | FL_CLOSE; + lock.c.flc_type = F_UNLCK; + lock.c.flc_flags = FL_POSIX | FL_CLOSE; lock.fl_start = 0; lock.fl_end = OFFSET_MAX; - lock.fl_owner = owner; - lock.fl_pid = current->tgid; - lock.fl_file = filp; + lock.c.flc_owner = owner; + lock.c.flc_pid = current->tgid; + lock.c.flc_file = filp; lock.fl_ops = NULL; lock.fl_lmops = NULL; @@ -2582,7 +2589,7 @@ locks_remove_flock(struct file *filp, struct file_lock_context *flctx) return; flock_make_lock(filp, &fl, F_UNLCK); - fl.fl_flags |= FL_CLOSE; + fl.c.flc_flags |= FL_CLOSE; if (filp->f_op->flock) filp->f_op->flock(filp, F_SETLKW, &fl); @@ -2605,8 +2612,8 @@ locks_remove_lease(struct file *filp, struct file_lock_context *ctx) percpu_down_read(&file_rwsem); spin_lock(&ctx->flc_lock); - list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list) - if (filp == fl->fl_file) + list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, c.flc_list) + if (filp == fl->c.flc_file) lease_modify(fl, F_UNLCK, &dispose); spin_unlock(&ctx->flc_lock); percpu_up_read(&file_rwsem); @@ -2650,7 +2657,7 @@ void locks_remove_file(struct file *filp) */ int vfs_cancel_lock(struct file *filp, struct file_lock *fl) { - WARN_ON_ONCE(filp != fl->fl_file); + WARN_ON_ONCE(filp != fl->c.flc_file); if (filp->f_op->lock) return filp->f_op->lock(filp, F_CANCELLK, fl); return 0; @@ -2695,7 +2702,7 @@ static void lock_get_status(struct seq_file *f, struct file_lock *fl, struct inode *inode = NULL; unsigned int pid; struct pid_namespace *proc_pidns = proc_pid_ns(file_inode(f->file)->i_sb); - int type = fl->fl_type; + int type = fl->c.flc_type; pid = locks_translate_pid(fl, proc_pidns); /* @@ -2704,37 +2711,37 @@ static void lock_get_status(struct seq_file *f, struct file_lock *fl, * init_pid_ns to get saved lock pid value. */ - if (fl->fl_file != NULL) - inode = file_inode(fl->fl_file); + if (fl->c.flc_file != NULL) + inode = file_inode(fl->c.flc_file); seq_printf(f, "%lld: ", id); if (repeat) seq_printf(f, "%*s", repeat - 1 + (int)strlen(pfx), pfx); - if (fl->fl_flags & FL_POSIX) { - if (fl->fl_flags & FL_ACCESS) + if (fl->c.flc_flags & FL_POSIX) { + if (fl->c.flc_flags & FL_ACCESS) seq_puts(f, "ACCESS"); - else if (fl->fl_flags & FL_OFDLCK) + else if (fl->c.flc_flags & FL_OFDLCK) seq_puts(f, "OFDLCK"); else seq_puts(f, "POSIX "); seq_printf(f, " %s ", (inode == NULL) ? 
"*NOINODE*" : "ADVISORY "); - } else if (fl->fl_flags & FL_FLOCK) { + } else if (fl->c.flc_flags & FL_FLOCK) { seq_puts(f, "FLOCK ADVISORY "); - } else if (fl->fl_flags & (FL_LEASE|FL_DELEG|FL_LAYOUT)) { + } else if (fl->c.flc_flags & (FL_LEASE|FL_DELEG|FL_LAYOUT)) { type = target_leasetype(fl); - if (fl->fl_flags & FL_DELEG) + if (fl->c.flc_flags & FL_DELEG) seq_puts(f, "DELEG "); else seq_puts(f, "LEASE "); if (lease_breaking(fl)) seq_puts(f, "BREAKING "); - else if (fl->fl_file) + else if (fl->c.flc_file) seq_puts(f, "ACTIVE "); else seq_puts(f, "BREAKER "); @@ -2752,7 +2759,7 @@ static void lock_get_status(struct seq_file *f, struct file_lock *fl, } else { seq_printf(f, "%d :0 ", pid); } - if (fl->fl_flags & FL_POSIX) { + if (fl->c.flc_flags & FL_POSIX) { if (fl->fl_end == OFFSET_MAX) seq_printf(f, "%Ld EOF\n", fl->fl_start); else @@ -2767,13 +2774,14 @@ static struct file_lock *get_next_blocked_member(struct file_lock *node) struct file_lock *tmp; /* NULL node or root node */ - if (node == NULL || node->fl_blocker == NULL) + if (node == NULL || node->c.flc_blocker == NULL) return NULL; /* Next member in the linked list could be itself */ - tmp = list_next_entry(node, fl_blocked_member); - if (list_entry_is_head(tmp, &node->fl_blocker->fl_blocked_requests, fl_blocked_member) - || tmp == node) { + tmp = list_next_entry(node, c.flc_blocked_member); + if (list_entry_is_head(tmp, &node->c.flc_blocker->c.flc_blocked_requests, + c.flc_blocked_member) + || tmp == node) { return NULL; } @@ -2787,13 +2795,13 @@ static int locks_show(struct seq_file *f, void *v) struct pid_namespace *proc_pidns = proc_pid_ns(file_inode(f->file)->i_sb); int level = 0; - cur = hlist_entry(v, struct file_lock, fl_link); + cur = hlist_entry(v, struct file_lock, c.flc_link); if (locks_translate_pid(cur, proc_pidns) == 0) return 0; /* View this crossed linked list as a binary tree, the first member of fl_blocked_requests - * is the left child of current node, the next silibing in fl_blocked_member is the + * is the left child of current node, the next silibing in flc_blocked_member is the * right child, we can alse get the parent of current node from fl_blocker, so this * question becomes traversal of a binary tree */ @@ -2803,17 +2811,18 @@ static int locks_show(struct seq_file *f, void *v) else lock_get_status(f, cur, iter->li_pos, "", level); - if (!list_empty(&cur->fl_blocked_requests)) { + if (!list_empty(&cur->c.flc_blocked_requests)) { /* Turn left */ - cur = list_first_entry_or_null(&cur->fl_blocked_requests, - struct file_lock, fl_blocked_member); + cur = list_first_entry_or_null(&cur->c.flc_blocked_requests, + struct file_lock, + c.flc_blocked_member); level++; } else { /* Turn right */ tmp = get_next_blocked_member(cur); /* Fall back to parent node */ - while (tmp == NULL && cur->fl_blocker != NULL) { - cur = cur->fl_blocker; + while (tmp == NULL && cur->c.flc_blocker != NULL) { + cur = cur->c.flc_blocker; level--; tmp = get_next_blocked_member(cur); } @@ -2830,12 +2839,12 @@ static void __show_fd_locks(struct seq_file *f, { struct file_lock *fl; - list_for_each_entry(fl, head, fl_list) { + list_for_each_entry(fl, head, c.flc_list) { - if (filp != fl->fl_file) + if (filp != fl->c.flc_file) continue; - if (fl->fl_owner != files && - fl->fl_owner != filp) + if (fl->c.flc_owner != files && + fl->c.flc_owner != filp) continue; (*id)++; diff --git a/include/trace/events/filelock.h b/include/trace/events/filelock.h index 8fb1d41b1c67..4be341b5ead0 100644 --- a/include/trace/events/filelock.h +++ 
b/include/trace/events/filelock.h @@ -82,11 +82,11 @@ DECLARE_EVENT_CLASS(filelock_lock, __entry->fl = fl ? fl : NULL; __entry->s_dev = inode->i_sb->s_dev; __entry->i_ino = inode->i_ino; - __entry->blocker = fl ? fl->fl_blocker : NULL; - __entry->owner = fl ? fl->fl_owner : NULL; - __entry->pid = fl ? fl->fl_pid : 0; - __entry->flags = fl ? fl->fl_flags : 0; - __entry->type = fl ? fl->fl_type : 0; + __entry->blocker = fl ? fl->c.flc_blocker : NULL; + __entry->owner = fl ? fl->c.flc_owner : NULL; + __entry->pid = fl ? fl->c.flc_pid : 0; + __entry->flags = fl ? fl->c.flc_flags : 0; + __entry->type = fl ? fl->c.flc_type : 0; __entry->fl_start = fl ? fl->fl_start : 0; __entry->fl_end = fl ? fl->fl_end : 0; __entry->ret = ret; @@ -137,10 +137,10 @@ DECLARE_EVENT_CLASS(filelock_lease, __entry->fl = fl ? fl : NULL; __entry->s_dev = inode->i_sb->s_dev; __entry->i_ino = inode->i_ino; - __entry->blocker = fl ? fl->fl_blocker : NULL; - __entry->owner = fl ? fl->fl_owner : NULL; - __entry->flags = fl ? fl->fl_flags : 0; - __entry->type = fl ? fl->fl_type : 0; + __entry->blocker = fl ? fl->c.flc_blocker : NULL; + __entry->owner = fl ? fl->c.flc_owner : NULL; + __entry->flags = fl ? fl->c.flc_flags : 0; + __entry->type = fl ? fl->c.flc_type : 0; __entry->break_time = fl ? fl->fl_break_time : 0; __entry->downgrade_time = fl ? fl->fl_downgrade_time : 0; ), @@ -190,9 +190,9 @@ TRACE_EVENT(generic_add_lease, __entry->wcount = atomic_read(&inode->i_writecount); __entry->rcount = atomic_read(&inode->i_readcount); __entry->icount = atomic_read(&inode->i_count); - __entry->owner = fl->fl_owner; - __entry->flags = fl->fl_flags; - __entry->type = fl->fl_type; + __entry->owner = fl->c.flc_owner; + __entry->flags = fl->c.flc_flags; + __entry->type = fl->c.flc_type; ), TP_printk("dev=0x%x:0x%x ino=0x%lx wcount=%d rcount=%d icount=%d fl_owner=%p fl_flags=%s fl_type=%s", @@ -220,11 +220,11 @@ TRACE_EVENT(leases_conflict, TP_fast_assign( __entry->lease = lease; - __entry->l_fl_flags = lease->fl_flags; - __entry->l_fl_type = lease->fl_type; + __entry->l_fl_flags = lease->c.flc_flags; + __entry->l_fl_type = lease->c.flc_type; __entry->breaker = breaker; - __entry->b_fl_flags = breaker->fl_flags; - __entry->b_fl_type = breaker->fl_type; + __entry->b_fl_flags = breaker->c.flc_flags; + __entry->b_fl_type = breaker->c.flc_type; __entry->conflict = conflict; ), -- cgit v1.2.3 From fde4951834c22fc634c1e1238b874f894ef46c9c Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Wed, 31 Jan 2024 18:02:00 -0500 Subject: filelock: convert more internal functions to use file_lock_core Convert more internal fs/locks.c functions to take and deal with struct file_lock_core instead of struct file_lock: - locks_dump_ctx_list - locks_check_ctx_file_list - locks_release_private - locks_owner_has_blockers Signed-off-by: Jeff Layton Link: https://lore.kernel.org/r/20240131-flsplit-v3-19-c6129007ee8d@kernel.org Reviewed-by: NeilBrown Signed-off-by: Christian Brauner --- fs/locks.c | 51 +++++++++++++++++++++++++-------------------------- 1 file changed, 25 insertions(+), 26 deletions(-) (limited to 'fs') diff --git a/fs/locks.c b/fs/locks.c index f418c6e31219..5d25a3f53c9d 100644 --- a/fs/locks.c +++ b/fs/locks.c @@ -197,13 +197,12 @@ out: static void locks_dump_ctx_list(struct list_head *list, char *list_type) { - struct file_lock *fl; + struct file_lock_core *flc; - list_for_each_entry(fl, list, c.flc_list) { - pr_warn("%s: fl_owner=%p fl_flags=0x%x fl_type=0x%x fl_pid=%u\n", list_type, - fl->c.flc_owner, fl->c.flc_flags, - fl->c.flc_type, 
fl->c.flc_pid); - } + list_for_each_entry(flc, list, flc_list) + pr_warn("%s: fl_owner=%p fl_flags=0x%x fl_type=0x%x fl_pid=%u\n", + list_type, flc->flc_owner, flc->flc_flags, + flc->flc_type, flc->flc_pid); } static void @@ -224,20 +223,19 @@ locks_check_ctx_lists(struct inode *inode) } static void -locks_check_ctx_file_list(struct file *filp, struct list_head *list, - char *list_type) +locks_check_ctx_file_list(struct file *filp, struct list_head *list, char *list_type) { - struct file_lock *fl; + struct file_lock_core *flc; struct inode *inode = file_inode(filp); - list_for_each_entry(fl, list, c.flc_list) - if (fl->c.flc_file == filp) + list_for_each_entry(flc, list, flc_list) + if (flc->flc_file == filp) pr_warn("Leaked %s lock on dev=0x%x:0x%x ino=0x%lx " " fl_owner=%p fl_flags=0x%x fl_type=0x%x fl_pid=%u\n", list_type, MAJOR(inode->i_sb->s_dev), MINOR(inode->i_sb->s_dev), inode->i_ino, - fl->c.flc_owner, fl->c.flc_flags, - fl->c.flc_type, fl->c.flc_pid); + flc->flc_owner, flc->flc_flags, + flc->flc_type, flc->flc_pid); } void @@ -274,11 +272,13 @@ EXPORT_SYMBOL_GPL(locks_alloc_lock); void locks_release_private(struct file_lock *fl) { - BUG_ON(waitqueue_active(&fl->c.flc_wait)); - BUG_ON(!list_empty(&fl->c.flc_list)); - BUG_ON(!list_empty(&fl->c.flc_blocked_requests)); - BUG_ON(!list_empty(&fl->c.flc_blocked_member)); - BUG_ON(!hlist_unhashed(&fl->c.flc_link)); + struct file_lock_core *flc = &fl->c; + + BUG_ON(waitqueue_active(&flc->flc_wait)); + BUG_ON(!list_empty(&flc->flc_list)); + BUG_ON(!list_empty(&flc->flc_blocked_requests)); + BUG_ON(!list_empty(&flc->flc_blocked_member)); + BUG_ON(!hlist_unhashed(&flc->flc_link)); if (fl->fl_ops) { if (fl->fl_ops->fl_release_private) @@ -288,8 +288,8 @@ void locks_release_private(struct file_lock *fl) if (fl->fl_lmops) { if (fl->fl_lmops->lm_put_owner) { - fl->fl_lmops->lm_put_owner(fl->c.flc_owner); - fl->c.flc_owner = NULL; + fl->fl_lmops->lm_put_owner(flc->flc_owner); + flc->flc_owner = NULL; } fl->fl_lmops = NULL; } @@ -305,16 +305,15 @@ EXPORT_SYMBOL_GPL(locks_release_private); * %true: @owner has at least one blocker * %false: @owner has no blockers */ -bool locks_owner_has_blockers(struct file_lock_context *flctx, - fl_owner_t owner) +bool locks_owner_has_blockers(struct file_lock_context *flctx, fl_owner_t owner) { - struct file_lock *fl; + struct file_lock_core *flc; spin_lock(&flctx->flc_lock); - list_for_each_entry(fl, &flctx->flc_posix, c.flc_list) { - if (fl->c.flc_owner != owner) + list_for_each_entry(flc, &flctx->flc_posix, flc_list) { + if (flc->flc_owner != owner) continue; - if (!list_empty(&fl->c.flc_blocked_requests)) { + if (!list_empty(&flc->flc_blocked_requests)) { spin_unlock(&flctx->flc_lock); return true; } -- cgit v1.2.3 From 9bb430a89d2dfce58a3d61a3a04e149109d3934e Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Wed, 31 Jan 2024 18:02:01 -0500 Subject: filelock: make posix_same_owner take file_lock_core pointers Change posix_same_owner to take struct file_lock_core pointers, and convert the callers to pass those in. 
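For illustration only, a minimal userspace sketch of the converted ownership check; the structures here are cut down to the one field the example needs and are not the kernel definitions:

#include <stdio.h>

typedef void *fl_owner_t;

struct file_lock_core {
	fl_owner_t flc_owner;		/* who owns the lock */
};

struct file_lock {
	struct file_lock_core c;	/* generic fields live in the core */
	long long fl_start, fl_end;	/* byte-range fields stay outside */
};

/* Mirrors the converted helper: only the embedded core is needed. */
static int posix_same_owner(struct file_lock_core *fl1,
			    struct file_lock_core *fl2)
{
	return fl1->flc_owner == fl2->flc_owner;
}

int main(void)
{
	int files_a, files_b;
	struct file_lock l1 = { .c.flc_owner = &files_a };
	struct file_lock l2 = { .c.flc_owner = &files_a };
	struct file_lock l3 = { .c.flc_owner = &files_b };

	/* Callers now pass &fl->c instead of the file_lock itself. */
	printf("%d %d\n", posix_same_owner(&l1.c, &l2.c),
	       posix_same_owner(&l1.c, &l3.c));
	return 0;
}

As the hunks below show, callers in fs/locks.c now pass &caller_fl->c, &request->c and so on rather than the containing file_lock.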
Signed-off-by: Jeff Layton Link: https://lore.kernel.org/r/20240131-flsplit-v3-20-c6129007ee8d@kernel.org Reviewed-by: NeilBrown Signed-off-by: Christian Brauner --- fs/locks.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) (limited to 'fs') diff --git a/fs/locks.c b/fs/locks.c index 5d25a3f53c9d..9ff331b55b7a 100644 --- a/fs/locks.c +++ b/fs/locks.c @@ -590,9 +590,9 @@ static inline int locks_overlap(struct file_lock *fl1, struct file_lock *fl2) /* * Check whether two locks have the same owner. */ -static int posix_same_owner(struct file_lock *fl1, struct file_lock *fl2) +static int posix_same_owner(struct file_lock_core *fl1, struct file_lock_core *fl2) { - return fl1->c.flc_owner == fl2->c.flc_owner; + return fl1->flc_owner == fl2->flc_owner; } /* Must be called with the flc_lock held! */ @@ -857,7 +857,7 @@ static bool posix_locks_conflict(struct file_lock *caller_fl, /* POSIX locks owned by the same process do not conflict with * each other. */ - if (posix_same_owner(caller_fl, sys_fl)) + if (posix_same_owner(&caller_fl->c, &sys_fl->c)) return false; /* Check whether they overlap */ @@ -875,7 +875,7 @@ static bool posix_test_locks_conflict(struct file_lock *caller_fl, { /* F_UNLCK checks any locks on the same fd. */ if (lock_is_unlock(caller_fl)) { - if (!posix_same_owner(caller_fl, sys_fl)) + if (!posix_same_owner(&caller_fl->c, &sys_fl->c)) return false; return locks_overlap(caller_fl, sys_fl); } @@ -978,7 +978,7 @@ static struct file_lock *what_owner_is_waiting_for(struct file_lock *block_fl) struct file_lock *fl; hash_for_each_possible(blocked_hash, fl, c.flc_link, posix_owner_key(block_fl)) { - if (posix_same_owner(fl, block_fl)) { + if (posix_same_owner(&fl->c, &block_fl->c)) { while (fl->c.flc_blocker) fl = fl->c.flc_blocker; return fl; @@ -1005,7 +1005,7 @@ static int posix_locks_deadlock(struct file_lock *caller_fl, while ((block_fl = what_owner_is_waiting_for(block_fl))) { if (i++ > MAX_DEADLK_ITERATIONS) return 0; - if (posix_same_owner(caller_fl, block_fl)) + if (posix_same_owner(&caller_fl->c, &block_fl->c)) return 1; } return 0; @@ -1178,13 +1178,13 @@ retry: /* Find the first old lock with the same owner as the new lock */ list_for_each_entry(fl, &ctx->flc_posix, c.flc_list) { - if (posix_same_owner(request, fl)) + if (posix_same_owner(&request->c, &fl->c)) break; } /* Process locks with this owner. */ list_for_each_entry_safe_from(fl, tmp, &ctx->flc_posix, c.flc_list) { - if (!posix_same_owner(request, fl)) + if (!posix_same_owner(&request->c, &fl->c)) break; /* Detect adjacent or overlapping regions (if same lock type) */ -- cgit v1.2.3 From ad399740bd41da5558d8b8b2b19481740ca063cb Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Wed, 31 Jan 2024 18:02:02 -0500 Subject: filelock: convert posix_owner_key to take file_lock_core arg Convert posix_owner_key to take struct file_lock_core pointer, and fix up the callers to pass one in. 
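The idea behind posix_owner_key() can likewise be shown with a self-contained sketch: the owner pointer value itself is the hash key used to bucket blocked POSIX waiters. The toy bucket_for() helper and table size below are invented for the example; the kernel feeds this key to hash_add()/hash_for_each_possible() on blocked_hash:

#include <stdio.h>

typedef void *fl_owner_t;

struct file_lock_core {
	fl_owner_t flc_owner;
};

/* Same shape as the converted kernel helper. */
static unsigned long posix_owner_key(struct file_lock_core *flc)
{
	return (unsigned long)flc->flc_owner;
}

#define BLOCKED_HASH_BUCKETS 16UL	/* made-up size for the sketch */

static unsigned long bucket_for(struct file_lock_core *flc)
{
	/* Crude stand-in for the kernel's hash function. */
	return (posix_owner_key(flc) >> 4) % BLOCKED_HASH_BUCKETS;
}

int main(void)
{
	int owner_a, owner_b;
	struct file_lock_core w1 = { .flc_owner = &owner_a };
	struct file_lock_core w2 = { .flc_owner = &owner_b };

	printf("w1 -> bucket %lu, w2 -> bucket %lu\n",
	       bucket_for(&w1), bucket_for(&w2));
	return 0;
}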
Signed-off-by: Jeff Layton Link: https://lore.kernel.org/r/20240131-flsplit-v3-21-c6129007ee8d@kernel.org Reviewed-by: NeilBrown Signed-off-by: Christian Brauner --- fs/locks.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/locks.c b/fs/locks.c index 9ff331b55b7a..1cfd02562e9f 100644 --- a/fs/locks.c +++ b/fs/locks.c @@ -630,9 +630,9 @@ static void locks_delete_global_locks(struct file_lock *fl) } static unsigned long -posix_owner_key(struct file_lock *fl) +posix_owner_key(struct file_lock_core *flc) { - return (unsigned long) fl->c.flc_owner; + return (unsigned long) flc->flc_owner; } static void locks_insert_global_blocked(struct file_lock *waiter) @@ -640,7 +640,7 @@ static void locks_insert_global_blocked(struct file_lock *waiter) lockdep_assert_held(&blocked_lock_lock); hash_add(blocked_hash, &waiter->c.flc_link, - posix_owner_key(waiter)); + posix_owner_key(&waiter->c)); } static void locks_delete_global_blocked(struct file_lock *waiter) @@ -977,7 +977,7 @@ static struct file_lock *what_owner_is_waiting_for(struct file_lock *block_fl) { struct file_lock *fl; - hash_for_each_possible(blocked_hash, fl, c.flc_link, posix_owner_key(block_fl)) { + hash_for_each_possible(blocked_hash, fl, c.flc_link, posix_owner_key(&block_fl->c)) { if (posix_same_owner(&fl->c, &block_fl->c)) { while (fl->c.flc_blocker) fl = fl->c.flc_blocker; -- cgit v1.2.3 From 4629172fd7290233d65a08ce2f2e2cbbcdbcd255 Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Wed, 31 Jan 2024 18:02:03 -0500 Subject: filelock: make locks_{insert,delete}_global_locks take file_lock_core arg Convert these functions to take a file_lock_core instead of a file_lock. Signed-off-by: Jeff Layton Link: https://lore.kernel.org/r/20240131-flsplit-v3-22-c6129007ee8d@kernel.org Reviewed-by: NeilBrown Signed-off-by: Christian Brauner --- fs/locks.c | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) (limited to 'fs') diff --git a/fs/locks.c b/fs/locks.c index 1cfd02562e9f..fa9b2beed0d7 100644 --- a/fs/locks.c +++ b/fs/locks.c @@ -596,20 +596,20 @@ static int posix_same_owner(struct file_lock_core *fl1, struct file_lock_core *f } /* Must be called with the flc_lock held! */ -static void locks_insert_global_locks(struct file_lock *fl) +static void locks_insert_global_locks(struct file_lock_core *flc) { struct file_lock_list_struct *fll = this_cpu_ptr(&file_lock_list); percpu_rwsem_assert_held(&file_rwsem); spin_lock(&fll->lock); - fl->c.flc_link_cpu = smp_processor_id(); - hlist_add_head(&fl->c.flc_link, &fll->hlist); + flc->flc_link_cpu = smp_processor_id(); + hlist_add_head(&flc->flc_link, &fll->hlist); spin_unlock(&fll->lock); } /* Must be called with the flc_lock held! */ -static void locks_delete_global_locks(struct file_lock *fl) +static void locks_delete_global_locks(struct file_lock_core *flc) { struct file_lock_list_struct *fll; @@ -620,12 +620,12 @@ static void locks_delete_global_locks(struct file_lock *fl) * is done while holding the flc_lock, and new insertions into the list * also require that it be held. 
*/ - if (hlist_unhashed(&fl->c.flc_link)) + if (hlist_unhashed(&flc->flc_link)) return; - fll = per_cpu_ptr(&file_lock_list, fl->c.flc_link_cpu); + fll = per_cpu_ptr(&file_lock_list, flc->flc_link_cpu); spin_lock(&fll->lock); - hlist_del_init(&fl->c.flc_link); + hlist_del_init(&flc->flc_link); spin_unlock(&fll->lock); } @@ -814,13 +814,13 @@ static void locks_insert_lock_ctx(struct file_lock *fl, struct list_head *before) { list_add_tail(&fl->c.flc_list, before); - locks_insert_global_locks(fl); + locks_insert_global_locks(&fl->c); } static void locks_unlink_lock_ctx(struct file_lock *fl) { - locks_delete_global_locks(fl); + locks_delete_global_locks(&fl->c); list_del_init(&fl->c.flc_list); locks_wake_up_blocks(fl); } -- cgit v1.2.3 From 1a6c75d4bbd297a2121e3ea1e21aa964d65cbc5a Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Wed, 31 Jan 2024 18:02:04 -0500 Subject: filelock: convert locks_{insert,delete}_global_blocked Have locks_insert_global_blocked and locks_delete_global_blocked take a struct file_lock_core pointer. Signed-off-by: Jeff Layton Link: https://lore.kernel.org/r/20240131-flsplit-v3-23-c6129007ee8d@kernel.org Reviewed-by: NeilBrown Signed-off-by: Christian Brauner --- fs/locks.c | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) (limited to 'fs') diff --git a/fs/locks.c b/fs/locks.c index fa9b2beed0d7..ef67a5a7bae8 100644 --- a/fs/locks.c +++ b/fs/locks.c @@ -635,19 +635,18 @@ posix_owner_key(struct file_lock_core *flc) return (unsigned long) flc->flc_owner; } -static void locks_insert_global_blocked(struct file_lock *waiter) +static void locks_insert_global_blocked(struct file_lock_core *waiter) { lockdep_assert_held(&blocked_lock_lock); - hash_add(blocked_hash, &waiter->c.flc_link, - posix_owner_key(&waiter->c)); + hash_add(blocked_hash, &waiter->flc_link, posix_owner_key(waiter)); } -static void locks_delete_global_blocked(struct file_lock *waiter) +static void locks_delete_global_blocked(struct file_lock_core *waiter) { lockdep_assert_held(&blocked_lock_lock); - hash_del(&waiter->c.flc_link); + hash_del(&waiter->flc_link); } /* Remove waiter from blocker's block list. @@ -657,7 +656,7 @@ static void locks_delete_global_blocked(struct file_lock *waiter) */ static void __locks_delete_block(struct file_lock *waiter) { - locks_delete_global_blocked(waiter); + locks_delete_global_blocked(&waiter->c); list_del_init(&waiter->c.flc_blocked_member); } @@ -768,7 +767,7 @@ new_blocker: list_add_tail(&waiter->c.flc_blocked_member, &blocker->c.flc_blocked_requests); if ((blocker->c.flc_flags & (FL_POSIX|FL_OFDLCK)) == FL_POSIX) - locks_insert_global_blocked(waiter); + locks_insert_global_blocked(&waiter->c); /* The requests in waiter->fl_blocked are known to conflict with * waiter, but might not conflict with blocker, or the requests -- cgit v1.2.3 From 1a62c22a156f7235acdbdb7ca0dcddf5062744b0 Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Wed, 31 Jan 2024 18:02:05 -0500 Subject: filelock: make __locks_delete_block and __locks_wake_up_blocks take file_lock_core Convert __locks_delete_block and __locks_wake_up_blocks to take a struct file_lock_core pointer. While we could do this in another way, we're going to need to add a file_lock() helper function later anyway, so introduce and use it now. 
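A reduced, standalone illustration of the file_lock() helper this patch introduces: it is the usual container_of() idiom, recovering the surrounding file_lock from a pointer to its embedded file_lock_core. The container_of() macro below is a simplified stand-in for the kernel's (which also type-checks the pointer), and the struct layouts are trimmed for the example:

#include <stddef.h>
#include <stdio.h>

/* Simplified container_of(); the kernel version adds type checking. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct file_lock_core {
	unsigned char flc_type;
};

struct file_lock {
	struct file_lock_core c;
	long long fl_start, fl_end;
};

static struct file_lock *file_lock(struct file_lock_core *flc)
{
	return container_of(flc, struct file_lock, c);
}

int main(void)
{
	struct file_lock fl = { .c.flc_type = 1, .fl_start = 0, .fl_end = 99 };
	struct file_lock_core *flc = &fl.c;

	/* Generic code can hand around flc and still reach the full
	 * lock when it needs the byte range. */
	printf("type=%u range=%lld-%lld\n",
	       (unsigned)file_lock(flc)->c.flc_type,
	       file_lock(flc)->fl_start, file_lock(flc)->fl_end);
	return 0;
}

In the __locks_wake_up_blocks() change below, this is what lets the blocked-waiter list be walked as file_lock_core entries while lm_notify() and locks_wake_up() still receive the full file_lock.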
Signed-off-by: Jeff Layton Link: https://lore.kernel.org/r/20240131-flsplit-v3-24-c6129007ee8d@kernel.org Reviewed-by: NeilBrown Signed-off-by: Christian Brauner --- fs/locks.c | 45 +++++++++++++++++++++++++++------------------ 1 file changed, 27 insertions(+), 18 deletions(-) (limited to 'fs') diff --git a/fs/locks.c b/fs/locks.c index ef67a5a7bae8..1e8b943bd7f9 100644 --- a/fs/locks.c +++ b/fs/locks.c @@ -69,6 +69,11 @@ #include +static struct file_lock *file_lock(struct file_lock_core *flc) +{ + return container_of(flc, struct file_lock, c); +} + static bool lease_breaking(struct file_lock *fl) { return fl->c.flc_flags & (FL_UNLOCK_PENDING | FL_DOWNGRADE_PENDING); @@ -654,31 +659,35 @@ static void locks_delete_global_blocked(struct file_lock_core *waiter) * * Must be called with blocked_lock_lock held. */ -static void __locks_delete_block(struct file_lock *waiter) +static void __locks_delete_block(struct file_lock_core *waiter) { - locks_delete_global_blocked(&waiter->c); - list_del_init(&waiter->c.flc_blocked_member); + locks_delete_global_blocked(waiter); + list_del_init(&waiter->flc_blocked_member); } -static void __locks_wake_up_blocks(struct file_lock *blocker) +static void __locks_wake_up_blocks(struct file_lock_core *blocker) { - while (!list_empty(&blocker->c.flc_blocked_requests)) { - struct file_lock *waiter; + while (!list_empty(&blocker->flc_blocked_requests)) { + struct file_lock_core *waiter; + struct file_lock *fl; + + waiter = list_first_entry(&blocker->flc_blocked_requests, + struct file_lock_core, flc_blocked_member); - waiter = list_first_entry(&blocker->c.flc_blocked_requests, - struct file_lock, c.flc_blocked_member); + fl = file_lock(waiter); __locks_delete_block(waiter); - if (waiter->fl_lmops && waiter->fl_lmops->lm_notify) - waiter->fl_lmops->lm_notify(waiter); + if ((waiter->flc_flags & (FL_POSIX | FL_FLOCK)) && + fl->fl_lmops && fl->fl_lmops->lm_notify) + fl->fl_lmops->lm_notify(fl); else - locks_wake_up(waiter); + locks_wake_up(fl); /* - * The setting of fl_blocker to NULL marks the "done" + * The setting of flc_blocker to NULL marks the "done" * point in deleting a block. Paired with acquire at the top * of locks_delete_block(). */ - smp_store_release(&waiter->c.flc_blocker, NULL); + smp_store_release(&waiter->flc_blocker, NULL); } } @@ -720,8 +729,8 @@ int locks_delete_block(struct file_lock *waiter) spin_lock(&blocked_lock_lock); if (waiter->c.flc_blocker) status = 0; - __locks_wake_up_blocks(waiter); - __locks_delete_block(waiter); + __locks_wake_up_blocks(&waiter->c); + __locks_delete_block(&waiter->c); /* * The setting of fl_blocker to NULL marks the "done" point in deleting @@ -773,7 +782,7 @@ new_blocker: * waiter, but might not conflict with blocker, or the requests * and lock which block it. So they all need to be woken. */ - __locks_wake_up_blocks(waiter); + __locks_wake_up_blocks(&waiter->c); } /* Must be called with flc_lock held. */ @@ -805,7 +814,7 @@ static void locks_wake_up_blocks(struct file_lock *blocker) return; spin_lock(&blocked_lock_lock); - __locks_wake_up_blocks(blocker); + __locks_wake_up_blocks(&blocker->c); spin_unlock(&blocked_lock_lock); } @@ -1159,7 +1168,7 @@ retry: * Ensure that we don't find any locks blocked on this * request during deadlock detection. 
*/ - __locks_wake_up_blocks(request); + __locks_wake_up_blocks(&request->c); if (likely(!posix_locks_deadlock(request, fl))) { error = FILE_LOCK_DEFERRED; __locks_insert_block(fl, request, -- cgit v1.2.3 From b6be3714005c3933886be71011f19119e219e77c Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Wed, 31 Jan 2024 18:02:06 -0500 Subject: filelock: convert __locks_insert_block, conflict and deadlock checks to use file_lock_core Have both __locks_insert_block and the deadlock and conflict checking functions take a struct file_lock_core pointer instead of a struct file_lock one. Also, change posix_locks_deadlock to return bool. Signed-off-by: Jeff Layton Link: https://lore.kernel.org/r/20240131-flsplit-v3-25-c6129007ee8d@kernel.org Reviewed-by: NeilBrown Signed-off-by: Christian Brauner --- fs/locks.c | 132 +++++++++++++++++++++++++++++++++---------------------------- 1 file changed, 72 insertions(+), 60 deletions(-) (limited to 'fs') diff --git a/fs/locks.c b/fs/locks.c index 1e8b943bd7f9..0dc1c9da858c 100644 --- a/fs/locks.c +++ b/fs/locks.c @@ -757,39 +757,41 @@ EXPORT_SYMBOL(locks_delete_block); * waiters, and add beneath any waiter that blocks the new waiter. * Thus wakeups don't happen until needed. */ -static void __locks_insert_block(struct file_lock *blocker, - struct file_lock *waiter, - bool conflict(struct file_lock *, - struct file_lock *)) +static void __locks_insert_block(struct file_lock *blocker_fl, + struct file_lock *waiter_fl, + bool conflict(struct file_lock_core *, + struct file_lock_core *)) { - struct file_lock *fl; - BUG_ON(!list_empty(&waiter->c.flc_blocked_member)); + struct file_lock_core *blocker = &blocker_fl->c; + struct file_lock_core *waiter = &waiter_fl->c; + struct file_lock_core *flc; + BUG_ON(!list_empty(&waiter->flc_blocked_member)); new_blocker: - list_for_each_entry(fl, &blocker->c.flc_blocked_requests, - c.flc_blocked_member) - if (conflict(fl, waiter)) { - blocker = fl; + list_for_each_entry(flc, &blocker->flc_blocked_requests, flc_blocked_member) + if (conflict(flc, waiter)) { + blocker = flc; goto new_blocker; } - waiter->c.flc_blocker = blocker; - list_add_tail(&waiter->c.flc_blocked_member, - &blocker->c.flc_blocked_requests); - if ((blocker->c.flc_flags & (FL_POSIX|FL_OFDLCK)) == FL_POSIX) - locks_insert_global_blocked(&waiter->c); + waiter->flc_blocker = file_lock(blocker); + list_add_tail(&waiter->flc_blocked_member, + &blocker->flc_blocked_requests); - /* The requests in waiter->fl_blocked are known to conflict with + if ((blocker->flc_flags & (FL_POSIX|FL_OFDLCK)) == (FL_POSIX|FL_OFDLCK)) + locks_insert_global_blocked(waiter); + + /* The requests in waiter->flc_blocked are known to conflict with * waiter, but might not conflict with blocker, or the requests * and lock which block it. So they all need to be woken. */ - __locks_wake_up_blocks(&waiter->c); + __locks_wake_up_blocks(waiter); } /* Must be called with flc_lock held. */ static void locks_insert_block(struct file_lock *blocker, struct file_lock *waiter, - bool conflict(struct file_lock *, - struct file_lock *)) + bool conflict(struct file_lock_core *, + struct file_lock_core *)) { spin_lock(&blocked_lock_lock); __locks_insert_block(blocker, waiter, conflict); @@ -846,12 +848,12 @@ locks_delete_lock_ctx(struct file_lock *fl, struct list_head *dispose) /* Determine if lock sys_fl blocks lock caller_fl. Common functionality * checks for shared/exclusive status of overlapping locks. 
*/ -static bool locks_conflict(struct file_lock *caller_fl, - struct file_lock *sys_fl) +static bool locks_conflict(struct file_lock_core *caller_flc, + struct file_lock_core *sys_flc) { - if (lock_is_write(sys_fl)) + if (sys_flc->flc_type == F_WRLCK) return true; - if (lock_is_write(caller_fl)) + if (caller_flc->flc_type == F_WRLCK) return true; return false; } @@ -859,20 +861,23 @@ static bool locks_conflict(struct file_lock *caller_fl, /* Determine if lock sys_fl blocks lock caller_fl. POSIX specific * checking before calling the locks_conflict(). */ -static bool posix_locks_conflict(struct file_lock *caller_fl, - struct file_lock *sys_fl) +static bool posix_locks_conflict(struct file_lock_core *caller_flc, + struct file_lock_core *sys_flc) { + struct file_lock *caller_fl = file_lock(caller_flc); + struct file_lock *sys_fl = file_lock(sys_flc); + /* POSIX locks owned by the same process do not conflict with * each other. */ - if (posix_same_owner(&caller_fl->c, &sys_fl->c)) + if (posix_same_owner(caller_flc, sys_flc)) return false; /* Check whether they overlap */ if (!locks_overlap(caller_fl, sys_fl)) return false; - return locks_conflict(caller_fl, sys_fl); + return locks_conflict(caller_flc, sys_flc); } /* Determine if lock sys_fl blocks lock caller_fl. Used on xx_GETLK @@ -881,28 +886,31 @@ static bool posix_locks_conflict(struct file_lock *caller_fl, static bool posix_test_locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl) { + struct file_lock_core *caller = &caller_fl->c; + struct file_lock_core *sys = &sys_fl->c; + /* F_UNLCK checks any locks on the same fd. */ if (lock_is_unlock(caller_fl)) { - if (!posix_same_owner(&caller_fl->c, &sys_fl->c)) + if (!posix_same_owner(caller, sys)) return false; return locks_overlap(caller_fl, sys_fl); } - return posix_locks_conflict(caller_fl, sys_fl); + return posix_locks_conflict(caller, sys); } /* Determine if lock sys_fl blocks lock caller_fl. FLOCK specific * checking before calling the locks_conflict(). */ -static bool flock_locks_conflict(struct file_lock *caller_fl, - struct file_lock *sys_fl) +static bool flock_locks_conflict(struct file_lock_core *caller_flc, + struct file_lock_core *sys_flc) { /* FLOCK locks referring to the same filp do not conflict with * each other. */ - if (caller_fl->c.flc_file == sys_fl->c.flc_file) + if (caller_flc->flc_file == sys_flc->flc_file) return false; - return locks_conflict(caller_fl, sys_fl); + return locks_conflict(caller_flc, sys_flc); } void @@ -980,25 +988,27 @@ EXPORT_SYMBOL(posix_test_lock); #define MAX_DEADLK_ITERATIONS 10 -/* Find a lock that the owner of the given block_fl is blocking on. */ -static struct file_lock *what_owner_is_waiting_for(struct file_lock *block_fl) +/* Find a lock that the owner of the given @blocker is blocking on. */ +static struct file_lock_core *what_owner_is_waiting_for(struct file_lock_core *blocker) { - struct file_lock *fl; + struct file_lock_core *flc; - hash_for_each_possible(blocked_hash, fl, c.flc_link, posix_owner_key(&block_fl->c)) { - if (posix_same_owner(&fl->c, &block_fl->c)) { - while (fl->c.flc_blocker) - fl = fl->c.flc_blocker; - return fl; + hash_for_each_possible(blocked_hash, flc, flc_link, posix_owner_key(blocker)) { + if (posix_same_owner(flc, blocker)) { + while (flc->flc_blocker) + flc = &flc->flc_blocker->c; + return flc; } } return NULL; } /* Must be called with the blocked_lock_lock held! 
*/ -static int posix_locks_deadlock(struct file_lock *caller_fl, - struct file_lock *block_fl) +static bool posix_locks_deadlock(struct file_lock *caller_fl, + struct file_lock *block_fl) { + struct file_lock_core *caller = &caller_fl->c; + struct file_lock_core *blocker = &block_fl->c; int i = 0; lockdep_assert_held(&blocked_lock_lock); @@ -1007,16 +1017,16 @@ static int posix_locks_deadlock(struct file_lock *caller_fl, * This deadlock detector can't reasonably detect deadlocks with * FL_OFDLCK locks, since they aren't owned by a process, per-se. */ - if (caller_fl->c.flc_flags & FL_OFDLCK) - return 0; + if (caller->flc_flags & FL_OFDLCK) + return false; - while ((block_fl = what_owner_is_waiting_for(block_fl))) { + while ((blocker = what_owner_is_waiting_for(blocker))) { if (i++ > MAX_DEADLK_ITERATIONS) - return 0; - if (posix_same_owner(&caller_fl->c, &block_fl->c)) - return 1; + return false; + if (posix_same_owner(caller, blocker)) + return true; } - return 0; + return false; } /* Try to create a FLOCK lock on filp. We always insert new FLOCK locks @@ -1071,7 +1081,7 @@ static int flock_lock_inode(struct inode *inode, struct file_lock *request) find_conflict: list_for_each_entry(fl, &ctx->flc_flock, c.flc_list) { - if (!flock_locks_conflict(request, fl)) + if (!flock_locks_conflict(&request->c, &fl->c)) continue; error = -EAGAIN; if (!(request->c.flc_flags & FL_SLEEP)) @@ -1140,7 +1150,7 @@ retry: */ if (request->c.flc_type != F_UNLCK) { list_for_each_entry(fl, &ctx->flc_posix, c.flc_list) { - if (!posix_locks_conflict(request, fl)) + if (!posix_locks_conflict(&request->c, &fl->c)) continue; if (fl->fl_lmops && fl->fl_lmops->lm_lock_expirable && (*fl->fl_lmops->lm_lock_expirable)(fl)) { @@ -1442,23 +1452,25 @@ static void time_out_leases(struct inode *inode, struct list_head *dispose) } } -static bool leases_conflict(struct file_lock *lease, struct file_lock *breaker) +static bool leases_conflict(struct file_lock_core *lc, struct file_lock_core *bc) { bool rc; + struct file_lock *lease = file_lock(lc); + struct file_lock *breaker = file_lock(bc); if (lease->fl_lmops->lm_breaker_owns_lease && lease->fl_lmops->lm_breaker_owns_lease(lease)) return false; - if ((breaker->c.flc_flags & FL_LAYOUT) != (lease->c.flc_flags & FL_LAYOUT)) { + if ((bc->flc_flags & FL_LAYOUT) != (lc->flc_flags & FL_LAYOUT)) { rc = false; goto trace; } - if ((breaker->c.flc_flags & FL_DELEG) && (lease->c.flc_flags & FL_LEASE)) { + if ((bc->flc_flags & FL_DELEG) && (lc->flc_flags & FL_LEASE)) { rc = false; goto trace; } - rc = locks_conflict(breaker, lease); + rc = locks_conflict(bc, lc); trace: trace_leases_conflict(rc, lease, breaker); return rc; @@ -1468,12 +1480,12 @@ static bool any_leases_conflict(struct inode *inode, struct file_lock *breaker) { struct file_lock_context *ctx = inode->i_flctx; - struct file_lock *fl; + struct file_lock_core *flc; lockdep_assert_held(&ctx->flc_lock); - list_for_each_entry(fl, &ctx->flc_lease, c.flc_list) { - if (leases_conflict(fl, breaker)) + list_for_each_entry(flc, &ctx->flc_lease, flc_list) { + if (leases_conflict(flc, &breaker->c)) return true; } return false; @@ -1529,7 +1541,7 @@ int __break_lease(struct inode *inode, unsigned int mode, unsigned int type) } list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, c.flc_list) { - if (!leases_conflict(fl, new_fl)) + if (!leases_conflict(&fl->c, &new_fl->c)) continue; if (want_write) { if (fl->c.flc_flags & FL_UNLOCK_PENDING) -- cgit v1.2.3 From b6aaba5b76e9596cb4d62d081cca41e114becacc Mon Sep 17 00:00:00 2001 From: Jeff Layton 
Date: Wed, 31 Jan 2024 18:02:07 -0500 Subject: filelock: convert fl_blocker to file_lock_core Both locks and leases deal with fl_blocker. Switch the fl_blocker pointer in struct file_lock_core to point to the file_lock_core of the blocker instead of a file_lock structure. Signed-off-by: Jeff Layton Link: https://lore.kernel.org/r/20240131-flsplit-v3-26-c6129007ee8d@kernel.org Reviewed-by: NeilBrown Signed-off-by: Christian Brauner --- fs/locks.c | 16 ++++++++-------- include/linux/filelock.h | 2 +- include/trace/events/filelock.h | 4 ++-- 3 files changed, 11 insertions(+), 11 deletions(-) (limited to 'fs') diff --git a/fs/locks.c b/fs/locks.c index 0dc1c9da858c..0aa1c94671cd 100644 --- a/fs/locks.c +++ b/fs/locks.c @@ -400,7 +400,7 @@ static void locks_move_blocks(struct file_lock *new, struct file_lock *fl) /* * As ctx->flc_lock is held, new requests cannot be added to - * ->fl_blocked_requests, so we don't need a lock to check if it + * ->flc_blocked_requests, so we don't need a lock to check if it * is empty. */ if (list_empty(&fl->c.flc_blocked_requests)) @@ -410,7 +410,7 @@ static void locks_move_blocks(struct file_lock *new, struct file_lock *fl) &new->c.flc_blocked_requests); list_for_each_entry(f, &new->c.flc_blocked_requests, c.flc_blocked_member) - f->c.flc_blocker = new; + f->c.flc_blocker = &new->c; spin_unlock(&blocked_lock_lock); } @@ -773,7 +773,7 @@ new_blocker: blocker = flc; goto new_blocker; } - waiter->flc_blocker = file_lock(blocker); + waiter->flc_blocker = blocker; list_add_tail(&waiter->flc_blocked_member, &blocker->flc_blocked_requests); @@ -996,7 +996,7 @@ static struct file_lock_core *what_owner_is_waiting_for(struct file_lock_core *b hash_for_each_possible(blocked_hash, flc, flc_link, posix_owner_key(blocker)) { if (posix_same_owner(flc, blocker)) { while (flc->flc_blocker) - flc = &flc->flc_blocker->c; + flc = flc->flc_blocker; return flc; } } @@ -2798,9 +2798,9 @@ static struct file_lock *get_next_blocked_member(struct file_lock *node) /* Next member in the linked list could be itself */ tmp = list_next_entry(node, c.flc_blocked_member); - if (list_entry_is_head(tmp, &node->c.flc_blocker->c.flc_blocked_requests, - c.flc_blocked_member) - || tmp == node) { + if (list_entry_is_head(tmp, &node->c.flc_blocker->flc_blocked_requests, + c.flc_blocked_member) + || tmp == node) { return NULL; } @@ -2841,7 +2841,7 @@ static int locks_show(struct seq_file *f, void *v) tmp = get_next_blocked_member(cur); /* Fall back to parent node */ while (tmp == NULL && cur->c.flc_blocker != NULL) { - cur = cur->c.flc_blocker; + cur = file_lock(cur->c.flc_blocker); level--; tmp = get_next_blocked_member(cur); } diff --git a/include/linux/filelock.h b/include/linux/filelock.h index d1fba98744a7..701780734ae1 100644 --- a/include/linux/filelock.h +++ b/include/linux/filelock.h @@ -87,7 +87,7 @@ bool opens_in_grace(struct net *); */ struct file_lock_core { - struct file_lock *flc_blocker; /* The lock that is blocking us */ + struct file_lock_core *flc_blocker; /* The lock that is blocking us */ struct list_head flc_list; /* link into file_lock_context */ struct hlist_node flc_link; /* node in global lists */ struct list_head flc_blocked_requests; /* list of requests with diff --git a/include/trace/events/filelock.h b/include/trace/events/filelock.h index 4be341b5ead0..c778061c6249 100644 --- a/include/trace/events/filelock.h +++ b/include/trace/events/filelock.h @@ -68,7 +68,7 @@ DECLARE_EVENT_CLASS(filelock_lock, __field(struct file_lock *, fl) __field(unsigned long, i_ino) 
__field(dev_t, s_dev) - __field(struct file_lock *, blocker) + __field(struct file_lock_core *, blocker) __field(fl_owner_t, owner) __field(unsigned int, pid) __field(unsigned int, flags) @@ -125,7 +125,7 @@ DECLARE_EVENT_CLASS(filelock_lease, __field(struct file_lock *, fl) __field(unsigned long, i_ino) __field(dev_t, s_dev) - __field(struct file_lock *, blocker) + __field(struct file_lock_core *, blocker) __field(fl_owner_t, owner) __field(unsigned int, flags) __field(unsigned char, type) -- cgit v1.2.3 From e8a166cf3d72373d71af45438b8cbb05dc5e3cf6 Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Wed, 31 Jan 2024 18:02:08 -0500 Subject: filelock: clean up locks_delete_block internals Rework the internals of locks_delete_block to use struct file_lock_core (mostly just for clarity's sake). The prototype is not changed. Signed-off-by: Jeff Layton Link: https://lore.kernel.org/r/20240131-flsplit-v3-27-c6129007ee8d@kernel.org Reviewed-by: NeilBrown Signed-off-by: Christian Brauner --- fs/locks.c | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) (limited to 'fs') diff --git a/fs/locks.c b/fs/locks.c index 0aa1c94671cd..a2be1e0b5a94 100644 --- a/fs/locks.c +++ b/fs/locks.c @@ -697,9 +697,10 @@ static void __locks_wake_up_blocks(struct file_lock_core *blocker) * * lockd/nfsd need to disconnect the lock while working on it. */ -int locks_delete_block(struct file_lock *waiter) +int locks_delete_block(struct file_lock *waiter_fl) { int status = -ENOENT; + struct file_lock_core *waiter = &waiter_fl->c; /* * If fl_blocker is NULL, it won't be set again as this thread "owns" @@ -722,21 +723,21 @@ int locks_delete_block(struct file_lock *waiter) * no new locks can be inserted into its fl_blocked_requests list, and * can avoid doing anything further if the list is empty. */ - if (!smp_load_acquire(&waiter->c.flc_blocker) && - list_empty(&waiter->c.flc_blocked_requests)) + if (!smp_load_acquire(&waiter->flc_blocker) && + list_empty(&waiter->flc_blocked_requests)) return status; spin_lock(&blocked_lock_lock); - if (waiter->c.flc_blocker) + if (waiter->flc_blocker) status = 0; - __locks_wake_up_blocks(&waiter->c); - __locks_delete_block(&waiter->c); + __locks_wake_up_blocks(waiter); + __locks_delete_block(waiter); /* * The setting of fl_blocker to NULL marks the "done" point in deleting * a block. Paired with acquire at the top of this function. */ - smp_store_release(&waiter->c.flc_blocker, NULL); + smp_store_release(&waiter->flc_blocker, NULL); spin_unlock(&blocked_lock_lock); return status; } -- cgit v1.2.3 From 269a6194dcbaa5d8e61f32000486fc0f2acf08d0 Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Wed, 31 Jan 2024 18:02:09 -0500 Subject: filelock: reorganize locks_delete_block and __locks_insert_block Rename the old __locks_delete_block to __locks_unlink_lock. Rename change old locks_delete_block function to __locks_delete_block and have it take a file_lock_core. Make locks_delete_block a simple wrapper around __locks_delete_block. Also, change __locks_insert_block to take struct file_lock_core, and fix up its callers. 
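Pieced together from the diff below, the three functions end up layered as follows (signatures only; a condensed view for orientation, not an extra hunk):

    /* unlink a waiter from the global blocked_hash and its blocker's list */
    static void __locks_unlink_block(struct file_lock_core *waiter);

    /* full removal: wake dependents, unlink, clear flc_blocker;
     * this is the old locks_delete_block() body, now taking a core */
    static int __locks_delete_block(struct file_lock_core *waiter);

    /* the exported interface keeps its old prototype and just forwards */
    int locks_delete_block(struct file_lock *waiter)
    {
            return __locks_delete_block(&waiter->c);
    }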
Signed-off-by: Jeff Layton Link: https://lore.kernel.org/r/20240131-flsplit-v3-28-c6129007ee8d@kernel.org Reviewed-by: NeilBrown Signed-off-by: Christian Brauner --- fs/locks.c | 42 ++++++++++++++++++++++-------------------- 1 file changed, 22 insertions(+), 20 deletions(-) (limited to 'fs') diff --git a/fs/locks.c b/fs/locks.c index a2be1e0b5a94..c8fd2964dd98 100644 --- a/fs/locks.c +++ b/fs/locks.c @@ -659,7 +659,7 @@ static void locks_delete_global_blocked(struct file_lock_core *waiter) * * Must be called with blocked_lock_lock held. */ -static void __locks_delete_block(struct file_lock_core *waiter) +static void __locks_unlink_block(struct file_lock_core *waiter) { locks_delete_global_blocked(waiter); list_del_init(&waiter->flc_blocked_member); @@ -675,7 +675,7 @@ static void __locks_wake_up_blocks(struct file_lock_core *blocker) struct file_lock_core, flc_blocked_member); fl = file_lock(waiter); - __locks_delete_block(waiter); + __locks_unlink_block(waiter); if ((waiter->flc_flags & (FL_POSIX | FL_FLOCK)) && fl->fl_lmops && fl->fl_lmops->lm_notify) fl->fl_lmops->lm_notify(fl); @@ -691,16 +691,9 @@ static void __locks_wake_up_blocks(struct file_lock_core *blocker) } } -/** - * locks_delete_block - stop waiting for a file lock - * @waiter: the lock which was waiting - * - * lockd/nfsd need to disconnect the lock while working on it. - */ -int locks_delete_block(struct file_lock *waiter_fl) +static int __locks_delete_block(struct file_lock_core *waiter) { int status = -ENOENT; - struct file_lock_core *waiter = &waiter_fl->c; /* * If fl_blocker is NULL, it won't be set again as this thread "owns" @@ -731,7 +724,7 @@ int locks_delete_block(struct file_lock *waiter_fl) if (waiter->flc_blocker) status = 0; __locks_wake_up_blocks(waiter); - __locks_delete_block(waiter); + __locks_unlink_block(waiter); /* * The setting of fl_blocker to NULL marks the "done" point in deleting @@ -741,6 +734,17 @@ int locks_delete_block(struct file_lock *waiter_fl) spin_unlock(&blocked_lock_lock); return status; } + +/** + * locks_delete_block - stop waiting for a file lock + * @waiter: the lock which was waiting + * + * lockd/nfsd need to disconnect the lock while working on it. + */ +int locks_delete_block(struct file_lock *waiter) +{ + return __locks_delete_block(&waiter->c); +} EXPORT_SYMBOL(locks_delete_block); /* Insert waiter into blocker's block list. @@ -758,13 +762,11 @@ EXPORT_SYMBOL(locks_delete_block); * waiters, and add beneath any waiter that blocks the new waiter. * Thus wakeups don't happen until needed. */ -static void __locks_insert_block(struct file_lock *blocker_fl, - struct file_lock *waiter_fl, +static void __locks_insert_block(struct file_lock_core *blocker, + struct file_lock_core *waiter, bool conflict(struct file_lock_core *, struct file_lock_core *)) { - struct file_lock_core *blocker = &blocker_fl->c; - struct file_lock_core *waiter = &waiter_fl->c; struct file_lock_core *flc; BUG_ON(!list_empty(&waiter->flc_blocked_member)); @@ -789,8 +791,8 @@ new_blocker: } /* Must be called with flc_lock held. 
*/ -static void locks_insert_block(struct file_lock *blocker, - struct file_lock *waiter, +static void locks_insert_block(struct file_lock_core *blocker, + struct file_lock_core *waiter, bool conflict(struct file_lock_core *, struct file_lock_core *)) { @@ -1088,7 +1090,7 @@ find_conflict: if (!(request->c.flc_flags & FL_SLEEP)) goto out; error = FILE_LOCK_DEFERRED; - locks_insert_block(fl, request, flock_locks_conflict); + locks_insert_block(&fl->c, &request->c, flock_locks_conflict); goto out; } if (request->c.flc_flags & FL_ACCESS) @@ -1182,7 +1184,7 @@ retry: __locks_wake_up_blocks(&request->c); if (likely(!posix_locks_deadlock(request, fl))) { error = FILE_LOCK_DEFERRED; - __locks_insert_block(fl, request, + __locks_insert_block(&fl->c, &request->c, posix_locks_conflict); } spin_unlock(&blocked_lock_lock); @@ -1575,7 +1577,7 @@ restart: break_time -= jiffies; if (break_time == 0) break_time++; - locks_insert_block(fl, new_fl, leases_conflict); + locks_insert_block(&fl->c, &new_fl->c, leases_conflict); trace_break_lease_block(inode, new_fl); spin_unlock(&ctx->flc_lock); percpu_up_read(&file_rwsem); -- cgit v1.2.3 From d9077f7bad141df143cc4fa000a68a868bcea7c0 Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Wed, 31 Jan 2024 18:02:10 -0500 Subject: filelock: make assign_type helper take a file_lock_core pointer Have assign_type take struct file_lock_core instead of file_lock. Signed-off-by: Jeff Layton Link: https://lore.kernel.org/r/20240131-flsplit-v3-29-c6129007ee8d@kernel.org Reviewed-by: NeilBrown Signed-off-by: Christian Brauner --- fs/locks.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'fs') diff --git a/fs/locks.c b/fs/locks.c index c8fd2964dd98..6892511ed89b 100644 --- a/fs/locks.c +++ b/fs/locks.c @@ -439,13 +439,13 @@ static void flock_make_lock(struct file *filp, struct file_lock *fl, int type) fl->fl_end = OFFSET_MAX; } -static int assign_type(struct file_lock *fl, int type) +static int assign_type(struct file_lock_core *flc, int type) { switch (type) { case F_RDLCK: case F_WRLCK: case F_UNLCK: - fl->c.flc_type = type; + flc->flc_type = type; break; default: return -EINVAL; @@ -497,7 +497,7 @@ static int flock64_to_posix_lock(struct file *filp, struct file_lock *fl, fl->fl_ops = NULL; fl->fl_lmops = NULL; - return assign_type(fl, l->l_type); + return assign_type(&fl->c, l->l_type); } /* Verify a "struct flock" and copy it to a "struct file_lock" as a POSIX @@ -552,7 +552,7 @@ static const struct lock_manager_operations lease_manager_ops = { */ static int lease_init(struct file *filp, int type, struct file_lock *fl) { - if (assign_type(fl, type) != 0) + if (assign_type(&fl->c, type) != 0) return -EINVAL; fl->c.flc_owner = filp; @@ -1409,7 +1409,7 @@ static void lease_clear_pending(struct file_lock *fl, int arg) /* We already had a lease on this file; just change its type */ int lease_modify(struct file_lock *fl, int arg, struct list_head *dispose) { - int error = assign_type(fl, arg); + int error = assign_type(&fl->c, arg); if (error) return error; -- cgit v1.2.3 From 347d49fdf36c5e2411afdb312935d88183fe5811 Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Wed, 31 Jan 2024 18:02:11 -0500 Subject: filelock: convert locks_wake_up_blocks to take a file_lock_core pointer Have locks_wake_up_blocks take a file_lock_core pointer, and fix up the callers to pass one in. 
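As context for why the lockless list_empty() check in locks_wake_up_blocks() is safe, the lock ordering used throughout this file is sketched below; this is assembled from the surrounding code and comments, not a new rule introduced by this patch:

    /*
     *   percpu file_rwsem (read side)
     *     -> ctx->flc_lock        protects the per-inode lock lists; new
     *                             blocked requests are only added under it
     *       -> blocked_lock_lock  protects the global blocked_hash and
     *                             removal from blocked-request lists
     *
     * locks_wake_up_blocks() runs with flc_lock held and only takes
     * blocked_lock_lock when blocker->flc_blocked_requests is non-empty.
     */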
Signed-off-by: Jeff Layton Link: https://lore.kernel.org/r/20240131-flsplit-v3-30-c6129007ee8d@kernel.org Reviewed-by: NeilBrown Signed-off-by: Christian Brauner --- fs/locks.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) (limited to 'fs') diff --git a/fs/locks.c b/fs/locks.c index 6892511ed89b..9f3670ba0880 100644 --- a/fs/locks.c +++ b/fs/locks.c @@ -806,7 +806,7 @@ static void locks_insert_block(struct file_lock_core *blocker, * * Must be called with the inode->flc_lock held! */ -static void locks_wake_up_blocks(struct file_lock *blocker) +static void locks_wake_up_blocks(struct file_lock_core *blocker) { /* * Avoid taking global lock if list is empty. This is safe since new @@ -815,11 +815,11 @@ static void locks_wake_up_blocks(struct file_lock *blocker) * fl_blocked_requests list does not require the flc_lock, so we must * recheck list_empty() after acquiring the blocked_lock_lock. */ - if (list_empty(&blocker->c.flc_blocked_requests)) + if (list_empty(&blocker->flc_blocked_requests)) return; spin_lock(&blocked_lock_lock); - __locks_wake_up_blocks(&blocker->c); + __locks_wake_up_blocks(blocker); spin_unlock(&blocked_lock_lock); } @@ -835,7 +835,7 @@ locks_unlink_lock_ctx(struct file_lock *fl) { locks_delete_global_locks(&fl->c); list_del_init(&fl->c.flc_list); - locks_wake_up_blocks(fl); + locks_wake_up_blocks(&fl->c); } static void @@ -1328,11 +1328,11 @@ retry: locks_insert_lock_ctx(left, &fl->c.flc_list); } right->fl_start = request->fl_end + 1; - locks_wake_up_blocks(right); + locks_wake_up_blocks(&right->c); } if (left) { left->fl_end = request->fl_start - 1; - locks_wake_up_blocks(left); + locks_wake_up_blocks(&left->c); } out: spin_unlock(&ctx->flc_lock); @@ -1414,7 +1414,7 @@ int lease_modify(struct file_lock *fl, int arg, struct list_head *dispose) if (error) return error; lease_clear_pending(fl, arg); - locks_wake_up_blocks(fl); + locks_wake_up_blocks(&fl->c); if (arg == F_UNLCK) { struct file *filp = fl->c.flc_file; -- cgit v1.2.3 From 7c18509bdaefe2164e62829d614ef9ea429f29d2 Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Wed, 31 Jan 2024 18:02:12 -0500 Subject: filelock: convert locks_insert_lock_ctx and locks_delete_lock_ctx Have these functions take a file_lock_core pointer instead of a file_lock. 
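For context, the usual caller pattern around locks_delete_lock_ctx() looks roughly like the fragment below (simplified in the spirit of posix_lock_inode(); locks_dispose_list() is the existing helper in fs/locks.c that frees what was queued, and is not visible in these hunks):

    LIST_HEAD(dispose);

    spin_lock(&ctx->flc_lock);
    /* unlink from the context and global lists now ... */
    locks_delete_lock_ctx(&fl->c, &dispose);
    spin_unlock(&ctx->flc_lock);
    /* ... but defer the actual freeing until the spinlock is dropped */
    locks_dispose_list(&dispose);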
Signed-off-by: Jeff Layton Link: https://lore.kernel.org/r/20240131-flsplit-v3-31-c6129007ee8d@kernel.org Reviewed-by: NeilBrown Signed-off-by: Christian Brauner --- fs/locks.c | 44 ++++++++++++++++++++++---------------------- 1 file changed, 22 insertions(+), 22 deletions(-) (limited to 'fs') diff --git a/fs/locks.c b/fs/locks.c index 9f3670ba0880..50d02a53ca75 100644 --- a/fs/locks.c +++ b/fs/locks.c @@ -824,28 +824,28 @@ static void locks_wake_up_blocks(struct file_lock_core *blocker) } static void -locks_insert_lock_ctx(struct file_lock *fl, struct list_head *before) +locks_insert_lock_ctx(struct file_lock_core *fl, struct list_head *before) { - list_add_tail(&fl->c.flc_list, before); - locks_insert_global_locks(&fl->c); + list_add_tail(&fl->flc_list, before); + locks_insert_global_locks(fl); } static void -locks_unlink_lock_ctx(struct file_lock *fl) +locks_unlink_lock_ctx(struct file_lock_core *fl) { - locks_delete_global_locks(&fl->c); - list_del_init(&fl->c.flc_list); - locks_wake_up_blocks(&fl->c); + locks_delete_global_locks(fl); + list_del_init(&fl->flc_list); + locks_wake_up_blocks(fl); } static void -locks_delete_lock_ctx(struct file_lock *fl, struct list_head *dispose) +locks_delete_lock_ctx(struct file_lock_core *fl, struct list_head *dispose) { locks_unlink_lock_ctx(fl); if (dispose) - list_add(&fl->c.flc_list, dispose); + list_add(&fl->flc_list, dispose); else - locks_free_lock(fl); + locks_free_lock(file_lock(fl)); } /* Determine if lock sys_fl blocks lock caller_fl. Common functionality @@ -1072,7 +1072,7 @@ static int flock_lock_inode(struct inode *inode, struct file_lock *request) if (request->c.flc_type == fl->c.flc_type) goto out; found = true; - locks_delete_lock_ctx(fl, &dispose); + locks_delete_lock_ctx(&fl->c, &dispose); break; } @@ -1097,7 +1097,7 @@ find_conflict: goto out; locks_copy_lock(new_fl, request); locks_move_blocks(new_fl, request); - locks_insert_lock_ctx(new_fl, &ctx->flc_flock); + locks_insert_lock_ctx(&new_fl->c, &ctx->flc_flock); new_fl = NULL; error = 0; @@ -1236,7 +1236,7 @@ retry: else request->fl_end = fl->fl_end; if (added) { - locks_delete_lock_ctx(fl, &dispose); + locks_delete_lock_ctx(&fl->c, &dispose); continue; } request = fl; @@ -1265,7 +1265,7 @@ retry: * one (This may happen several times). 
*/ if (added) { - locks_delete_lock_ctx(fl, &dispose); + locks_delete_lock_ctx(&fl->c, &dispose); continue; } /* @@ -1282,9 +1282,9 @@ retry: locks_move_blocks(new_fl, request); request = new_fl; new_fl = NULL; - locks_insert_lock_ctx(request, + locks_insert_lock_ctx(&request->c, &fl->c.flc_list); - locks_delete_lock_ctx(fl, &dispose); + locks_delete_lock_ctx(&fl->c, &dispose); added = true; } } @@ -1313,7 +1313,7 @@ retry: } locks_copy_lock(new_fl, request); locks_move_blocks(new_fl, request); - locks_insert_lock_ctx(new_fl, &fl->c.flc_list); + locks_insert_lock_ctx(&new_fl->c, &fl->c.flc_list); fl = new_fl; new_fl = NULL; } @@ -1325,7 +1325,7 @@ retry: left = new_fl2; new_fl2 = NULL; locks_copy_lock(left, right); - locks_insert_lock_ctx(left, &fl->c.flc_list); + locks_insert_lock_ctx(&left->c, &fl->c.flc_list); } right->fl_start = request->fl_end + 1; locks_wake_up_blocks(&right->c); @@ -1425,7 +1425,7 @@ int lease_modify(struct file_lock *fl, int arg, struct list_head *dispose) printk(KERN_ERR "locks_delete_lock: fasync == %p\n", fl->fl_fasync); fl->fl_fasync = NULL; } - locks_delete_lock_ctx(fl, dispose); + locks_delete_lock_ctx(&fl->c, dispose); } return 0; } @@ -1558,7 +1558,7 @@ int __break_lease(struct inode *inode, unsigned int mode, unsigned int type) fl->fl_downgrade_time = break_time; } if (fl->fl_lmops->lm_break(fl)) - locks_delete_lock_ctx(fl, &dispose); + locks_delete_lock_ctx(&fl->c, &dispose); } if (list_empty(&ctx->flc_lease)) @@ -1816,7 +1816,7 @@ generic_add_lease(struct file *filp, int arg, struct file_lock **flp, void **pri if (!leases_enable) goto out; - locks_insert_lock_ctx(lease, &ctx->flc_lease); + locks_insert_lock_ctx(&lease->c, &ctx->flc_lease); /* * The check in break_lease() is lockless. It's possible for another * open to race in after we did the earlier check for a conflicting @@ -1829,7 +1829,7 @@ generic_add_lease(struct file *filp, int arg, struct file_lock **flp, void **pri smp_mb(); error = check_conflicting_open(filp, arg, lease->c.flc_flags); if (error) { - locks_unlink_lock_ctx(lease); + locks_unlink_lock_ctx(&lease->c); goto out; } -- cgit v1.2.3 From ae7eb16e0b5043beeca4f78fa2cde5f075cddda1 Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Wed, 31 Jan 2024 18:02:13 -0500 Subject: filelock: convert locks_translate_pid to take file_lock_core locks_translate_pid is used on both locks and leases, so have that take struct file_lock_core. 
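A usage sketch, modeled on posix_lock_to_flock() in the hunks below, showing what the translated value means to the caller:

    /* report the lock owner as seen from the reader's pid namespace */
    flock->l_pid = locks_translate_pid(&fl->c, task_active_pid_ns(current));
    /*
     * -1        : OFD lock, not owned by a single process
     * other < 0 : remote lock, negative pid supplied by the lock manager
     *  0        : owner is dead or not visible in this pid namespace
     * > 0       : namespace-local pid of the owner
     */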
Signed-off-by: Jeff Layton Link: https://lore.kernel.org/r/20240131-flsplit-v3-32-c6129007ee8d@kernel.org Reviewed-by: NeilBrown Signed-off-by: Christian Brauner --- fs/locks.c | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) (limited to 'fs') diff --git a/fs/locks.c b/fs/locks.c index 50d02a53ca75..97f6e9163130 100644 --- a/fs/locks.c +++ b/fs/locks.c @@ -2169,17 +2169,17 @@ EXPORT_SYMBOL_GPL(vfs_test_lock); * * Used to translate a fl_pid into a namespace virtual pid number */ -static pid_t locks_translate_pid(struct file_lock *fl, struct pid_namespace *ns) +static pid_t locks_translate_pid(struct file_lock_core *fl, struct pid_namespace *ns) { pid_t vnr; struct pid *pid; - if (fl->c.flc_flags & FL_OFDLCK) + if (fl->flc_flags & FL_OFDLCK) return -1; /* Remote locks report a negative pid value */ - if (fl->c.flc_pid <= 0) - return fl->c.flc_pid; + if (fl->flc_pid <= 0) + return fl->flc_pid; /* * If the flock owner process is dead and its pid has been already @@ -2187,10 +2187,10 @@ static pid_t locks_translate_pid(struct file_lock *fl, struct pid_namespace *ns) * flock owner pid number in init pidns. */ if (ns == &init_pid_ns) - return (pid_t) fl->c.flc_pid; + return (pid_t) fl->flc_pid; rcu_read_lock(); - pid = find_pid_ns(fl->c.flc_pid, &init_pid_ns); + pid = find_pid_ns(fl->flc_pid, &init_pid_ns); vnr = pid_nr_ns(pid, ns); rcu_read_unlock(); return vnr; @@ -2198,7 +2198,7 @@ static pid_t locks_translate_pid(struct file_lock *fl, struct pid_namespace *ns) static int posix_lock_to_flock(struct flock *flock, struct file_lock *fl) { - flock->l_pid = locks_translate_pid(fl, task_active_pid_ns(current)); + flock->l_pid = locks_translate_pid(&fl->c, task_active_pid_ns(current)); #if BITS_PER_LONG == 32 /* * Make sure we can represent the posix lock via @@ -2220,7 +2220,7 @@ static int posix_lock_to_flock(struct flock *flock, struct file_lock *fl) #if BITS_PER_LONG == 32 static void posix_lock_to_flock64(struct flock64 *flock, struct file_lock *fl) { - flock->l_pid = locks_translate_pid(fl, task_active_pid_ns(current)); + flock->l_pid = locks_translate_pid(&fl->c, task_active_pid_ns(current)); flock->l_start = fl->fl_start; flock->l_len = fl->fl_end == OFFSET_MAX ? 0 : fl->fl_end - fl->fl_start + 1; @@ -2726,7 +2726,7 @@ static void lock_get_status(struct seq_file *f, struct file_lock *fl, struct pid_namespace *proc_pidns = proc_pid_ns(file_inode(f->file)->i_sb); int type = fl->c.flc_type; - pid = locks_translate_pid(fl, proc_pidns); + pid = locks_translate_pid(&fl->c, proc_pidns); /* * If lock owner is dead (and pid is freed) or not visible in current * pidns, zero is shown as a pid value. Check lock info from @@ -2819,7 +2819,7 @@ static int locks_show(struct seq_file *f, void *v) cur = hlist_entry(v, struct file_lock, c.flc_link); - if (locks_translate_pid(cur, proc_pidns) == 0) + if (locks_translate_pid(&cur->c, proc_pidns) == 0) return 0; /* View this crossed linked list as a binary tree, the first member of fl_blocked_requests -- cgit v1.2.3 From a1c2af326cb7d7cd07db38740ebb3aafa428fd6f Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Wed, 31 Jan 2024 18:02:14 -0500 Subject: filelock: convert seqfile handling to use file_lock_core Reduce some pointer manipulation by just using file_lock_core where we can and only translate to a file_lock when needed. 
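For reference, lock_get_status() emits /proc/locks lines along these lines (the numbers are invented for illustration; blocked waiters are printed under their blocker with a "-> " prefix):

    1: POSIX  ADVISORY  WRITE 1193 08:02:654132 0 EOF
    1: -> POSIX  ADVISORY  WRITE 1222 08:02:654132 0 EOF
    2: FLOCK  ADVISORY  WRITE 1219 08:02:654130 0 EOF
    3: LEASE  ACTIVE    READ  1270 08:02:654128 0 EOF

    i.e. "id: class access type pid major:minor:inode start end".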
Signed-off-by: Jeff Layton Link: https://lore.kernel.org/r/20240131-flsplit-v3-33-c6129007ee8d@kernel.org Reviewed-by: NeilBrown Signed-off-by: Christian Brauner --- fs/locks.c | 72 +++++++++++++++++++++++++++++++------------------------------- 1 file changed, 36 insertions(+), 36 deletions(-) (limited to 'fs') diff --git a/fs/locks.c b/fs/locks.c index 97f6e9163130..1a4b01203d3d 100644 --- a/fs/locks.c +++ b/fs/locks.c @@ -2718,52 +2718,53 @@ struct locks_iterator { loff_t li_pos; }; -static void lock_get_status(struct seq_file *f, struct file_lock *fl, +static void lock_get_status(struct seq_file *f, struct file_lock_core *flc, loff_t id, char *pfx, int repeat) { struct inode *inode = NULL; unsigned int pid; struct pid_namespace *proc_pidns = proc_pid_ns(file_inode(f->file)->i_sb); - int type = fl->c.flc_type; + int type = flc->flc_type; + struct file_lock *fl = file_lock(flc); + + pid = locks_translate_pid(flc, proc_pidns); - pid = locks_translate_pid(&fl->c, proc_pidns); /* * If lock owner is dead (and pid is freed) or not visible in current * pidns, zero is shown as a pid value. Check lock info from * init_pid_ns to get saved lock pid value. */ - - if (fl->c.flc_file != NULL) - inode = file_inode(fl->c.flc_file); + if (flc->flc_file != NULL) + inode = file_inode(flc->flc_file); seq_printf(f, "%lld: ", id); if (repeat) seq_printf(f, "%*s", repeat - 1 + (int)strlen(pfx), pfx); - if (fl->c.flc_flags & FL_POSIX) { - if (fl->c.flc_flags & FL_ACCESS) + if (flc->flc_flags & FL_POSIX) { + if (flc->flc_flags & FL_ACCESS) seq_puts(f, "ACCESS"); - else if (fl->c.flc_flags & FL_OFDLCK) + else if (flc->flc_flags & FL_OFDLCK) seq_puts(f, "OFDLCK"); else seq_puts(f, "POSIX "); seq_printf(f, " %s ", (inode == NULL) ? "*NOINODE*" : "ADVISORY "); - } else if (fl->c.flc_flags & FL_FLOCK) { + } else if (flc->flc_flags & FL_FLOCK) { seq_puts(f, "FLOCK ADVISORY "); - } else if (fl->c.flc_flags & (FL_LEASE|FL_DELEG|FL_LAYOUT)) { + } else if (flc->flc_flags & (FL_LEASE|FL_DELEG|FL_LAYOUT)) { type = target_leasetype(fl); - if (fl->c.flc_flags & FL_DELEG) + if (flc->flc_flags & FL_DELEG) seq_puts(f, "DELEG "); else seq_puts(f, "LEASE "); if (lease_breaking(fl)) seq_puts(f, "BREAKING "); - else if (fl->c.flc_file) + else if (flc->flc_file) seq_puts(f, "ACTIVE "); else seq_puts(f, "BREAKER "); @@ -2781,7 +2782,7 @@ static void lock_get_status(struct seq_file *f, struct file_lock *fl, } else { seq_printf(f, "%d :0 ", pid); } - if (fl->c.flc_flags & FL_POSIX) { + if (flc->flc_flags & FL_POSIX) { if (fl->fl_end == OFFSET_MAX) seq_printf(f, "%Ld EOF\n", fl->fl_start); else @@ -2791,18 +2792,18 @@ static void lock_get_status(struct seq_file *f, struct file_lock *fl, } } -static struct file_lock *get_next_blocked_member(struct file_lock *node) +static struct file_lock_core *get_next_blocked_member(struct file_lock_core *node) { - struct file_lock *tmp; + struct file_lock_core *tmp; /* NULL node or root node */ - if (node == NULL || node->c.flc_blocker == NULL) + if (node == NULL || node->flc_blocker == NULL) return NULL; /* Next member in the linked list could be itself */ - tmp = list_next_entry(node, c.flc_blocked_member); - if (list_entry_is_head(tmp, &node->c.flc_blocker->flc_blocked_requests, - c.flc_blocked_member) + tmp = list_next_entry(node, flc_blocked_member); + if (list_entry_is_head(tmp, &node->flc_blocker->flc_blocked_requests, + flc_blocked_member) || tmp == node) { return NULL; } @@ -2813,18 +2814,18 @@ static struct file_lock *get_next_blocked_member(struct file_lock *node) static int 
locks_show(struct seq_file *f, void *v) { struct locks_iterator *iter = f->private; - struct file_lock *cur, *tmp; + struct file_lock_core *cur, *tmp; struct pid_namespace *proc_pidns = proc_pid_ns(file_inode(f->file)->i_sb); int level = 0; - cur = hlist_entry(v, struct file_lock, c.flc_link); + cur = hlist_entry(v, struct file_lock_core, flc_link); - if (locks_translate_pid(&cur->c, proc_pidns) == 0) + if (locks_translate_pid(cur, proc_pidns) == 0) return 0; - /* View this crossed linked list as a binary tree, the first member of fl_blocked_requests + /* View this crossed linked list as a binary tree, the first member of flc_blocked_requests * is the left child of current node, the next silibing in flc_blocked_member is the - * right child, we can alse get the parent of current node from fl_blocker, so this + * right child, we can alse get the parent of current node from flc_blocker, so this * question becomes traversal of a binary tree */ while (cur != NULL) { @@ -2833,18 +2834,18 @@ static int locks_show(struct seq_file *f, void *v) else lock_get_status(f, cur, iter->li_pos, "", level); - if (!list_empty(&cur->c.flc_blocked_requests)) { + if (!list_empty(&cur->flc_blocked_requests)) { /* Turn left */ - cur = list_first_entry_or_null(&cur->c.flc_blocked_requests, - struct file_lock, - c.flc_blocked_member); + cur = list_first_entry_or_null(&cur->flc_blocked_requests, + struct file_lock_core, + flc_blocked_member); level++; } else { /* Turn right */ tmp = get_next_blocked_member(cur); /* Fall back to parent node */ - while (tmp == NULL && cur->c.flc_blocker != NULL) { - cur = file_lock(cur->c.flc_blocker); + while (tmp == NULL && cur->flc_blocker != NULL) { + cur = cur->flc_blocker; level--; tmp = get_next_blocked_member(cur); } @@ -2859,14 +2860,13 @@ static void __show_fd_locks(struct seq_file *f, struct list_head *head, int *id, struct file *filp, struct files_struct *files) { - struct file_lock *fl; + struct file_lock_core *fl; - list_for_each_entry(fl, head, c.flc_list) { + list_for_each_entry(fl, head, flc_list) { - if (filp != fl->c.flc_file) + if (filp != fl->flc_file) continue; - if (fl->c.flc_owner != files && - fl->c.flc_owner != filp) + if (fl->flc_owner != files && fl->flc_owner != filp) continue; (*id)++; -- cgit v1.2.3 From 459c814a3c5fa39cf7fa85a263b0316cbe1b8720 Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Wed, 31 Jan 2024 18:02:15 -0500 Subject: 9p: adapt to breakup of struct file_lock Most of the existing APIs have remained the same, but subsystems that access file_lock fields directly need to reach into struct file_lock_core now. 
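The conversion pattern these filesystem patches follow, shown as an illustrative fragment rather than a real hunk (variable names are made up): generic bookkeeping fields move behind the embedded 'c' member, while the byte-range fields, fl_ops/fl_lmops and fl_u stay directly on struct file_lock.

    /* before the split */
    if ((fl->fl_flags & FL_SLEEP) && fl->fl_type != F_UNLCK)
            owner = fl->fl_owner;

    /* after the split */
    if ((fl->c.flc_flags & FL_SLEEP) && fl->c.flc_type != F_UNLCK)
            owner = fl->c.flc_owner;

    /* unchanged either way */
    length = fl->fl_end - fl->fl_start + 1;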
Signed-off-by: Jeff Layton Link: https://lore.kernel.org/r/20240131-flsplit-v3-34-c6129007ee8d@kernel.org Reviewed-by: NeilBrown Signed-off-by: Christian Brauner --- fs/9p/vfs_file.c | 39 +++++++++++++++++++-------------------- 1 file changed, 19 insertions(+), 20 deletions(-) (limited to 'fs') diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c index a1dabcf73380..abdbbaee5184 100644 --- a/fs/9p/vfs_file.c +++ b/fs/9p/vfs_file.c @@ -9,7 +9,6 @@ #include #include #include -#define _NEED_FILE_LOCK_FIELD_MACROS #include #include #include @@ -108,7 +107,7 @@ static int v9fs_file_lock(struct file *filp, int cmd, struct file_lock *fl) p9_debug(P9_DEBUG_VFS, "filp: %p lock: %p\n", filp, fl); - if ((IS_SETLK(cmd) || IS_SETLKW(cmd)) && fl->fl_type != F_UNLCK) { + if ((IS_SETLK(cmd) || IS_SETLKW(cmd)) && fl->c.flc_type != F_UNLCK) { filemap_write_and_wait(inode->i_mapping); invalidate_mapping_pages(&inode->i_data, 0, -1); } @@ -127,7 +126,7 @@ static int v9fs_file_do_lock(struct file *filp, int cmd, struct file_lock *fl) fid = filp->private_data; BUG_ON(fid == NULL); - BUG_ON((fl->fl_flags & FL_POSIX) != FL_POSIX); + BUG_ON((fl->c.flc_flags & FL_POSIX) != FL_POSIX); res = locks_lock_file_wait(filp, fl); if (res < 0) @@ -136,7 +135,7 @@ static int v9fs_file_do_lock(struct file *filp, int cmd, struct file_lock *fl) /* convert posix lock to p9 tlock args */ memset(&flock, 0, sizeof(flock)); /* map the lock type */ - switch (fl->fl_type) { + switch (fl->c.flc_type) { case F_RDLCK: flock.type = P9_LOCK_TYPE_RDLCK; break; @@ -152,7 +151,7 @@ static int v9fs_file_do_lock(struct file *filp, int cmd, struct file_lock *fl) flock.length = 0; else flock.length = fl->fl_end - fl->fl_start + 1; - flock.proc_id = fl->fl_pid; + flock.proc_id = fl->c.flc_pid; flock.client_id = fid->clnt->name; if (IS_SETLKW(cmd)) flock.flags = P9_LOCK_FLAGS_BLOCK; @@ -207,13 +206,13 @@ out_unlock: * incase server returned error for lock request, revert * it locally */ - if (res < 0 && fl->fl_type != F_UNLCK) { - unsigned char type = fl->fl_type; + if (res < 0 && fl->c.flc_type != F_UNLCK) { + unsigned char type = fl->c.flc_type; - fl->fl_type = F_UNLCK; + fl->c.flc_type = F_UNLCK; /* Even if this fails we want to return the remote error */ locks_lock_file_wait(filp, fl); - fl->fl_type = type; + fl->c.flc_type = type; } if (flock.client_id != fid->clnt->name) kfree(flock.client_id); @@ -235,7 +234,7 @@ static int v9fs_file_getlock(struct file *filp, struct file_lock *fl) * if we have a conflicting lock locally, no need to validate * with server */ - if (fl->fl_type != F_UNLCK) + if (fl->c.flc_type != F_UNLCK) return res; /* convert posix lock to p9 tgetlock args */ @@ -246,7 +245,7 @@ static int v9fs_file_getlock(struct file *filp, struct file_lock *fl) glock.length = 0; else glock.length = fl->fl_end - fl->fl_start + 1; - glock.proc_id = fl->fl_pid; + glock.proc_id = fl->c.flc_pid; glock.client_id = fid->clnt->name; res = p9_client_getlock_dotl(fid, &glock); @@ -255,13 +254,13 @@ static int v9fs_file_getlock(struct file *filp, struct file_lock *fl) /* map 9p lock type to os lock type */ switch (glock.type) { case P9_LOCK_TYPE_RDLCK: - fl->fl_type = F_RDLCK; + fl->c.flc_type = F_RDLCK; break; case P9_LOCK_TYPE_WRLCK: - fl->fl_type = F_WRLCK; + fl->c.flc_type = F_WRLCK; break; case P9_LOCK_TYPE_UNLCK: - fl->fl_type = F_UNLCK; + fl->c.flc_type = F_UNLCK; break; } if (glock.type != P9_LOCK_TYPE_UNLCK) { @@ -270,7 +269,7 @@ static int v9fs_file_getlock(struct file *filp, struct file_lock *fl) fl->fl_end = OFFSET_MAX; else fl->fl_end = 
glock.start + glock.length - 1; - fl->fl_pid = -glock.proc_id; + fl->c.flc_pid = -glock.proc_id; } out: if (glock.client_id != fid->clnt->name) @@ -294,7 +293,7 @@ static int v9fs_file_lock_dotl(struct file *filp, int cmd, struct file_lock *fl) p9_debug(P9_DEBUG_VFS, "filp: %p cmd:%d lock: %p name: %pD\n", filp, cmd, fl, filp); - if ((IS_SETLK(cmd) || IS_SETLKW(cmd)) && fl->fl_type != F_UNLCK) { + if ((IS_SETLK(cmd) || IS_SETLKW(cmd)) && fl->c.flc_type != F_UNLCK) { filemap_write_and_wait(inode->i_mapping); invalidate_mapping_pages(&inode->i_data, 0, -1); } @@ -325,16 +324,16 @@ static int v9fs_file_flock_dotl(struct file *filp, int cmd, p9_debug(P9_DEBUG_VFS, "filp: %p cmd:%d lock: %p name: %pD\n", filp, cmd, fl, filp); - if (!(fl->fl_flags & FL_FLOCK)) + if (!(fl->c.flc_flags & FL_FLOCK)) goto out_err; - if ((IS_SETLK(cmd) || IS_SETLKW(cmd)) && fl->fl_type != F_UNLCK) { + if ((IS_SETLK(cmd) || IS_SETLKW(cmd)) && fl->c.flc_type != F_UNLCK) { filemap_write_and_wait(inode->i_mapping); invalidate_mapping_pages(&inode->i_data, 0, -1); } /* Convert flock to posix lock */ - fl->fl_flags |= FL_POSIX; - fl->fl_flags ^= FL_FLOCK; + fl->c.flc_flags |= FL_POSIX; + fl->c.flc_flags ^= FL_FLOCK; if (IS_SETLK(cmd) | IS_SETLKW(cmd)) ret = v9fs_file_do_lock(filp, cmd, fl); -- cgit v1.2.3 From 82a8cb96b23244f40be56b9edcf085af0cc237a6 Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Wed, 31 Jan 2024 18:02:16 -0500 Subject: afs: adapt to breakup of struct file_lock Most of the existing APIs have remained the same, but subsystems that access file_lock fields directly need to reach into struct file_lock_core now. Signed-off-by: Jeff Layton Link: https://lore.kernel.org/r/20240131-flsplit-v3-35-c6129007ee8d@kernel.org Reviewed-by: NeilBrown Signed-off-by: Christian Brauner --- fs/afs/flock.c | 38 +++++++++++++++++++------------------- fs/afs/internal.h | 1 - include/trace/events/afs.h | 4 ++-- 3 files changed, 21 insertions(+), 22 deletions(-) (limited to 'fs') diff --git a/fs/afs/flock.c b/fs/afs/flock.c index 4eee3d1ca5ad..f0e96a35093f 100644 --- a/fs/afs/flock.c +++ b/fs/afs/flock.c @@ -121,16 +121,15 @@ static void afs_next_locker(struct afs_vnode *vnode, int error) list_for_each_entry_safe(p, _p, &vnode->pending_locks, fl_u.afs.link) { if (error && - p->fl_type == type && - afs_file_key(p->fl_file) == key) { + p->c.flc_type == type && + afs_file_key(p->c.flc_file) == key) { list_del_init(&p->fl_u.afs.link); p->fl_u.afs.state = error; locks_wake_up(p); } /* Select the next locker to hand off to. 
*/ - if (next && - (lock_is_write(next) || lock_is_read(p))) + if (next && (lock_is_write(next) || lock_is_read(p))) continue; next = p; } @@ -464,7 +463,7 @@ static int afs_do_setlk(struct file *file, struct file_lock *fl) _enter("{%llx:%llu},%llu-%llu,%u,%u", vnode->fid.vid, vnode->fid.vnode, - fl->fl_start, fl->fl_end, fl->fl_type, mode); + fl->fl_start, fl->fl_end, fl->c.flc_type, mode); fl->fl_ops = &afs_lock_ops; INIT_LIST_HEAD(&fl->fl_u.afs.link); @@ -524,7 +523,7 @@ static int afs_do_setlk(struct file *file, struct file_lock *fl) } if (vnode->lock_state == AFS_VNODE_LOCK_NONE && - !(fl->fl_flags & FL_SLEEP)) { + !(fl->c.flc_flags & FL_SLEEP)) { ret = -EAGAIN; if (type == AFS_LOCK_READ) { if (vnode->status.lock_count == -1) @@ -621,7 +620,7 @@ skip_server_lock: return 0; lock_is_contended: - if (!(fl->fl_flags & FL_SLEEP)) { + if (!(fl->c.flc_flags & FL_SLEEP)) { list_del_init(&fl->fl_u.afs.link); afs_next_locker(vnode, 0); ret = -EAGAIN; @@ -641,7 +640,7 @@ need_to_wait: spin_unlock(&vnode->lock); trace_afs_flock_ev(vnode, fl, afs_flock_waiting, 0); - ret = wait_event_interruptible(fl->fl_wait, + ret = wait_event_interruptible(fl->c.flc_wait, fl->fl_u.afs.state != AFS_LOCK_PENDING); trace_afs_flock_ev(vnode, fl, afs_flock_waited, ret); @@ -704,7 +703,8 @@ static int afs_do_unlk(struct file *file, struct file_lock *fl) struct afs_vnode *vnode = AFS_FS_I(file_inode(file)); int ret; - _enter("{%llx:%llu},%u", vnode->fid.vid, vnode->fid.vnode, fl->fl_type); + _enter("{%llx:%llu},%u", vnode->fid.vid, vnode->fid.vnode, + fl->c.flc_type); trace_afs_flock_op(vnode, fl, afs_flock_op_unlock); @@ -730,7 +730,7 @@ static int afs_do_getlk(struct file *file, struct file_lock *fl) if (vnode->lock_state == AFS_VNODE_LOCK_DELETED) return -ENOENT; - fl->fl_type = F_UNLCK; + fl->c.flc_type = F_UNLCK; /* check local lock records first */ posix_test_lock(file, fl); @@ -743,18 +743,18 @@ static int afs_do_getlk(struct file *file, struct file_lock *fl) lock_count = READ_ONCE(vnode->status.lock_count); if (lock_count != 0) { if (lock_count > 0) - fl->fl_type = F_RDLCK; + fl->c.flc_type = F_RDLCK; else - fl->fl_type = F_WRLCK; + fl->c.flc_type = F_WRLCK; fl->fl_start = 0; fl->fl_end = OFFSET_MAX; - fl->fl_pid = 0; + fl->c.flc_pid = 0; } } ret = 0; error: - _leave(" = %d [%hd]", ret, fl->fl_type); + _leave(" = %d [%hd]", ret, fl->c.flc_type); return ret; } @@ -769,7 +769,7 @@ int afs_lock(struct file *file, int cmd, struct file_lock *fl) _enter("{%llx:%llu},%d,{t=%x,fl=%x,r=%Ld:%Ld}", vnode->fid.vid, vnode->fid.vnode, cmd, - fl->fl_type, fl->fl_flags, + fl->c.flc_type, fl->c.flc_flags, (long long) fl->fl_start, (long long) fl->fl_end); if (IS_GETLK(cmd)) @@ -804,7 +804,7 @@ int afs_flock(struct file *file, int cmd, struct file_lock *fl) _enter("{%llx:%llu},%d,{t=%x,fl=%x}", vnode->fid.vid, vnode->fid.vnode, cmd, - fl->fl_type, fl->fl_flags); + fl->c.flc_type, fl->c.flc_flags); /* * No BSD flocks over NFS allowed. @@ -813,7 +813,7 @@ int afs_flock(struct file *file, int cmd, struct file_lock *fl) * Not sure whether that would be unique, though, or whether * that would break in other places. 
*/ - if (!(fl->fl_flags & FL_FLOCK)) + if (!(fl->c.flc_flags & FL_FLOCK)) return -ENOLCK; fl->fl_u.afs.debug_id = atomic_inc_return(&afs_file_lock_debug_id); @@ -843,7 +843,7 @@ int afs_flock(struct file *file, int cmd, struct file_lock *fl) */ static void afs_fl_copy_lock(struct file_lock *new, struct file_lock *fl) { - struct afs_vnode *vnode = AFS_FS_I(file_inode(fl->fl_file)); + struct afs_vnode *vnode = AFS_FS_I(file_inode(fl->c.flc_file)); _enter(""); @@ -861,7 +861,7 @@ static void afs_fl_copy_lock(struct file_lock *new, struct file_lock *fl) */ static void afs_fl_release_private(struct file_lock *fl) { - struct afs_vnode *vnode = AFS_FS_I(file_inode(fl->fl_file)); + struct afs_vnode *vnode = AFS_FS_I(file_inode(fl->c.flc_file)); _enter(""); diff --git a/fs/afs/internal.h b/fs/afs/internal.h index f5dd428e40f4..9c03fcf7ffaa 100644 --- a/fs/afs/internal.h +++ b/fs/afs/internal.h @@ -9,7 +9,6 @@ #include #include #include -#define _NEED_FILE_LOCK_FIELD_MACROS #include #include #include diff --git a/include/trace/events/afs.h b/include/trace/events/afs.h index 8d73171cb9f0..789931ea5e48 100644 --- a/include/trace/events/afs.h +++ b/include/trace/events/afs.h @@ -1164,8 +1164,8 @@ TRACE_EVENT(afs_flock_op, __entry->from = fl->fl_start; __entry->len = fl->fl_end - fl->fl_start + 1; __entry->op = op; - __entry->type = fl->fl_type; - __entry->flags = fl->fl_flags; + __entry->type = fl->c.flc_type; + __entry->flags = fl->c.flc_flags; __entry->debug_id = fl->fl_u.afs.debug_id; ), -- cgit v1.2.3 From 3956f35fbd36aee35766008ce6a9b0ed812d93ef Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Wed, 31 Jan 2024 18:02:17 -0500 Subject: ceph: adapt to breakup of struct file_lock Most of the existing APIs have remained the same, but subsystems that access file_lock fields directly need to reach into struct file_lock_core now. 
Signed-off-by: Jeff Layton Link: https://lore.kernel.org/r/20240131-flsplit-v3-36-c6129007ee8d@kernel.org Reviewed-by: NeilBrown Signed-off-by: Christian Brauner --- fs/ceph/locks.c | 51 ++++++++++++++++++++++++++------------------------- 1 file changed, 26 insertions(+), 25 deletions(-) (limited to 'fs') diff --git a/fs/ceph/locks.c b/fs/ceph/locks.c index ce773e9c0b79..ebf4ac0055dd 100644 --- a/fs/ceph/locks.c +++ b/fs/ceph/locks.c @@ -7,7 +7,6 @@ #include "super.h" #include "mds_client.h" -#define _NEED_FILE_LOCK_FIELD_MACROS #include #include @@ -34,7 +33,7 @@ void __init ceph_flock_init(void) static void ceph_fl_copy_lock(struct file_lock *dst, struct file_lock *src) { - struct inode *inode = file_inode(dst->fl_file); + struct inode *inode = file_inode(dst->c.flc_file); atomic_inc(&ceph_inode(inode)->i_filelock_ref); dst->fl_u.ceph.inode = igrab(inode); } @@ -111,17 +110,18 @@ static int ceph_lock_message(u8 lock_type, u16 operation, struct inode *inode, else length = fl->fl_end - fl->fl_start + 1; - owner = secure_addr(fl->fl_owner); + owner = secure_addr(fl->c.flc_owner); doutc(cl, "rule: %d, op: %d, owner: %llx, pid: %llu, " "start: %llu, length: %llu, wait: %d, type: %d\n", - (int)lock_type, (int)operation, owner, (u64)fl->fl_pid, - fl->fl_start, length, wait, fl->fl_type); + (int)lock_type, (int)operation, owner, + (u64) fl->c.flc_pid, + fl->fl_start, length, wait, fl->c.flc_type); req->r_args.filelock_change.rule = lock_type; req->r_args.filelock_change.type = cmd; req->r_args.filelock_change.owner = cpu_to_le64(owner); - req->r_args.filelock_change.pid = cpu_to_le64((u64)fl->fl_pid); + req->r_args.filelock_change.pid = cpu_to_le64((u64) fl->c.flc_pid); req->r_args.filelock_change.start = cpu_to_le64(fl->fl_start); req->r_args.filelock_change.length = cpu_to_le64(length); req->r_args.filelock_change.wait = wait; @@ -131,13 +131,13 @@ static int ceph_lock_message(u8 lock_type, u16 operation, struct inode *inode, err = ceph_mdsc_wait_request(mdsc, req, wait ? 
ceph_lock_wait_for_completion : NULL); if (!err && operation == CEPH_MDS_OP_GETFILELOCK) { - fl->fl_pid = -le64_to_cpu(req->r_reply_info.filelock_reply->pid); + fl->c.flc_pid = -le64_to_cpu(req->r_reply_info.filelock_reply->pid); if (CEPH_LOCK_SHARED == req->r_reply_info.filelock_reply->type) - fl->fl_type = F_RDLCK; + fl->c.flc_type = F_RDLCK; else if (CEPH_LOCK_EXCL == req->r_reply_info.filelock_reply->type) - fl->fl_type = F_WRLCK; + fl->c.flc_type = F_WRLCK; else - fl->fl_type = F_UNLCK; + fl->c.flc_type = F_UNLCK; fl->fl_start = le64_to_cpu(req->r_reply_info.filelock_reply->start); length = le64_to_cpu(req->r_reply_info.filelock_reply->start) + @@ -151,8 +151,8 @@ static int ceph_lock_message(u8 lock_type, u16 operation, struct inode *inode, ceph_mdsc_put_request(req); doutc(cl, "rule: %d, op: %d, pid: %llu, start: %llu, " "length: %llu, wait: %d, type: %d, err code %d\n", - (int)lock_type, (int)operation, (u64)fl->fl_pid, - fl->fl_start, length, wait, fl->fl_type, err); + (int)lock_type, (int)operation, (u64) fl->c.flc_pid, + fl->fl_start, length, wait, fl->c.flc_type, err); return err; } @@ -228,10 +228,10 @@ static int ceph_lock_wait_for_completion(struct ceph_mds_client *mdsc, static int try_unlock_file(struct file *file, struct file_lock *fl) { int err; - unsigned int orig_flags = fl->fl_flags; - fl->fl_flags |= FL_EXISTS; + unsigned int orig_flags = fl->c.flc_flags; + fl->c.flc_flags |= FL_EXISTS; err = locks_lock_file_wait(file, fl); - fl->fl_flags = orig_flags; + fl->c.flc_flags = orig_flags; if (err == -ENOENT) { if (!(orig_flags & FL_EXISTS)) err = 0; @@ -254,13 +254,13 @@ int ceph_lock(struct file *file, int cmd, struct file_lock *fl) u8 wait = 0; u8 lock_cmd; - if (!(fl->fl_flags & FL_POSIX)) + if (!(fl->c.flc_flags & FL_POSIX)) return -ENOLCK; if (ceph_inode_is_shutdown(inode)) return -ESTALE; - doutc(cl, "fl_owner: %p\n", fl->fl_owner); + doutc(cl, "fl_owner: %p\n", fl->c.flc_owner); /* set wait bit as appropriate, then make command as Ceph expects it*/ if (IS_GETLK(cmd)) @@ -294,7 +294,7 @@ int ceph_lock(struct file *file, int cmd, struct file_lock *fl) err = ceph_lock_message(CEPH_LOCK_FCNTL, op, inode, lock_cmd, wait, fl); if (!err) { - if (op == CEPH_MDS_OP_SETFILELOCK && F_UNLCK != fl->fl_type) { + if (op == CEPH_MDS_OP_SETFILELOCK && F_UNLCK != fl->c.flc_type) { doutc(cl, "locking locally\n"); err = posix_lock_file(file, fl, NULL); if (err) { @@ -320,13 +320,13 @@ int ceph_flock(struct file *file, int cmd, struct file_lock *fl) u8 wait = 0; u8 lock_cmd; - if (!(fl->fl_flags & FL_FLOCK)) + if (!(fl->c.flc_flags & FL_FLOCK)) return -ENOLCK; if (ceph_inode_is_shutdown(inode)) return -ESTALE; - doutc(cl, "fl_file: %p\n", fl->fl_file); + doutc(cl, "fl_file: %p\n", fl->c.flc_file); spin_lock(&ci->i_ceph_lock); if (ci->i_ceph_flags & CEPH_I_ERROR_FILELOCK) { @@ -357,7 +357,7 @@ int ceph_flock(struct file *file, int cmd, struct file_lock *fl) err = ceph_lock_message(CEPH_LOCK_FLOCK, CEPH_MDS_OP_SETFILELOCK, inode, lock_cmd, wait, fl); - if (!err && F_UNLCK != fl->fl_type) { + if (!err && F_UNLCK != fl->c.flc_type) { err = locks_lock_file_wait(file, fl); if (err) { ceph_lock_message(CEPH_LOCK_FLOCK, @@ -409,10 +409,10 @@ static int lock_to_ceph_filelock(struct inode *inode, cephlock->start = cpu_to_le64(lock->fl_start); cephlock->length = cpu_to_le64(lock->fl_end - lock->fl_start + 1); cephlock->client = cpu_to_le64(0); - cephlock->pid = cpu_to_le64((u64)lock->fl_pid); - cephlock->owner = cpu_to_le64(secure_addr(lock->fl_owner)); + cephlock->pid = cpu_to_le64((u64) 
lock->c.flc_pid); + cephlock->owner = cpu_to_le64(secure_addr(lock->c.flc_owner)); - switch (lock->fl_type) { + switch (lock->c.flc_type) { case F_RDLCK: cephlock->type = CEPH_LOCK_SHARED; break; @@ -423,7 +423,8 @@ static int lock_to_ceph_filelock(struct inode *inode, cephlock->type = CEPH_LOCK_UNLOCK; break; default: - doutc(cl, "Have unknown lock type %d\n", lock->fl_type); + doutc(cl, "Have unknown lock type %d\n", + lock->c.flc_type); err = -EINVAL; } -- cgit v1.2.3 From 966b7bd3ca3eb6f0b1b175947ba75ffeecad8f4d Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Wed, 31 Jan 2024 18:02:18 -0500 Subject: dlm: adapt to breakup of struct file_lock Most of the existing APIs have remained the same, but subsystems that access file_lock fields directly need to reach into struct file_lock_core now. Signed-off-by: Jeff Layton Link: https://lore.kernel.org/r/20240131-flsplit-v3-37-c6129007ee8d@kernel.org Reviewed-by: NeilBrown Signed-off-by: Christian Brauner --- fs/dlm/plock.c | 45 ++++++++++++++++++++++----------------------- 1 file changed, 22 insertions(+), 23 deletions(-) (limited to 'fs') diff --git a/fs/dlm/plock.c b/fs/dlm/plock.c index fdcddbb96d40..9ca83ef70ed1 100644 --- a/fs/dlm/plock.c +++ b/fs/dlm/plock.c @@ -4,7 +4,6 @@ */ #include -#define _NEED_FILE_LOCK_FIELD_MACROS #include #include #include @@ -139,14 +138,14 @@ int dlm_posix_lock(dlm_lockspace_t *lockspace, u64 number, struct file *file, } op->info.optype = DLM_PLOCK_OP_LOCK; - op->info.pid = fl->fl_pid; - op->info.ex = (lock_is_write(fl)); - op->info.wait = !!(fl->fl_flags & FL_SLEEP); + op->info.pid = fl->c.flc_pid; + op->info.ex = lock_is_write(fl); + op->info.wait = !!(fl->c.flc_flags & FL_SLEEP); op->info.fsid = ls->ls_global_id; op->info.number = number; op->info.start = fl->fl_start; op->info.end = fl->fl_end; - op->info.owner = (__u64)(long)fl->fl_owner; + op->info.owner = (__u64)(long) fl->c.flc_owner; /* async handling */ if (fl->fl_lmops && fl->fl_lmops->lm_grant) { op_data = kzalloc(sizeof(*op_data), GFP_NOFS); @@ -259,7 +258,7 @@ static int dlm_plock_callback(struct plock_op *op) } /* got fs lock; bookkeep locally as well: */ - flc->fl_flags &= ~FL_SLEEP; + flc->c.flc_flags &= ~FL_SLEEP; if (posix_lock_file(file, flc, NULL)) { /* * This can only happen in the case of kmalloc() failure. 
@@ -292,7 +291,7 @@ int dlm_posix_unlock(dlm_lockspace_t *lockspace, u64 number, struct file *file, struct dlm_ls *ls; struct plock_op *op; int rv; - unsigned char saved_flags = fl->fl_flags; + unsigned char saved_flags = fl->c.flc_flags; ls = dlm_find_lockspace_local(lockspace); if (!ls) @@ -305,7 +304,7 @@ int dlm_posix_unlock(dlm_lockspace_t *lockspace, u64 number, struct file *file, } /* cause the vfs unlock to return ENOENT if lock is not found */ - fl->fl_flags |= FL_EXISTS; + fl->c.flc_flags |= FL_EXISTS; rv = locks_lock_file_wait(file, fl); if (rv == -ENOENT) { @@ -318,14 +317,14 @@ int dlm_posix_unlock(dlm_lockspace_t *lockspace, u64 number, struct file *file, } op->info.optype = DLM_PLOCK_OP_UNLOCK; - op->info.pid = fl->fl_pid; + op->info.pid = fl->c.flc_pid; op->info.fsid = ls->ls_global_id; op->info.number = number; op->info.start = fl->fl_start; op->info.end = fl->fl_end; - op->info.owner = (__u64)(long)fl->fl_owner; + op->info.owner = (__u64)(long) fl->c.flc_owner; - if (fl->fl_flags & FL_CLOSE) { + if (fl->c.flc_flags & FL_CLOSE) { op->info.flags |= DLM_PLOCK_FL_CLOSE; send_op(op); rv = 0; @@ -346,7 +345,7 @@ out_free: dlm_release_plock_op(op); out: dlm_put_lockspace(ls); - fl->fl_flags = saved_flags; + fl->c.flc_flags = saved_flags; return rv; } EXPORT_SYMBOL_GPL(dlm_posix_unlock); @@ -376,14 +375,14 @@ int dlm_posix_cancel(dlm_lockspace_t *lockspace, u64 number, struct file *file, return -EINVAL; memset(&info, 0, sizeof(info)); - info.pid = fl->fl_pid; - info.ex = (lock_is_write(fl)); + info.pid = fl->c.flc_pid; + info.ex = lock_is_write(fl); info.fsid = ls->ls_global_id; dlm_put_lockspace(ls); info.number = number; info.start = fl->fl_start; info.end = fl->fl_end; - info.owner = (__u64)(long)fl->fl_owner; + info.owner = (__u64)(long) fl->c.flc_owner; rv = do_lock_cancel(&info); switch (rv) { @@ -438,13 +437,13 @@ int dlm_posix_get(dlm_lockspace_t *lockspace, u64 number, struct file *file, } op->info.optype = DLM_PLOCK_OP_GET; - op->info.pid = fl->fl_pid; - op->info.ex = (lock_is_write(fl)); + op->info.pid = fl->c.flc_pid; + op->info.ex = lock_is_write(fl); op->info.fsid = ls->ls_global_id; op->info.number = number; op->info.start = fl->fl_start; op->info.end = fl->fl_end; - op->info.owner = (__u64)(long)fl->fl_owner; + op->info.owner = (__u64)(long) fl->c.flc_owner; send_op(op); wait_event(recv_wq, (op->done != 0)); @@ -456,16 +455,16 @@ int dlm_posix_get(dlm_lockspace_t *lockspace, u64 number, struct file *file, rv = op->info.rv; - fl->fl_type = F_UNLCK; + fl->c.flc_type = F_UNLCK; if (rv == -ENOENT) rv = 0; else if (rv > 0) { locks_init_lock(fl); - fl->fl_type = (op->info.ex) ? F_WRLCK : F_RDLCK; - fl->fl_flags = FL_POSIX; - fl->fl_pid = op->info.pid; + fl->c.flc_type = (op->info.ex) ? F_WRLCK : F_RDLCK; + fl->c.flc_flags = FL_POSIX; + fl->c.flc_pid = op->info.pid; if (op->info.nodeid != dlm_our_nodeid()) - fl->fl_pid = -fl->fl_pid; + fl->c.flc_pid = -fl->c.flc_pid; fl->fl_start = op->info.start; fl->fl_end = op->info.end; rv = 0; -- cgit v1.2.3 From a6bf23e18324d550f789637d469cca654c92fe86 Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Wed, 31 Jan 2024 18:02:19 -0500 Subject: gfs2: adapt to breakup of struct file_lock Most of the existing APIs have remained the same, but subsystems that access file_lock fields directly need to reach into struct file_lock_core now. 
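For reference, the conversion pattern that this and the following patches apply, shown as a minimal illustrative sketch rather than code from the series (describe_lock() is a hypothetical helper): generic lock state is reached through the embedded struct file_lock_core member "c", while the byte range stays on struct file_lock itself.

/* assumes <linux/filelock.h>; describe_lock() is hypothetical, not kernel code */
static void describe_lock(struct file_lock *fl)
{
	if (fl->c.flc_flags & FL_POSIX)		/* was fl->fl_flags */
		pr_debug("POSIX lock, pid %d, type %d\n",
			 fl->c.flc_pid,		/* was fl->fl_pid  */
			 fl->c.flc_type);	/* was fl->fl_type */
	/* fl_start and fl_end remain directly on struct file_lock */
	pr_debug("range %lld-%lld\n",
		 (long long)fl->fl_start, (long long)fl->fl_end);
}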
Signed-off-by: Jeff Layton Link: https://lore.kernel.org/r/20240131-flsplit-v3-38-c6129007ee8d@kernel.org Reviewed-by: NeilBrown Signed-off-by: Christian Brauner --- fs/gfs2/file.c | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) (limited to 'fs') diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c index d06488de1b3b..4c42ada60ae7 100644 --- a/fs/gfs2/file.c +++ b/fs/gfs2/file.c @@ -15,7 +15,6 @@ #include #include #include -#define _NEED_FILE_LOCK_FIELD_MACROS #include #include #include @@ -1441,7 +1440,7 @@ static int gfs2_lock(struct file *file, int cmd, struct file_lock *fl) struct gfs2_sbd *sdp = GFS2_SB(file->f_mapping->host); struct lm_lockstruct *ls = &sdp->sd_lockstruct; - if (!(fl->fl_flags & FL_POSIX)) + if (!(fl->c.flc_flags & FL_POSIX)) return -ENOLCK; if (gfs2_withdrawing_or_withdrawn(sdp)) { if (lock_is_unlock(fl)) @@ -1484,7 +1483,7 @@ static int do_flock(struct file *file, int cmd, struct file_lock *fl) int error = 0; int sleeptime; - state = (lock_is_write(fl)) ? LM_ST_EXCLUSIVE : LM_ST_SHARED; + state = lock_is_write(fl) ? LM_ST_EXCLUSIVE : LM_ST_SHARED; flags = GL_EXACT | GL_NOPID; if (!IS_SETLKW(cmd)) flags |= LM_FLAG_TRY_1CB; @@ -1496,8 +1495,8 @@ static int do_flock(struct file *file, int cmd, struct file_lock *fl) if (fl_gh->gh_state == state) goto out; locks_init_lock(&request); - request.fl_type = F_UNLCK; - request.fl_flags = FL_FLOCK; + request.c.flc_type = F_UNLCK; + request.c.flc_flags = FL_FLOCK; locks_lock_file_wait(file, &request); gfs2_glock_dq(fl_gh); gfs2_holder_reinit(state, flags, fl_gh); @@ -1558,7 +1557,7 @@ static void do_unflock(struct file *file, struct file_lock *fl) static int gfs2_flock(struct file *file, int cmd, struct file_lock *fl) { - if (!(fl->fl_flags & FL_FLOCK)) + if (!(fl->c.flc_flags & FL_FLOCK)) return -ENOLCK; if (lock_is_unlock(fl)) { -- cgit v1.2.3 From 9a7eec48c9715c897b3a7809799089d708a4de64 Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Wed, 31 Jan 2024 18:02:20 -0500 Subject: fuse: adapt to breakup of struct file_lock Most of the existing APIs have remained the same, but subsystems that access file_lock fields directly need to reach into struct file_lock_core now. Signed-off-by: Jeff Layton Link: https://lore.kernel.org/r/20240131-flsplit-v3-39-c6129007ee8d@kernel.org Reviewed-by: NeilBrown Signed-off-by: Christian Brauner --- fs/fuse/file.c | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) (limited to 'fs') diff --git a/fs/fuse/file.c b/fs/fuse/file.c index 2757870ee6ac..c007b0f0c3a7 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -18,7 +18,6 @@ #include #include #include -#define _NEED_FILE_LOCK_FIELD_MACROS #include #include @@ -2510,14 +2509,14 @@ static int convert_fuse_file_lock(struct fuse_conn *fc, * translate it into the caller's pid namespace. 
*/ rcu_read_lock(); - fl->fl_pid = pid_nr_ns(find_pid_ns(ffl->pid, fc->pid_ns), &init_pid_ns); + fl->c.flc_pid = pid_nr_ns(find_pid_ns(ffl->pid, fc->pid_ns), &init_pid_ns); rcu_read_unlock(); break; default: return -EIO; } - fl->fl_type = ffl->type; + fl->c.flc_type = ffl->type; return 0; } @@ -2531,10 +2530,10 @@ static void fuse_lk_fill(struct fuse_args *args, struct file *file, memset(inarg, 0, sizeof(*inarg)); inarg->fh = ff->fh; - inarg->owner = fuse_lock_owner_id(fc, fl->fl_owner); + inarg->owner = fuse_lock_owner_id(fc, fl->c.flc_owner); inarg->lk.start = fl->fl_start; inarg->lk.end = fl->fl_end; - inarg->lk.type = fl->fl_type; + inarg->lk.type = fl->c.flc_type; inarg->lk.pid = pid; if (flock) inarg->lk_flags |= FUSE_LK_FLOCK; @@ -2571,8 +2570,8 @@ static int fuse_setlk(struct file *file, struct file_lock *fl, int flock) struct fuse_mount *fm = get_fuse_mount(inode); FUSE_ARGS(args); struct fuse_lk_in inarg; - int opcode = (fl->fl_flags & FL_SLEEP) ? FUSE_SETLKW : FUSE_SETLK; - struct pid *pid = fl->fl_type != F_UNLCK ? task_tgid(current) : NULL; + int opcode = (fl->c.flc_flags & FL_SLEEP) ? FUSE_SETLKW : FUSE_SETLK; + struct pid *pid = fl->c.flc_type != F_UNLCK ? task_tgid(current) : NULL; pid_t pid_nr = pid_nr_ns(pid, fm->fc->pid_ns); int err; @@ -2582,7 +2581,7 @@ static int fuse_setlk(struct file *file, struct file_lock *fl, int flock) } /* Unlock on close is handled by the flush method */ - if ((fl->fl_flags & FL_CLOSE_POSIX) == FL_CLOSE_POSIX) + if ((fl->c.flc_flags & FL_CLOSE_POSIX) == FL_CLOSE_POSIX) return 0; fuse_lk_fill(&args, file, fl, opcode, pid_nr, flock, &inarg); -- cgit v1.2.3 From eb8ed7c6ab08cde2e8869adc72cc02c7368f0a21 Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Wed, 31 Jan 2024 18:02:21 -0500 Subject: lockd: adapt to breakup of struct file_lock Most of the existing APIs have remained the same, but subsystems that access file_lock fields directly need to reach into struct file_lock_core now. 
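Many of the open-coded fl_type checks in these conversions also switch to the lock_is_read()/lock_is_write()/lock_is_unlock() helpers. A minimal sketch of the resulting style, under the assumption that <linux/filelock.h> is included (choose_open_mode() is a hypothetical example, loosely modeled on lock_to_openmode() in the lockd patch below):

static int choose_open_mode(struct file_lock *fl)
{
	if (lock_is_unlock(fl))			/* fl->c.flc_type == F_UNLCK */
		return -EINVAL;
	return lock_is_write(fl) ? O_WRONLY : O_RDONLY;
}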
Signed-off-by: Jeff Layton Link: https://lore.kernel.org/r/20240131-flsplit-v3-40-c6129007ee8d@kernel.org Reviewed-by: NeilBrown Signed-off-by: Christian Brauner --- fs/lockd/clnt4xdr.c | 14 +++++----- fs/lockd/clntlock.c | 2 +- fs/lockd/clntproc.c | 62 +++++++++++++++++++++++-------------------- fs/lockd/clntxdr.c | 14 +++++----- fs/lockd/svc4proc.c | 10 +++---- fs/lockd/svclock.c | 64 +++++++++++++++++++++++---------------------- fs/lockd/svcproc.c | 10 +++---- fs/lockd/svcsubs.c | 20 +++++++------- fs/lockd/xdr.c | 14 +++++----- fs/lockd/xdr4.c | 14 +++++----- include/linux/lockd/lockd.h | 8 +++--- include/linux/lockd/xdr.h | 1 - 12 files changed, 119 insertions(+), 114 deletions(-) (limited to 'fs') diff --git a/fs/lockd/clnt4xdr.c b/fs/lockd/clnt4xdr.c index 8161667c976f..527458db4525 100644 --- a/fs/lockd/clnt4xdr.c +++ b/fs/lockd/clnt4xdr.c @@ -243,7 +243,7 @@ static void encode_nlm4_holder(struct xdr_stream *xdr, u64 l_offset, l_len; __be32 *p; - encode_bool(xdr, lock->fl.fl_type == F_RDLCK); + encode_bool(xdr, lock->fl.c.flc_type == F_RDLCK); encode_int32(xdr, lock->svid); encode_netobj(xdr, lock->oh.data, lock->oh.len); @@ -270,7 +270,7 @@ static int decode_nlm4_holder(struct xdr_stream *xdr, struct nlm_res *result) goto out_overflow; exclusive = be32_to_cpup(p++); lock->svid = be32_to_cpup(p); - fl->fl_pid = (pid_t)lock->svid; + fl->c.flc_pid = (pid_t)lock->svid; error = decode_netobj(xdr, &lock->oh); if (unlikely(error)) @@ -280,8 +280,8 @@ static int decode_nlm4_holder(struct xdr_stream *xdr, struct nlm_res *result) if (unlikely(p == NULL)) goto out_overflow; - fl->fl_flags = FL_POSIX; - fl->fl_type = exclusive != 0 ? F_WRLCK : F_RDLCK; + fl->c.flc_flags = FL_POSIX; + fl->c.flc_type = exclusive != 0 ? F_WRLCK : F_RDLCK; p = xdr_decode_hyper(p, &l_offset); xdr_decode_hyper(p, &l_len); nlm4svc_set_file_lock_range(fl, l_offset, l_len); @@ -357,7 +357,7 @@ static void nlm4_xdr_enc_testargs(struct rpc_rqst *req, const struct nlm_lock *lock = &args->lock; encode_cookie(xdr, &args->cookie); - encode_bool(xdr, lock->fl.fl_type == F_WRLCK); + encode_bool(xdr, lock->fl.c.flc_type == F_WRLCK); encode_nlm4_lock(xdr, lock); } @@ -380,7 +380,7 @@ static void nlm4_xdr_enc_lockargs(struct rpc_rqst *req, encode_cookie(xdr, &args->cookie); encode_bool(xdr, args->block); - encode_bool(xdr, lock->fl.fl_type == F_WRLCK); + encode_bool(xdr, lock->fl.c.flc_type == F_WRLCK); encode_nlm4_lock(xdr, lock); encode_bool(xdr, args->reclaim); encode_int32(xdr, args->state); @@ -403,7 +403,7 @@ static void nlm4_xdr_enc_cancargs(struct rpc_rqst *req, encode_cookie(xdr, &args->cookie); encode_bool(xdr, args->block); - encode_bool(xdr, lock->fl.fl_type == F_WRLCK); + encode_bool(xdr, lock->fl.c.flc_type == F_WRLCK); encode_nlm4_lock(xdr, lock); } diff --git a/fs/lockd/clntlock.c b/fs/lockd/clntlock.c index 5d85715be763..a7e0519ec024 100644 --- a/fs/lockd/clntlock.c +++ b/fs/lockd/clntlock.c @@ -185,7 +185,7 @@ __be32 nlmclnt_grant(const struct sockaddr *addr, const struct nlm_lock *lock) continue; if (!rpc_cmp_addr(nlm_addr(block->b_host), addr)) continue; - if (nfs_compare_fh(NFS_FH(file_inode(fl_blocked->fl_file)), fh) != 0) + if (nfs_compare_fh(NFS_FH(file_inode(fl_blocked->c.flc_file)), fh) != 0) continue; /* Alright, we found a lock. 
Set the return status * and wake up the caller diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c index 1f71260603b7..cebcc283b7ce 100644 --- a/fs/lockd/clntproc.c +++ b/fs/lockd/clntproc.c @@ -12,7 +12,6 @@ #include #include #include -#define _NEED_FILE_LOCK_FIELD_MACROS #include #include #include @@ -134,7 +133,8 @@ static void nlmclnt_setlockargs(struct nlm_rqst *req, struct file_lock *fl) char *nodename = req->a_host->h_rpcclnt->cl_nodename; nlmclnt_next_cookie(&argp->cookie); - memcpy(&lock->fh, NFS_FH(file_inode(fl->fl_file)), sizeof(struct nfs_fh)); + memcpy(&lock->fh, NFS_FH(file_inode(fl->c.flc_file)), + sizeof(struct nfs_fh)); lock->caller = nodename; lock->oh.data = req->a_owner; lock->oh.len = snprintf(req->a_owner, sizeof(req->a_owner), "%u@%s", @@ -143,7 +143,7 @@ static void nlmclnt_setlockargs(struct nlm_rqst *req, struct file_lock *fl) lock->svid = fl->fl_u.nfs_fl.owner->pid; lock->fl.fl_start = fl->fl_start; lock->fl.fl_end = fl->fl_end; - lock->fl.fl_type = fl->fl_type; + lock->fl.c.flc_type = fl->c.flc_type; } static void nlmclnt_release_lockargs(struct nlm_rqst *req) @@ -183,7 +183,7 @@ int nlmclnt_proc(struct nlm_host *host, int cmd, struct file_lock *fl, void *dat call->a_callback_data = data; if (IS_SETLK(cmd) || IS_SETLKW(cmd)) { - if (fl->fl_type != F_UNLCK) { + if (fl->c.flc_type != F_UNLCK) { call->a_args.block = IS_SETLKW(cmd) ? 1 : 0; status = nlmclnt_lock(call, fl); } else @@ -433,13 +433,14 @@ nlmclnt_test(struct nlm_rqst *req, struct file_lock *fl) { int status; - status = nlmclnt_call(nfs_file_cred(fl->fl_file), req, NLMPROC_TEST); + status = nlmclnt_call(nfs_file_cred(fl->c.flc_file), req, + NLMPROC_TEST); if (status < 0) goto out; switch (req->a_res.status) { case nlm_granted: - fl->fl_type = F_UNLCK; + fl->c.flc_type = F_UNLCK; break; case nlm_lck_denied: /* @@ -447,8 +448,8 @@ nlmclnt_test(struct nlm_rqst *req, struct file_lock *fl) */ fl->fl_start = req->a_res.lock.fl.fl_start; fl->fl_end = req->a_res.lock.fl.fl_end; - fl->fl_type = req->a_res.lock.fl.fl_type; - fl->fl_pid = -req->a_res.lock.fl.fl_pid; + fl->c.flc_type = req->a_res.lock.fl.c.flc_type; + fl->c.flc_pid = -req->a_res.lock.fl.c.flc_pid; break; default: status = nlm_stat_to_errno(req->a_res.status); @@ -486,14 +487,15 @@ static const struct file_lock_operations nlmclnt_lock_ops = { static void nlmclnt_locks_init_private(struct file_lock *fl, struct nlm_host *host) { fl->fl_u.nfs_fl.state = 0; - fl->fl_u.nfs_fl.owner = nlmclnt_find_lockowner(host, fl->fl_owner); + fl->fl_u.nfs_fl.owner = nlmclnt_find_lockowner(host, + fl->c.flc_owner); INIT_LIST_HEAD(&fl->fl_u.nfs_fl.list); fl->fl_ops = &nlmclnt_lock_ops; } static int do_vfs_lock(struct file_lock *fl) { - return locks_lock_file_wait(fl->fl_file, fl); + return locks_lock_file_wait(fl->c.flc_file, fl); } /* @@ -519,11 +521,11 @@ static int do_vfs_lock(struct file_lock *fl) static int nlmclnt_lock(struct nlm_rqst *req, struct file_lock *fl) { - const struct cred *cred = nfs_file_cred(fl->fl_file); + const struct cred *cred = nfs_file_cred(fl->c.flc_file); struct nlm_host *host = req->a_host; struct nlm_res *resp = &req->a_res; struct nlm_wait block; - unsigned char flags = fl->fl_flags; + unsigned char flags = fl->c.flc_flags; unsigned char type; __be32 b_status; int status = -ENOLCK; @@ -532,9 +534,9 @@ nlmclnt_lock(struct nlm_rqst *req, struct file_lock *fl) goto out; req->a_args.state = nsm_local_state; - fl->fl_flags |= FL_ACCESS; + fl->c.flc_flags |= FL_ACCESS; status = do_vfs_lock(fl); - fl->fl_flags = flags; + fl->c.flc_flags = 
flags; if (status < 0) goto out; @@ -592,11 +594,11 @@ again: goto again; } /* Ensure the resulting lock will get added to granted list */ - fl->fl_flags |= FL_SLEEP; + fl->c.flc_flags |= FL_SLEEP; if (do_vfs_lock(fl) < 0) printk(KERN_WARNING "%s: VFS is out of sync with lock manager!\n", __func__); up_read(&host->h_rwsem); - fl->fl_flags = flags; + fl->c.flc_flags = flags; status = 0; } if (status < 0) @@ -623,13 +625,13 @@ out_unlock: req->a_host->h_addrlen, req->a_res.status); dprintk("lockd: lock attempt ended in fatal error.\n" " Attempting to unlock.\n"); - type = fl->fl_type; - fl->fl_type = F_UNLCK; + type = fl->c.flc_type; + fl->c.flc_type = F_UNLCK; down_read(&host->h_rwsem); do_vfs_lock(fl); up_read(&host->h_rwsem); - fl->fl_type = type; - fl->fl_flags = flags; + fl->c.flc_type = type; + fl->c.flc_flags = flags; nlmclnt_async_call(cred, req, NLMPROC_UNLOCK, &nlmclnt_unlock_ops); return status; } @@ -652,12 +654,14 @@ nlmclnt_reclaim(struct nlm_host *host, struct file_lock *fl, nlmclnt_setlockargs(req, fl); req->a_args.reclaim = 1; - status = nlmclnt_call(nfs_file_cred(fl->fl_file), req, NLMPROC_LOCK); + status = nlmclnt_call(nfs_file_cred(fl->c.flc_file), req, + NLMPROC_LOCK); if (status >= 0 && req->a_res.status == nlm_granted) return 0; printk(KERN_WARNING "lockd: failed to reclaim lock for pid %d " - "(errno %d, status %d)\n", fl->fl_pid, + "(errno %d, status %d)\n", + fl->c.flc_pid, status, ntohl(req->a_res.status)); /* @@ -684,26 +688,26 @@ nlmclnt_unlock(struct nlm_rqst *req, struct file_lock *fl) struct nlm_host *host = req->a_host; struct nlm_res *resp = &req->a_res; int status; - unsigned char flags = fl->fl_flags; + unsigned char flags = fl->c.flc_flags; /* * Note: the server is supposed to either grant us the unlock * request, or to deny it with NLM_LCK_DENIED_GRACE_PERIOD. In either * case, we want to unlock. 
*/ - fl->fl_flags |= FL_EXISTS; + fl->c.flc_flags |= FL_EXISTS; down_read(&host->h_rwsem); status = do_vfs_lock(fl); up_read(&host->h_rwsem); - fl->fl_flags = flags; + fl->c.flc_flags = flags; if (status == -ENOENT) { status = 0; goto out; } refcount_inc(&req->a_count); - status = nlmclnt_async_call(nfs_file_cred(fl->fl_file), req, - NLMPROC_UNLOCK, &nlmclnt_unlock_ops); + status = nlmclnt_async_call(nfs_file_cred(fl->c.flc_file), req, + NLMPROC_UNLOCK, &nlmclnt_unlock_ops); if (status < 0) goto out; @@ -796,8 +800,8 @@ static int nlmclnt_cancel(struct nlm_host *host, int block, struct file_lock *fl req->a_args.block = block; refcount_inc(&req->a_count); - status = nlmclnt_async_call(nfs_file_cred(fl->fl_file), req, - NLMPROC_CANCEL, &nlmclnt_cancel_ops); + status = nlmclnt_async_call(nfs_file_cred(fl->c.flc_file), req, + NLMPROC_CANCEL, &nlmclnt_cancel_ops); if (status == 0 && req->a_res.status == nlm_lck_denied) status = -ENOLCK; nlmclnt_release_call(req); diff --git a/fs/lockd/clntxdr.c b/fs/lockd/clntxdr.c index 4df62f635529..a3e97278b997 100644 --- a/fs/lockd/clntxdr.c +++ b/fs/lockd/clntxdr.c @@ -238,7 +238,7 @@ static void encode_nlm_holder(struct xdr_stream *xdr, u32 l_offset, l_len; __be32 *p; - encode_bool(xdr, lock->fl.fl_type == F_RDLCK); + encode_bool(xdr, lock->fl.c.flc_type == F_RDLCK); encode_int32(xdr, lock->svid); encode_netobj(xdr, lock->oh.data, lock->oh.len); @@ -265,7 +265,7 @@ static int decode_nlm_holder(struct xdr_stream *xdr, struct nlm_res *result) goto out_overflow; exclusive = be32_to_cpup(p++); lock->svid = be32_to_cpup(p); - fl->fl_pid = (pid_t)lock->svid; + fl->c.flc_pid = (pid_t)lock->svid; error = decode_netobj(xdr, &lock->oh); if (unlikely(error)) @@ -275,8 +275,8 @@ static int decode_nlm_holder(struct xdr_stream *xdr, struct nlm_res *result) if (unlikely(p == NULL)) goto out_overflow; - fl->fl_flags = FL_POSIX; - fl->fl_type = exclusive != 0 ? F_WRLCK : F_RDLCK; + fl->c.flc_flags = FL_POSIX; + fl->c.flc_type = exclusive != 0 ? 
F_WRLCK : F_RDLCK; l_offset = be32_to_cpup(p++); l_len = be32_to_cpup(p); end = l_offset + l_len - 1; @@ -357,7 +357,7 @@ static void nlm_xdr_enc_testargs(struct rpc_rqst *req, const struct nlm_lock *lock = &args->lock; encode_cookie(xdr, &args->cookie); - encode_bool(xdr, lock->fl.fl_type == F_WRLCK); + encode_bool(xdr, lock->fl.c.flc_type == F_WRLCK); encode_nlm_lock(xdr, lock); } @@ -380,7 +380,7 @@ static void nlm_xdr_enc_lockargs(struct rpc_rqst *req, encode_cookie(xdr, &args->cookie); encode_bool(xdr, args->block); - encode_bool(xdr, lock->fl.fl_type == F_WRLCK); + encode_bool(xdr, lock->fl.c.flc_type == F_WRLCK); encode_nlm_lock(xdr, lock); encode_bool(xdr, args->reclaim); encode_int32(xdr, args->state); @@ -403,7 +403,7 @@ static void nlm_xdr_enc_cancargs(struct rpc_rqst *req, encode_cookie(xdr, &args->cookie); encode_bool(xdr, args->block); - encode_bool(xdr, lock->fl.fl_type == F_WRLCK); + encode_bool(xdr, lock->fl.c.flc_type == F_WRLCK); encode_nlm_lock(xdr, lock); } diff --git a/fs/lockd/svc4proc.c b/fs/lockd/svc4proc.c index b72023a6b4c1..8a72c418cdcc 100644 --- a/fs/lockd/svc4proc.c +++ b/fs/lockd/svc4proc.c @@ -52,16 +52,16 @@ nlm4svc_retrieve_args(struct svc_rqst *rqstp, struct nlm_args *argp, *filp = file; /* Set up the missing parts of the file_lock structure */ - lock->fl.fl_flags = FL_POSIX; - lock->fl.fl_file = file->f_file[mode]; - lock->fl.fl_pid = current->tgid; + lock->fl.c.flc_flags = FL_POSIX; + lock->fl.c.flc_file = file->f_file[mode]; + lock->fl.c.flc_pid = current->tgid; lock->fl.fl_start = (loff_t)lock->lock_start; lock->fl.fl_end = lock->lock_len ? (loff_t)(lock->lock_start + lock->lock_len - 1) : OFFSET_MAX; lock->fl.fl_lmops = &nlmsvc_lock_operations; nlmsvc_locks_init_private(&lock->fl, host, (pid_t)lock->svid); - if (!lock->fl.fl_owner) { + if (!lock->fl.c.flc_owner) { /* lockowner allocation has failed */ nlmsvc_release_host(host); return nlm_lck_denied_nolocks; @@ -106,7 +106,7 @@ __nlm4svc_proc_test(struct svc_rqst *rqstp, struct nlm_res *resp) if ((resp->status = nlm4svc_retrieve_args(rqstp, argp, &host, &file))) return resp->status == nlm_drop_reply ? 
rpc_drop_reply :rpc_success; - test_owner = argp->lock.fl.fl_owner; + test_owner = argp->lock.fl.c.flc_owner; /* Now check for conflicting locks */ resp->status = nlmsvc_testlock(rqstp, file, host, &argp->lock, &resp->lock, &resp->cookie); if (resp->status == nlm_drop_reply) diff --git a/fs/lockd/svclock.c b/fs/lockd/svclock.c index 2dc10900ad1c..1f2149db10f2 100644 --- a/fs/lockd/svclock.c +++ b/fs/lockd/svclock.c @@ -150,16 +150,17 @@ nlmsvc_lookup_block(struct nlm_file *file, struct nlm_lock *lock) struct file_lock *fl; dprintk("lockd: nlmsvc_lookup_block f=%p pd=%d %Ld-%Ld ty=%d\n", - file, lock->fl.fl_pid, + file, lock->fl.c.flc_pid, (long long)lock->fl.fl_start, - (long long)lock->fl.fl_end, lock->fl.fl_type); + (long long)lock->fl.fl_end, + lock->fl.c.flc_type); spin_lock(&nlm_blocked_lock); list_for_each_entry(block, &nlm_blocked, b_list) { fl = &block->b_call->a_args.lock.fl; dprintk("lockd: check f=%p pd=%d %Ld-%Ld ty=%d cookie=%s\n", - block->b_file, fl->fl_pid, + block->b_file, fl->c.flc_pid, (long long)fl->fl_start, - (long long)fl->fl_end, fl->fl_type, + (long long)fl->fl_end, fl->c.flc_type, nlmdbg_cookie2a(&block->b_call->a_args.cookie)); if (block->b_file == file && nlm_compare_locks(fl, &lock->fl)) { kref_get(&block->b_count); @@ -244,7 +245,7 @@ nlmsvc_create_block(struct svc_rqst *rqstp, struct nlm_host *host, goto failed_free; /* Set notifier function for VFS, and init args */ - call->a_args.lock.fl.fl_flags |= FL_SLEEP; + call->a_args.lock.fl.c.flc_flags |= FL_SLEEP; call->a_args.lock.fl.fl_lmops = &nlmsvc_lock_operations; nlmclnt_next_cookie(&call->a_args.cookie); @@ -402,14 +403,14 @@ static struct nlm_lockowner *nlmsvc_find_lockowner(struct nlm_host *host, pid_t void nlmsvc_release_lockowner(struct nlm_lock *lock) { - if (lock->fl.fl_owner) - nlmsvc_put_lockowner(lock->fl.fl_owner); + if (lock->fl.c.flc_owner) + nlmsvc_put_lockowner(lock->fl.c.flc_owner); } void nlmsvc_locks_init_private(struct file_lock *fl, struct nlm_host *host, pid_t pid) { - fl->fl_owner = nlmsvc_find_lockowner(host, pid); + fl->c.flc_owner = nlmsvc_find_lockowner(host, pid); } /* @@ -425,7 +426,7 @@ static int nlmsvc_setgrantargs(struct nlm_rqst *call, struct nlm_lock *lock) /* set default data area */ call->a_args.lock.oh.data = call->a_owner; - call->a_args.lock.svid = ((struct nlm_lockowner *)lock->fl.fl_owner)->pid; + call->a_args.lock.svid = ((struct nlm_lockowner *) lock->fl.c.flc_owner)->pid; if (lock->oh.len > NLMCLNT_OHSIZE) { void *data = kmalloc(lock->oh.len, GFP_KERNEL); @@ -489,7 +490,8 @@ nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file, dprintk("lockd: nlmsvc_lock(%s/%ld, ty=%d, pi=%d, %Ld-%Ld, bl=%d)\n", inode->i_sb->s_id, inode->i_ino, - lock->fl.fl_type, lock->fl.fl_pid, + lock->fl.c.flc_type, + lock->fl.c.flc_pid, (long long)lock->fl.fl_start, (long long)lock->fl.fl_end, wait); @@ -512,7 +514,7 @@ nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file, goto out; lock = &block->b_call->a_args.lock; } else - lock->fl.fl_flags &= ~FL_SLEEP; + lock->fl.c.flc_flags &= ~FL_SLEEP; if (block->b_flags & B_QUEUED) { dprintk("lockd: nlmsvc_lock deferred block %p flags %d\n", @@ -560,10 +562,10 @@ nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file, spin_unlock(&nlm_blocked_lock); if (!wait) - lock->fl.fl_flags &= ~FL_SLEEP; + lock->fl.c.flc_flags &= ~FL_SLEEP; mode = lock_to_openmode(&lock->fl); error = vfs_lock_file(file->f_file[mode], F_SETLK, &lock->fl, NULL); - lock->fl.fl_flags &= ~FL_SLEEP; + lock->fl.c.flc_flags &= ~FL_SLEEP; dprintk("lockd: vfs_lock_file 
returned %d\n", error); switch (error) { @@ -616,7 +618,7 @@ nlmsvc_testlock(struct svc_rqst *rqstp, struct nlm_file *file, dprintk("lockd: nlmsvc_testlock(%s/%ld, ty=%d, %Ld-%Ld)\n", nlmsvc_file_inode(file)->i_sb->s_id, nlmsvc_file_inode(file)->i_ino, - lock->fl.fl_type, + lock->fl.c.flc_type, (long long)lock->fl.fl_start, (long long)lock->fl.fl_end); @@ -636,19 +638,19 @@ nlmsvc_testlock(struct svc_rqst *rqstp, struct nlm_file *file, goto out; } - if (lock->fl.fl_type == F_UNLCK) { + if (lock->fl.c.flc_type == F_UNLCK) { ret = nlm_granted; goto out; } dprintk("lockd: conflicting lock(ty=%d, %Ld-%Ld)\n", - lock->fl.fl_type, (long long)lock->fl.fl_start, + lock->fl.c.flc_type, (long long)lock->fl.fl_start, (long long)lock->fl.fl_end); conflock->caller = "somehost"; /* FIXME */ conflock->len = strlen(conflock->caller); conflock->oh.len = 0; /* don't return OH info */ - conflock->svid = lock->fl.fl_pid; - conflock->fl.fl_type = lock->fl.fl_type; + conflock->svid = lock->fl.c.flc_pid; + conflock->fl.c.flc_type = lock->fl.c.flc_type; conflock->fl.fl_start = lock->fl.fl_start; conflock->fl.fl_end = lock->fl.fl_end; locks_release_private(&lock->fl); @@ -673,21 +675,21 @@ nlmsvc_unlock(struct net *net, struct nlm_file *file, struct nlm_lock *lock) dprintk("lockd: nlmsvc_unlock(%s/%ld, pi=%d, %Ld-%Ld)\n", nlmsvc_file_inode(file)->i_sb->s_id, nlmsvc_file_inode(file)->i_ino, - lock->fl.fl_pid, + lock->fl.c.flc_pid, (long long)lock->fl.fl_start, (long long)lock->fl.fl_end); /* First, cancel any lock that might be there */ nlmsvc_cancel_blocked(net, file, lock); - lock->fl.fl_type = F_UNLCK; - lock->fl.fl_file = file->f_file[O_RDONLY]; - if (lock->fl.fl_file) - error = vfs_lock_file(lock->fl.fl_file, F_SETLK, + lock->fl.c.flc_type = F_UNLCK; + lock->fl.c.flc_file = file->f_file[O_RDONLY]; + if (lock->fl.c.flc_file) + error = vfs_lock_file(lock->fl.c.flc_file, F_SETLK, &lock->fl, NULL); - lock->fl.fl_file = file->f_file[O_WRONLY]; - if (lock->fl.fl_file) - error |= vfs_lock_file(lock->fl.fl_file, F_SETLK, + lock->fl.c.flc_file = file->f_file[O_WRONLY]; + if (lock->fl.c.flc_file) + error |= vfs_lock_file(lock->fl.c.flc_file, F_SETLK, &lock->fl, NULL); return (error < 0)? 
nlm_lck_denied_nolocks : nlm_granted; @@ -710,7 +712,7 @@ nlmsvc_cancel_blocked(struct net *net, struct nlm_file *file, struct nlm_lock *l dprintk("lockd: nlmsvc_cancel(%s/%ld, pi=%d, %Ld-%Ld)\n", nlmsvc_file_inode(file)->i_sb->s_id, nlmsvc_file_inode(file)->i_ino, - lock->fl.fl_pid, + lock->fl.c.flc_pid, (long long)lock->fl.fl_start, (long long)lock->fl.fl_end); @@ -863,12 +865,12 @@ nlmsvc_grant_blocked(struct nlm_block *block) /* vfs_lock_file() can mangle fl_start and fl_end, but we need * them unchanged for the GRANT_MSG */ - lock->fl.fl_flags |= FL_SLEEP; + lock->fl.c.flc_flags |= FL_SLEEP; fl_start = lock->fl.fl_start; fl_end = lock->fl.fl_end; mode = lock_to_openmode(&lock->fl); error = vfs_lock_file(file->f_file[mode], F_SETLK, &lock->fl, NULL); - lock->fl.fl_flags &= ~FL_SLEEP; + lock->fl.c.flc_flags &= ~FL_SLEEP; lock->fl.fl_start = fl_start; lock->fl.fl_end = fl_end; @@ -993,8 +995,8 @@ nlmsvc_grant_reply(struct nlm_cookie *cookie, __be32 status) /* Client doesn't want it, just unlock it */ nlmsvc_unlink_block(block); fl = &block->b_call->a_args.lock.fl; - fl->fl_type = F_UNLCK; - error = vfs_lock_file(fl->fl_file, F_SETLK, fl, NULL); + fl->c.flc_type = F_UNLCK; + error = vfs_lock_file(fl->c.flc_file, F_SETLK, fl, NULL); if (error) pr_warn("lockd: unable to unlock lock rejected by client!\n"); break; diff --git a/fs/lockd/svcproc.c b/fs/lockd/svcproc.c index 32784f508c81..a03220e66ce0 100644 --- a/fs/lockd/svcproc.c +++ b/fs/lockd/svcproc.c @@ -77,12 +77,12 @@ nlmsvc_retrieve_args(struct svc_rqst *rqstp, struct nlm_args *argp, /* Set up the missing parts of the file_lock structure */ mode = lock_to_openmode(&lock->fl); - lock->fl.fl_flags = FL_POSIX; - lock->fl.fl_file = file->f_file[mode]; - lock->fl.fl_pid = current->tgid; + lock->fl.c.flc_flags = FL_POSIX; + lock->fl.c.flc_file = file->f_file[mode]; + lock->fl.c.flc_pid = current->tgid; lock->fl.fl_lmops = &nlmsvc_lock_operations; nlmsvc_locks_init_private(&lock->fl, host, (pid_t)lock->svid); - if (!lock->fl.fl_owner) { + if (!lock->fl.c.flc_owner) { /* lockowner allocation has failed */ nlmsvc_release_host(host); return nlm_lck_denied_nolocks; @@ -127,7 +127,7 @@ __nlmsvc_proc_test(struct svc_rqst *rqstp, struct nlm_res *resp) if ((resp->status = nlmsvc_retrieve_args(rqstp, argp, &host, &file))) return resp->status == nlm_drop_reply ? rpc_drop_reply :rpc_success; - test_owner = argp->lock.fl.fl_owner; + test_owner = argp->lock.fl.c.flc_owner; /* Now check for conflicting locks */ resp->status = cast_status(nlmsvc_testlock(rqstp, file, host, &argp->lock, &resp->lock, &resp->cookie)); diff --git a/fs/lockd/svcsubs.c b/fs/lockd/svcsubs.c index 2f33c187b876..9103896164f6 100644 --- a/fs/lockd/svcsubs.c +++ b/fs/lockd/svcsubs.c @@ -73,7 +73,7 @@ static inline unsigned int file_hash(struct nfs_fh *f) int lock_to_openmode(struct file_lock *lock) { - return (lock_is_write(lock)) ? O_WRONLY : O_RDONLY; + return lock_is_write(lock) ? 
O_WRONLY : O_RDONLY; } /* @@ -181,18 +181,18 @@ static int nlm_unlock_files(struct nlm_file *file, const struct file_lock *fl) struct file_lock lock; locks_init_lock(&lock); - lock.fl_type = F_UNLCK; + lock.c.flc_type = F_UNLCK; lock.fl_start = 0; lock.fl_end = OFFSET_MAX; - lock.fl_owner = fl->fl_owner; - lock.fl_pid = fl->fl_pid; - lock.fl_flags = FL_POSIX; + lock.c.flc_owner = fl->c.flc_owner; + lock.c.flc_pid = fl->c.flc_pid; + lock.c.flc_flags = FL_POSIX; - lock.fl_file = file->f_file[O_RDONLY]; - if (lock.fl_file && vfs_lock_file(lock.fl_file, F_SETLK, &lock, NULL)) + lock.c.flc_file = file->f_file[O_RDONLY]; + if (lock.c.flc_file && vfs_lock_file(lock.c.flc_file, F_SETLK, &lock, NULL)) goto out_err; - lock.fl_file = file->f_file[O_WRONLY]; - if (lock.fl_file && vfs_lock_file(lock.fl_file, F_SETLK, &lock, NULL)) + lock.c.flc_file = file->f_file[O_WRONLY]; + if (lock.c.flc_file && vfs_lock_file(lock.c.flc_file, F_SETLK, &lock, NULL)) goto out_err; return 0; out_err: @@ -225,7 +225,7 @@ again: /* update current lock count */ file->f_locks++; - lockhost = ((struct nlm_lockowner *)fl->fl_owner)->host; + lockhost = ((struct nlm_lockowner *) fl->c.flc_owner)->host; if (match(lockhost, host)) { spin_unlock(&flctx->flc_lock); diff --git a/fs/lockd/xdr.c b/fs/lockd/xdr.c index 2fb5748dae0c..adfcce2bf11b 100644 --- a/fs/lockd/xdr.c +++ b/fs/lockd/xdr.c @@ -88,8 +88,8 @@ svcxdr_decode_lock(struct xdr_stream *xdr, struct nlm_lock *lock) return false; locks_init_lock(fl); - fl->fl_flags = FL_POSIX; - fl->fl_type = F_RDLCK; + fl->c.flc_flags = FL_POSIX; + fl->c.flc_type = F_RDLCK; end = start + len - 1; fl->fl_start = s32_to_loff_t(start); if (len == 0 || end < 0) @@ -107,7 +107,7 @@ svcxdr_encode_holder(struct xdr_stream *xdr, const struct nlm_lock *lock) s32 start, len; /* exclusive */ - if (xdr_stream_encode_bool(xdr, fl->fl_type != F_RDLCK) < 0) + if (xdr_stream_encode_bool(xdr, fl->c.flc_type != F_RDLCK) < 0) return false; if (xdr_stream_encode_u32(xdr, lock->svid) < 0) return false; @@ -164,7 +164,7 @@ nlmsvc_decode_testargs(struct svc_rqst *rqstp, struct xdr_stream *xdr) if (!svcxdr_decode_lock(xdr, &argp->lock)) return false; if (exclusive) - argp->lock.fl.fl_type = F_WRLCK; + argp->lock.fl.c.flc_type = F_WRLCK; return true; } @@ -184,7 +184,7 @@ nlmsvc_decode_lockargs(struct svc_rqst *rqstp, struct xdr_stream *xdr) if (!svcxdr_decode_lock(xdr, &argp->lock)) return false; if (exclusive) - argp->lock.fl.fl_type = F_WRLCK; + argp->lock.fl.c.flc_type = F_WRLCK; if (xdr_stream_decode_bool(xdr, &argp->reclaim) < 0) return false; if (xdr_stream_decode_u32(xdr, &argp->state) < 0) @@ -209,7 +209,7 @@ nlmsvc_decode_cancargs(struct svc_rqst *rqstp, struct xdr_stream *xdr) if (!svcxdr_decode_lock(xdr, &argp->lock)) return false; if (exclusive) - argp->lock.fl.fl_type = F_WRLCK; + argp->lock.fl.c.flc_type = F_WRLCK; return true; } @@ -223,7 +223,7 @@ nlmsvc_decode_unlockargs(struct svc_rqst *rqstp, struct xdr_stream *xdr) return false; if (!svcxdr_decode_lock(xdr, &argp->lock)) return false; - argp->lock.fl.fl_type = F_UNLCK; + argp->lock.fl.c.flc_type = F_UNLCK; return true; } diff --git a/fs/lockd/xdr4.c b/fs/lockd/xdr4.c index 5fcbf30cd275..3d28b9c3ed15 100644 --- a/fs/lockd/xdr4.c +++ b/fs/lockd/xdr4.c @@ -89,8 +89,8 @@ svcxdr_decode_lock(struct xdr_stream *xdr, struct nlm_lock *lock) return false; locks_init_lock(fl); - fl->fl_flags = FL_POSIX; - fl->fl_type = F_RDLCK; + fl->c.flc_flags = FL_POSIX; + fl->c.flc_type = F_RDLCK; nlm4svc_set_file_lock_range(fl, lock->lock_start, lock->lock_len); 
return true; } @@ -102,7 +102,7 @@ svcxdr_encode_holder(struct xdr_stream *xdr, const struct nlm_lock *lock) s64 start, len; /* exclusive */ - if (xdr_stream_encode_bool(xdr, fl->fl_type != F_RDLCK) < 0) + if (xdr_stream_encode_bool(xdr, fl->c.flc_type != F_RDLCK) < 0) return false; if (xdr_stream_encode_u32(xdr, lock->svid) < 0) return false; @@ -159,7 +159,7 @@ nlm4svc_decode_testargs(struct svc_rqst *rqstp, struct xdr_stream *xdr) if (!svcxdr_decode_lock(xdr, &argp->lock)) return false; if (exclusive) - argp->lock.fl.fl_type = F_WRLCK; + argp->lock.fl.c.flc_type = F_WRLCK; return true; } @@ -179,7 +179,7 @@ nlm4svc_decode_lockargs(struct svc_rqst *rqstp, struct xdr_stream *xdr) if (!svcxdr_decode_lock(xdr, &argp->lock)) return false; if (exclusive) - argp->lock.fl.fl_type = F_WRLCK; + argp->lock.fl.c.flc_type = F_WRLCK; if (xdr_stream_decode_bool(xdr, &argp->reclaim) < 0) return false; if (xdr_stream_decode_u32(xdr, &argp->state) < 0) @@ -204,7 +204,7 @@ nlm4svc_decode_cancargs(struct svc_rqst *rqstp, struct xdr_stream *xdr) if (!svcxdr_decode_lock(xdr, &argp->lock)) return false; if (exclusive) - argp->lock.fl.fl_type = F_WRLCK; + argp->lock.fl.c.flc_type = F_WRLCK; return true; } @@ -218,7 +218,7 @@ nlm4svc_decode_unlockargs(struct svc_rqst *rqstp, struct xdr_stream *xdr) return false; if (!svcxdr_decode_lock(xdr, &argp->lock)) return false; - argp->lock.fl.fl_type = F_UNLCK; + argp->lock.fl.c.flc_type = F_UNLCK; return true; } diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h index 9f565416d186..1b95fe31051f 100644 --- a/include/linux/lockd/lockd.h +++ b/include/linux/lockd/lockd.h @@ -375,12 +375,12 @@ static inline int nlm_privileged_requester(const struct svc_rqst *rqstp) static inline int nlm_compare_locks(const struct file_lock *fl1, const struct file_lock *fl2) { - return file_inode(fl1->fl_file) == file_inode(fl2->fl_file) - && fl1->fl_pid == fl2->fl_pid - && fl1->fl_owner == fl2->fl_owner + return file_inode(fl1->c.flc_file) == file_inode(fl2->c.flc_file) + && fl1->c.flc_pid == fl2->c.flc_pid + && fl1->c.flc_owner == fl2->c.flc_owner && fl1->fl_start == fl2->fl_start && fl1->fl_end == fl2->fl_end - &&(fl1->fl_type == fl2->fl_type || fl2->fl_type == F_UNLCK); + &&(fl1->c.flc_type == fl2->c.flc_type || fl2->c.flc_type == F_UNLCK); } extern const struct lock_manager_operations nlmsvc_lock_operations; diff --git a/include/linux/lockd/xdr.h b/include/linux/lockd/xdr.h index a3f068b0ca86..80cca9426761 100644 --- a/include/linux/lockd/xdr.h +++ b/include/linux/lockd/xdr.h @@ -11,7 +11,6 @@ #define LOCKD_XDR_H #include -#define _NEED_FILE_LOCK_FIELD_MACROS #include #include #include -- cgit v1.2.3 From dd1fac6ae648cac4e92ccc829e94750ddfed5e52 Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Wed, 31 Jan 2024 18:02:22 -0500 Subject: nfs: adapt to breakup of struct file_lock Most of the existing APIs have remained the same, but subsystems that access file_lock fields directly need to reach into struct file_lock_core now. 
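Where a subsystem walks a file's lock lists, the list linkage now also lives in file_lock_core, so open-coded list_first_entry() lookups name c.flc_list and iteration goes through for_each_file_lock(). A small illustrative sketch under those assumptions (count_write_locks() is hypothetical):

static int count_write_locks(struct file_lock_context *flctx)
{
	struct file_lock *fl;
	int n = 0;

	spin_lock(&flctx->flc_lock);
	for_each_file_lock(fl, &flctx->flc_posix)
		if (lock_is_write(fl))
			n++;
	spin_unlock(&flctx->flc_lock);
	return n;
}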
Signed-off-by: Jeff Layton Link: https://lore.kernel.org/r/20240131-flsplit-v3-41-c6129007ee8d@kernel.org Reviewed-by: NeilBrown Signed-off-by: Christian Brauner --- fs/nfs/delegation.c | 2 +- fs/nfs/file.c | 19 +++++++++---------- fs/nfs/nfs3proc.c | 2 +- fs/nfs/nfs4_fs.h | 1 - fs/nfs/nfs4proc.c | 33 ++++++++++++++++++--------------- fs/nfs/nfs4state.c | 4 ++-- fs/nfs/nfs4trace.h | 4 ++-- fs/nfs/nfs4xdr.c | 6 +++--- fs/nfs/write.c | 5 ++--- 9 files changed, 38 insertions(+), 38 deletions(-) (limited to 'fs') diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c index ca6985001466..d4a42ce0c7e3 100644 --- a/fs/nfs/delegation.c +++ b/fs/nfs/delegation.c @@ -157,7 +157,7 @@ static int nfs_delegation_claim_locks(struct nfs4_state *state, const nfs4_state spin_lock(&flctx->flc_lock); restart: for_each_file_lock(fl, list) { - if (nfs_file_open_context(fl->fl_file)->state != state) + if (nfs_file_open_context(fl->c.flc_file)->state != state) continue; spin_unlock(&flctx->flc_lock); status = nfs4_lock_delegation_recall(fl, state, stateid); diff --git a/fs/nfs/file.c b/fs/nfs/file.c index 0b6691e64d27..407c6e15afe2 100644 --- a/fs/nfs/file.c +++ b/fs/nfs/file.c @@ -31,7 +31,6 @@ #include #include -#define _NEED_FILE_LOCK_FIELD_MACROS #include #include "delegation.h" @@ -721,15 +720,15 @@ do_getlk(struct file *filp, int cmd, struct file_lock *fl, int is_local) { struct inode *inode = filp->f_mapping->host; int status = 0; - unsigned int saved_type = fl->fl_type; + unsigned int saved_type = fl->c.flc_type; /* Try local locking first */ posix_test_lock(filp, fl); - if (fl->fl_type != F_UNLCK) { + if (fl->c.flc_type != F_UNLCK) { /* found a conflict */ goto out; } - fl->fl_type = saved_type; + fl->c.flc_type = saved_type; if (NFS_PROTO(inode)->have_delegation(inode, FMODE_READ)) goto out_noconflict; @@ -741,7 +740,7 @@ do_getlk(struct file *filp, int cmd, struct file_lock *fl, int is_local) out: return status; out_noconflict: - fl->fl_type = F_UNLCK; + fl->c.flc_type = F_UNLCK; goto out; } @@ -766,7 +765,7 @@ do_unlk(struct file *filp, int cmd, struct file_lock *fl, int is_local) * If we're signalled while cleaning up locks on process exit, we * still need to complete the unlock. 
*/ - if (status < 0 && !(fl->fl_flags & FL_CLOSE)) + if (status < 0 && !(fl->c.flc_flags & FL_CLOSE)) return status; } @@ -833,12 +832,12 @@ int nfs_lock(struct file *filp, int cmd, struct file_lock *fl) int is_local = 0; dprintk("NFS: lock(%pD2, t=%x, fl=%x, r=%lld:%lld)\n", - filp, fl->fl_type, fl->fl_flags, + filp, fl->c.flc_type, fl->c.flc_flags, (long long)fl->fl_start, (long long)fl->fl_end); nfs_inc_stats(inode, NFSIOS_VFSLOCK); - if (fl->fl_flags & FL_RECLAIM) + if (fl->c.flc_flags & FL_RECLAIM) return -ENOGRACE; if (NFS_SERVER(inode)->flags & NFS_MOUNT_LOCAL_FCNTL) @@ -870,9 +869,9 @@ int nfs_flock(struct file *filp, int cmd, struct file_lock *fl) int is_local = 0; dprintk("NFS: flock(%pD2, t=%x, fl=%x)\n", - filp, fl->fl_type, fl->fl_flags); + filp, fl->c.flc_type, fl->c.flc_flags); - if (!(fl->fl_flags & FL_FLOCK)) + if (!(fl->c.flc_flags & FL_FLOCK)) return -ENOLCK; if (NFS_SERVER(inode)->flags & NFS_MOUNT_LOCAL_FLOCK) diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c index 2de66e4e8280..cbbe3f0193b8 100644 --- a/fs/nfs/nfs3proc.c +++ b/fs/nfs/nfs3proc.c @@ -963,7 +963,7 @@ nfs3_proc_lock(struct file *filp, int cmd, struct file_lock *fl) struct nfs_open_context *ctx = nfs_file_open_context(filp); int status; - if (fl->fl_flags & FL_CLOSE) { + if (fl->c.flc_flags & FL_CLOSE) { l_ctx = nfs_get_lock_context(ctx); if (IS_ERR(l_ctx)) l_ctx = NULL; diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h index 752224a48f1c..581698f1b7b2 100644 --- a/fs/nfs/nfs4_fs.h +++ b/fs/nfs/nfs4_fs.h @@ -23,7 +23,6 @@ #define NFS4_MAX_LOOP_ON_RECOVER (10) #include -#define _NEED_FILE_LOCK_FIELD_MACROS #include struct idmap; diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index df54fcd0fa08..91dddcd79004 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -6800,7 +6800,7 @@ static int _nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1); switch (status) { case 0: - request->fl_type = F_UNLCK; + request->c.flc_type = F_UNLCK; break; case -NFS4ERR_DENIED: status = 0; @@ -7018,8 +7018,8 @@ static struct rpc_task *nfs4_do_unlck(struct file_lock *fl, /* Ensure this is an unlock - when canceling a lock, the * canceled lock is passed in, and it won't be an unlock. */ - fl->fl_type = F_UNLCK; - if (fl->fl_flags & FL_CLOSE) + fl->c.flc_type = F_UNLCK; + if (fl->c.flc_flags & FL_CLOSE) set_bit(NFS_CONTEXT_UNLOCK, &ctx->flags); data = nfs4_alloc_unlockdata(fl, ctx, lsp, seqid); @@ -7045,11 +7045,11 @@ static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock * struct rpc_task *task; struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t); int status = 0; - unsigned char saved_flags = request->fl_flags; + unsigned char saved_flags = request->c.flc_flags; status = nfs4_set_lock_state(state, request); /* Unlock _before_ we do the RPC call */ - request->fl_flags |= FL_EXISTS; + request->c.flc_flags |= FL_EXISTS; /* Exclude nfs_delegation_claim_locks() */ mutex_lock(&sp->so_delegreturn_mutex); /* Exclude nfs4_reclaim_open_stateid() - note nesting! 
*/ @@ -7073,14 +7073,16 @@ static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock * status = -ENOMEM; if (IS_ERR(seqid)) goto out; - task = nfs4_do_unlck(request, nfs_file_open_context(request->fl_file), lsp, seqid); + task = nfs4_do_unlck(request, + nfs_file_open_context(request->c.flc_file), + lsp, seqid); status = PTR_ERR(task); if (IS_ERR(task)) goto out; status = rpc_wait_for_completion_task(task); rpc_put_task(task); out: - request->fl_flags = saved_flags; + request->c.flc_flags = saved_flags; trace_nfs4_unlock(request, state, F_SETLK, status); return status; } @@ -7191,7 +7193,7 @@ static void nfs4_lock_done(struct rpc_task *task, void *calldata) renew_lease(NFS_SERVER(d_inode(data->ctx->dentry)), data->timestamp); if (data->arg.new_lock && !data->cancelled) { - data->fl.fl_flags &= ~(FL_SLEEP | FL_ACCESS); + data->fl.c.flc_flags &= ~(FL_SLEEP | FL_ACCESS); if (locks_lock_inode_wait(lsp->ls_state->inode, &data->fl) < 0) goto out_restart; } @@ -7292,7 +7294,8 @@ static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *f if (nfs_server_capable(state->inode, NFS_CAP_MOVEABLE)) task_setup_data.flags |= RPC_TASK_MOVEABLE; - data = nfs4_alloc_lockdata(fl, nfs_file_open_context(fl->fl_file), + data = nfs4_alloc_lockdata(fl, + nfs_file_open_context(fl->c.flc_file), fl->fl_u.nfs4_fl.owner, GFP_KERNEL); if (data == NULL) return -ENOMEM; @@ -7398,10 +7401,10 @@ static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock { struct nfs_inode *nfsi = NFS_I(state->inode); struct nfs4_state_owner *sp = state->owner; - unsigned char flags = request->fl_flags; + unsigned char flags = request->c.flc_flags; int status; - request->fl_flags |= FL_ACCESS; + request->c.flc_flags |= FL_ACCESS; status = locks_lock_inode_wait(state->inode, request); if (status < 0) goto out; @@ -7410,7 +7413,7 @@ static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock if (test_bit(NFS_DELEGATED_STATE, &state->flags)) { /* Yes: cache locks! */ /* ...but avoid races with delegation recall... */ - request->fl_flags = flags & ~FL_SLEEP; + request->c.flc_flags = flags & ~FL_SLEEP; status = locks_lock_inode_wait(state->inode, request); up_read(&nfsi->rwsem); mutex_unlock(&sp->so_delegreturn_mutex); @@ -7420,7 +7423,7 @@ static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock mutex_unlock(&sp->so_delegreturn_mutex); status = _nfs4_do_setlk(state, cmd, request, NFS_LOCK_NEW); out: - request->fl_flags = flags; + request->c.flc_flags = flags; return status; } @@ -7571,7 +7574,7 @@ nfs4_proc_lock(struct file *filp, int cmd, struct file_lock *request) if (state == NULL) return -ENOLCK; - if ((request->fl_flags & FL_POSIX) && + if ((request->c.flc_flags & FL_POSIX) && !test_bit(NFS_STATE_POSIX_LOCKS, &state->flags)) return -ENOLCK; @@ -7579,7 +7582,7 @@ nfs4_proc_lock(struct file *filp, int cmd, struct file_lock *request) * Don't rely on the VFS having checked the file open mode, * since it won't do this for flock() locks. 
*/ - switch (request->fl_type) { + switch (request->c.flc_type) { case F_RDLCK: if (!(filp->f_mode & FMODE_READ)) return -EBADF; diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index 16b57735e26a..8cfabdbda336 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c @@ -980,7 +980,7 @@ int nfs4_set_lock_state(struct nfs4_state *state, struct file_lock *fl) if (fl->fl_ops != NULL) return 0; - lsp = nfs4_get_lock_state(state, fl->fl_owner); + lsp = nfs4_get_lock_state(state, fl->c.flc_owner); if (lsp == NULL) return -ENOMEM; fl->fl_u.nfs4_fl.owner = lsp; @@ -1530,7 +1530,7 @@ static int nfs4_reclaim_locks(struct nfs4_state *state, const struct nfs4_state_ spin_lock(&flctx->flc_lock); restart: for_each_file_lock(fl, list) { - if (nfs_file_open_context(fl->fl_file)->state != state) + if (nfs_file_open_context(fl->c.flc_file)->state != state) continue; spin_unlock(&flctx->flc_lock); status = ops->recover_lock(state, fl); diff --git a/fs/nfs/nfs4trace.h b/fs/nfs/nfs4trace.h index d27919d7241d..fd7cb15b08b2 100644 --- a/fs/nfs/nfs4trace.h +++ b/fs/nfs/nfs4trace.h @@ -699,7 +699,7 @@ DECLARE_EVENT_CLASS(nfs4_lock_event, __entry->error = error < 0 ? -error : 0; __entry->cmd = cmd; - __entry->type = request->fl_type; + __entry->type = request->c.flc_type; __entry->start = request->fl_start; __entry->end = request->fl_end; __entry->dev = inode->i_sb->s_dev; @@ -771,7 +771,7 @@ TRACE_EVENT(nfs4_set_lock, __entry->error = error < 0 ? -error : 0; __entry->cmd = cmd; - __entry->type = request->fl_type; + __entry->type = request->c.flc_type; __entry->start = request->fl_start; __entry->end = request->fl_end; __entry->dev = inode->i_sb->s_dev; diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index 6e309db5afe4..1416099dfcd1 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c @@ -5052,10 +5052,10 @@ static int decode_lock_denied (struct xdr_stream *xdr, struct file_lock *fl) fl->fl_end = fl->fl_start + (loff_t)length - 1; if (length == ~(uint64_t)0) fl->fl_end = OFFSET_MAX; - fl->fl_type = F_WRLCK; + fl->c.flc_type = F_WRLCK; if (type & 1) - fl->fl_type = F_RDLCK; - fl->fl_pid = 0; + fl->c.flc_type = F_RDLCK; + fl->c.flc_pid = 0; } p = xdr_decode_hyper(p, &clientid); /* read 8 bytes */ namelen = be32_to_cpup(p); /* read 4 bytes */ /* have read all 32 bytes now */ diff --git a/fs/nfs/write.c b/fs/nfs/write.c index 13f2e10167ac..84bb85264572 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -25,7 +25,6 @@ #include #include #include -#define _NEED_FILE_LOCK_FIELD_MACROS #include #include @@ -1336,12 +1335,12 @@ static int nfs_can_extend_write(struct file *file, struct folio *folio, spin_lock(&flctx->flc_lock); if (!list_empty(&flctx->flc_posix)) { fl = list_first_entry(&flctx->flc_posix, struct file_lock, - fl_list); + c.flc_list); if (is_whole_file_wrlock(fl)) ret = 1; } else if (!list_empty(&flctx->flc_flock)) { fl = list_first_entry(&flctx->flc_flock, struct file_lock, - fl_list); + c.flc_list); if (lock_is_write(fl)) ret = 1; } -- cgit v1.2.3 From 05580bbfc6bcac93eae2c8a1bf9dc245b2934b28 Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Wed, 31 Jan 2024 18:02:23 -0500 Subject: nfsd: adapt to breakup of struct file_lock Most of the existing APIs have remained the same, but subsystems that access file_lock fields directly need to reach into struct file_lock_core now. 
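Callers that build a struct file_lock by hand follow the same split: owner, pid, type, flags and file pointer are set under fl->c, while the byte range and lm_ops stay where they were. A simplified, hypothetical sketch of that initialization pattern (not the actual nfsd code):

static void init_whole_file_deleg(struct file_lock *fl, struct file *filp,
				  fl_owner_t owner)
{
	locks_init_lock(fl);
	fl->c.flc_flags = FL_DELEG;
	fl->c.flc_type  = F_RDLCK;
	fl->c.flc_owner = owner;
	fl->c.flc_pid   = current->tgid;
	fl->c.flc_file  = filp;
	fl->fl_start    = 0;
	fl->fl_end      = OFFSET_MAX;
}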
Signed-off-by: Jeff Layton Link: https://lore.kernel.org/r/20240131-flsplit-v3-42-c6129007ee8d@kernel.org Reviewed-by: NeilBrown Signed-off-by: Christian Brauner --- fs/nfsd/filecache.c | 4 +-- fs/nfsd/netns.h | 1 - fs/nfsd/nfs4callback.c | 2 +- fs/nfsd/nfs4layouts.c | 15 ++++++----- fs/nfsd/nfs4state.c | 69 +++++++++++++++++++++++++------------------------- 5 files changed, 46 insertions(+), 45 deletions(-) (limited to 'fs') diff --git a/fs/nfsd/filecache.c b/fs/nfsd/filecache.c index 9cb7f0c33df5..b86d8494052c 100644 --- a/fs/nfsd/filecache.c +++ b/fs/nfsd/filecache.c @@ -662,8 +662,8 @@ nfsd_file_lease_notifier_call(struct notifier_block *nb, unsigned long arg, struct file_lock *fl = data; /* Only close files for F_SETLEASE leases */ - if (fl->fl_flags & FL_LEASE) - nfsd_file_close_inode(file_inode(fl->fl_file)); + if (fl->c.flc_flags & FL_LEASE) + nfsd_file_close_inode(file_inode(fl->c.flc_file)); return 0; } diff --git a/fs/nfsd/netns.h b/fs/nfsd/netns.h index fd91125208be..74b4360779a1 100644 --- a/fs/nfsd/netns.h +++ b/fs/nfsd/netns.h @@ -10,7 +10,6 @@ #include #include -#define _NEED_FILE_LOCK_FIELD_MACROS #include #include #include diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c index 926c29879c6a..32d23ef3e5de 100644 --- a/fs/nfsd/nfs4callback.c +++ b/fs/nfsd/nfs4callback.c @@ -674,7 +674,7 @@ static void nfs4_xdr_enc_cb_notify_lock(struct rpc_rqst *req, const struct nfsd4_callback *cb = data; const struct nfsd4_blocked_lock *nbl = container_of(cb, struct nfsd4_blocked_lock, nbl_cb); - struct nfs4_lockowner *lo = (struct nfs4_lockowner *)nbl->nbl_lock.fl_owner; + struct nfs4_lockowner *lo = (struct nfs4_lockowner *)nbl->nbl_lock.c.flc_owner; struct nfs4_cb_compound_hdr hdr = { .ident = 0, .minorversion = cb->cb_clp->cl_minorversion, diff --git a/fs/nfsd/nfs4layouts.c b/fs/nfsd/nfs4layouts.c index 5e8096bc5eaa..daae68e526e0 100644 --- a/fs/nfsd/nfs4layouts.c +++ b/fs/nfsd/nfs4layouts.c @@ -193,14 +193,15 @@ nfsd4_layout_setlease(struct nfs4_layout_stateid *ls) return -ENOMEM; locks_init_lock(fl); fl->fl_lmops = &nfsd4_layouts_lm_ops; - fl->fl_flags = FL_LAYOUT; - fl->fl_type = F_RDLCK; + fl->c.flc_flags = FL_LAYOUT; + fl->c.flc_type = F_RDLCK; fl->fl_end = OFFSET_MAX; - fl->fl_owner = ls; - fl->fl_pid = current->tgid; - fl->fl_file = ls->ls_file->nf_file; + fl->c.flc_owner = ls; + fl->c.flc_pid = current->tgid; + fl->c.flc_file = ls->ls_file->nf_file; - status = vfs_setlease(fl->fl_file, fl->fl_type, &fl, NULL); + status = vfs_setlease(fl->c.flc_file, fl->c.flc_type, &fl, + NULL); if (status) { locks_free_lock(fl); return status; @@ -731,7 +732,7 @@ nfsd4_layout_lm_break(struct file_lock *fl) * in time: */ fl->fl_break_time = 0; - nfsd4_recall_file_layout(fl->fl_owner); + nfsd4_recall_file_layout(fl->c.flc_owner); return false; } diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index dd83635d4adf..5d9ba9b9cff9 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -4924,7 +4924,7 @@ static void nfsd_break_one_deleg(struct nfs4_delegation *dp) static bool nfsd_break_deleg_cb(struct file_lock *fl) { - struct nfs4_delegation *dp = (struct nfs4_delegation *)fl->fl_owner; + struct nfs4_delegation *dp = (struct nfs4_delegation *) fl->c.flc_owner; struct nfs4_file *fp = dp->dl_stid.sc_file; struct nfs4_client *clp = dp->dl_stid.sc_client; struct nfsd_net *nn; @@ -4962,7 +4962,7 @@ nfsd_break_deleg_cb(struct file_lock *fl) */ static bool nfsd_breaker_owns_lease(struct file_lock *fl) { - struct nfs4_delegation *dl = fl->fl_owner; + struct nfs4_delegation *dl = 
fl->c.flc_owner; struct svc_rqst *rqst; struct nfs4_client *clp; @@ -4980,7 +4980,7 @@ static int nfsd_change_deleg_cb(struct file_lock *onlist, int arg, struct list_head *dispose) { - struct nfs4_delegation *dp = (struct nfs4_delegation *)onlist->fl_owner; + struct nfs4_delegation *dp = (struct nfs4_delegation *) onlist->c.flc_owner; struct nfs4_client *clp = dp->dl_stid.sc_client; if (arg & F_UNLCK) { @@ -5340,12 +5340,12 @@ static struct file_lock *nfs4_alloc_init_lease(struct nfs4_delegation *dp, if (!fl) return NULL; fl->fl_lmops = &nfsd_lease_mng_ops; - fl->fl_flags = FL_DELEG; - fl->fl_type = flag == NFS4_OPEN_DELEGATE_READ? F_RDLCK: F_WRLCK; + fl->c.flc_flags = FL_DELEG; + fl->c.flc_type = flag == NFS4_OPEN_DELEGATE_READ? F_RDLCK: F_WRLCK; fl->fl_end = OFFSET_MAX; - fl->fl_owner = (fl_owner_t)dp; - fl->fl_pid = current->tgid; - fl->fl_file = dp->dl_stid.sc_file->fi_deleg_file->nf_file; + fl->c.flc_owner = (fl_owner_t)dp; + fl->c.flc_pid = current->tgid; + fl->c.flc_file = dp->dl_stid.sc_file->fi_deleg_file->nf_file; return fl; } @@ -5533,7 +5533,8 @@ nfs4_set_delegation(struct nfsd4_open *open, struct nfs4_ol_stateid *stp, if (!fl) goto out_clnt_odstate; - status = vfs_setlease(fp->fi_deleg_file->nf_file, fl->fl_type, &fl, NULL); + status = vfs_setlease(fp->fi_deleg_file->nf_file, + fl->c.flc_type, &fl, NULL); if (fl) locks_free_lock(fl); if (status) @@ -7149,7 +7150,7 @@ nfsd4_lm_put_owner(fl_owner_t owner) static bool nfsd4_lm_lock_expirable(struct file_lock *cfl) { - struct nfs4_lockowner *lo = (struct nfs4_lockowner *)cfl->fl_owner; + struct nfs4_lockowner *lo = (struct nfs4_lockowner *) cfl->c.flc_owner; struct nfs4_client *clp = lo->lo_owner.so_client; struct nfsd_net *nn; @@ -7171,7 +7172,7 @@ nfsd4_lm_expire_lock(void) static void nfsd4_lm_notify(struct file_lock *fl) { - struct nfs4_lockowner *lo = (struct nfs4_lockowner *)fl->fl_owner; + struct nfs4_lockowner *lo = (struct nfs4_lockowner *) fl->c.flc_owner; struct net *net = lo->lo_owner.so_client->net; struct nfsd_net *nn = net_generic(net, nfsd_net_id); struct nfsd4_blocked_lock *nbl = container_of(fl, @@ -7208,7 +7209,7 @@ nfs4_set_lock_denied(struct file_lock *fl, struct nfsd4_lock_denied *deny) struct nfs4_lockowner *lo; if (fl->fl_lmops == &nfsd_posix_mng_ops) { - lo = (struct nfs4_lockowner *) fl->fl_owner; + lo = (struct nfs4_lockowner *) fl->c.flc_owner; xdr_netobj_dup(&deny->ld_owner, &lo->lo_owner.so_owner, GFP_KERNEL); if (!deny->ld_owner.data) @@ -7227,7 +7228,7 @@ nevermind: if (fl->fl_end != NFS4_MAX_UINT64) deny->ld_length = fl->fl_end - fl->fl_start + 1; deny->ld_type = NFS4_READ_LT; - if (fl->fl_type != F_RDLCK) + if (fl->c.flc_type != F_RDLCK) deny->ld_type = NFS4_WRITE_LT; } @@ -7615,11 +7616,11 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, } file_lock = &nbl->nbl_lock; - file_lock->fl_type = type; - file_lock->fl_owner = (fl_owner_t)lockowner(nfs4_get_stateowner(&lock_sop->lo_owner)); - file_lock->fl_pid = current->tgid; - file_lock->fl_file = nf->nf_file; - file_lock->fl_flags = flags; + file_lock->c.flc_type = type; + file_lock->c.flc_owner = (fl_owner_t)lockowner(nfs4_get_stateowner(&lock_sop->lo_owner)); + file_lock->c.flc_pid = current->tgid; + file_lock->c.flc_file = nf->nf_file; + file_lock->c.flc_flags = flags; file_lock->fl_lmops = &nfsd_posix_mng_ops; file_lock->fl_start = lock->lk_offset; file_lock->fl_end = last_byte_offset(lock->lk_offset, lock->lk_length); @@ -7737,9 +7738,9 @@ static __be32 nfsd_test_lock(struct svc_rqst *rqstp, struct svc_fh *fhp, struct 
err = nfserrno(nfsd_open_break_lease(inode, NFSD_MAY_READ)); if (err) goto out; - lock->fl_file = nf->nf_file; + lock->c.flc_file = nf->nf_file; err = nfserrno(vfs_test_lock(nf->nf_file, lock)); - lock->fl_file = NULL; + lock->c.flc_file = NULL; out: inode_unlock(inode); nfsd_file_put(nf); @@ -7784,11 +7785,11 @@ nfsd4_lockt(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, switch (lockt->lt_type) { case NFS4_READ_LT: case NFS4_READW_LT: - file_lock->fl_type = F_RDLCK; + file_lock->c.flc_type = F_RDLCK; break; case NFS4_WRITE_LT: case NFS4_WRITEW_LT: - file_lock->fl_type = F_WRLCK; + file_lock->c.flc_type = F_WRLCK; break; default: dprintk("NFSD: nfs4_lockt: bad lock type!\n"); @@ -7798,9 +7799,9 @@ nfsd4_lockt(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, lo = find_lockowner_str(cstate->clp, &lockt->lt_owner); if (lo) - file_lock->fl_owner = (fl_owner_t)lo; - file_lock->fl_pid = current->tgid; - file_lock->fl_flags = FL_POSIX; + file_lock->c.flc_owner = (fl_owner_t)lo; + file_lock->c.flc_pid = current->tgid; + file_lock->c.flc_flags = FL_POSIX; file_lock->fl_start = lockt->lt_offset; file_lock->fl_end = last_byte_offset(lockt->lt_offset, lockt->lt_length); @@ -7811,7 +7812,7 @@ nfsd4_lockt(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, if (status) goto out; - if (file_lock->fl_type != F_UNLCK) { + if (file_lock->c.flc_type != F_UNLCK) { status = nfserr_denied; nfs4_set_lock_denied(file_lock, &lockt->lt_denied); } @@ -7867,11 +7868,11 @@ nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, goto put_file; } - file_lock->fl_type = F_UNLCK; - file_lock->fl_owner = (fl_owner_t)lockowner(nfs4_get_stateowner(stp->st_stateowner)); - file_lock->fl_pid = current->tgid; - file_lock->fl_file = nf->nf_file; - file_lock->fl_flags = FL_POSIX; + file_lock->c.flc_type = F_UNLCK; + file_lock->c.flc_owner = (fl_owner_t)lockowner(nfs4_get_stateowner(stp->st_stateowner)); + file_lock->c.flc_pid = current->tgid; + file_lock->c.flc_file = nf->nf_file; + file_lock->c.flc_flags = FL_POSIX; file_lock->fl_lmops = &nfsd_posix_mng_ops; file_lock->fl_start = locku->lu_offset; @@ -7927,7 +7928,7 @@ check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner) if (flctx && !list_empty_careful(&flctx->flc_posix)) { spin_lock(&flctx->flc_lock); for_each_file_lock(fl, &flctx->flc_posix) { - if (fl->fl_owner == (fl_owner_t)lowner) { + if (fl->c.flc_owner == (fl_owner_t)lowner) { status = true; break; } @@ -8456,7 +8457,7 @@ nfsd4_deleg_getattr_conflict(struct svc_rqst *rqstp, struct inode *inode) return 0; spin_lock(&ctx->flc_lock); for_each_file_lock(fl, &ctx->flc_lease) { - if (fl->fl_flags == FL_LAYOUT) + if (fl->c.flc_flags == FL_LAYOUT) continue; if (fl->fl_lmops != &nfsd_lease_mng_ops) { /* @@ -8469,7 +8470,7 @@ nfsd4_deleg_getattr_conflict(struct svc_rqst *rqstp, struct inode *inode) goto break_lease; } if (lock_is_write(fl)) { - dp = fl->fl_owner; + dp = fl->c.flc_owner; if (dp->dl_recall.cb_clp == *(rqstp->rq_lease_breaker)) { spin_unlock(&ctx->flc_lock); return 0; -- cgit v1.2.3 From c8df2cc9d63bdb365c520a2dd5472f7b7755644f Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Wed, 31 Jan 2024 18:02:24 -0500 Subject: ocfs2: adapt to breakup of struct file_lock Most of the existing APIs have remained the same, but subsystems that access file_lock fields directly need to reach into struct file_lock_core now. 
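A minimal sketch of what that conversion looks like at a call site, using only the core fields and helpers that the hunks below rely on (the function itself is hypothetical):

#include <linux/fs.h>
#include <linux/filelock.h>

/* Hypothetical example: build a whole-file unlock request.  The generic
 * fields (type, flags, owner, pid, file) now live in the embedded
 * struct file_lock_core "c"; the byte-range fields fl_start/fl_end stay
 * in struct file_lock itself. */
static void example_flock_unlock(struct file *file)
{
	struct file_lock request;

	locks_init_lock(&request);
	request.c.flc_type  = F_UNLCK;		/* was request.fl_type  */
	request.c.flc_flags = FL_FLOCK;		/* was request.fl_flags */

	locks_lock_file_wait(file, &request);
}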
Signed-off-by: Jeff Layton Link: https://lore.kernel.org/r/20240131-flsplit-v3-43-c6129007ee8d@kernel.org Reviewed-by: NeilBrown Signed-off-by: Christian Brauner --- fs/ocfs2/locks.c | 9 ++++----- fs/ocfs2/stack_user.c | 1 - 2 files changed, 4 insertions(+), 6 deletions(-) (limited to 'fs') diff --git a/fs/ocfs2/locks.c b/fs/ocfs2/locks.c index 84ad403b5998..6de944818c56 100644 --- a/fs/ocfs2/locks.c +++ b/fs/ocfs2/locks.c @@ -8,7 +8,6 @@ */ #include -#define _NEED_FILE_LOCK_FIELD_MACROS #include #include @@ -54,8 +53,8 @@ static int ocfs2_do_flock(struct file *file, struct inode *inode, */ locks_init_lock(&request); - request.fl_type = F_UNLCK; - request.fl_flags = FL_FLOCK; + request.c.flc_type = F_UNLCK; + request.c.flc_flags = FL_FLOCK; locks_lock_file_wait(file, &request); ocfs2_file_unlock(file); @@ -101,7 +100,7 @@ int ocfs2_flock(struct file *file, int cmd, struct file_lock *fl) struct inode *inode = file->f_mapping->host; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); - if (!(fl->fl_flags & FL_FLOCK)) + if (!(fl->c.flc_flags & FL_FLOCK)) return -ENOLCK; if ((osb->s_mount_opt & OCFS2_MOUNT_LOCALFLOCKS) || @@ -119,7 +118,7 @@ int ocfs2_lock(struct file *file, int cmd, struct file_lock *fl) struct inode *inode = file->f_mapping->host; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); - if (!(fl->fl_flags & FL_POSIX)) + if (!(fl->c.flc_flags & FL_POSIX)) return -ENOLCK; return ocfs2_plock(osb->cconn, OCFS2_I(inode)->ip_blkno, file, cmd, fl); diff --git a/fs/ocfs2/stack_user.c b/fs/ocfs2/stack_user.c index 39b7e47a8618..c11406cd87a8 100644 --- a/fs/ocfs2/stack_user.c +++ b/fs/ocfs2/stack_user.c @@ -9,7 +9,6 @@ #include #include -#define _NEED_FILE_LOCK_FIELD_MACROS #include #include #include -- cgit v1.2.3 From 84e286c1bb9b22d5893d34fcedd69224a65c439c Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Wed, 31 Jan 2024 18:02:25 -0500 Subject: smb/client: adapt to breakup of struct file_lock Most of the existing APIs have remained the same, but subsystems that access file_lock fields directly need to reach into struct file_lock_core now. 
Signed-off-by: Jeff Layton Link: https://lore.kernel.org/r/20240131-flsplit-v3-44-c6129007ee8d@kernel.org Reviewed-by: NeilBrown Signed-off-by: Christian Brauner --- fs/smb/client/cifsglob.h | 1 - fs/smb/client/cifssmb.c | 9 +++---- fs/smb/client/file.c | 67 +++++++++++++++++++++++++----------------------- fs/smb/client/smb2file.c | 3 +-- 4 files changed, 40 insertions(+), 40 deletions(-) (limited to 'fs') diff --git a/fs/smb/client/cifsglob.h b/fs/smb/client/cifsglob.h index fcda4c77c649..20036fb16cec 100644 --- a/fs/smb/client/cifsglob.h +++ b/fs/smb/client/cifsglob.h @@ -26,7 +26,6 @@ #include #include "../common/smb2pdu.h" #include "smb2pdu.h" -#define _NEED_FILE_LOCK_FIELD_MACROS #include #define SMB_PATH_MAX 260 diff --git a/fs/smb/client/cifssmb.c b/fs/smb/client/cifssmb.c index e19ecf692c20..5eb83bafc7fd 100644 --- a/fs/smb/client/cifssmb.c +++ b/fs/smb/client/cifssmb.c @@ -15,7 +15,6 @@ /* want to reuse a stale file handle and only the caller knows the file info */ #include -#define _NEED_FILE_LOCK_FIELD_MACROS #include #include #include @@ -2067,20 +2066,20 @@ CIFSSMBPosixLock(const unsigned int xid, struct cifs_tcon *tcon, parm_data = (struct cifs_posix_lock *) ((char *)&pSMBr->hdr.Protocol + data_offset); if (parm_data->lock_type == cpu_to_le16(CIFS_UNLCK)) - pLockData->fl_type = F_UNLCK; + pLockData->c.flc_type = F_UNLCK; else { if (parm_data->lock_type == cpu_to_le16(CIFS_RDLCK)) - pLockData->fl_type = F_RDLCK; + pLockData->c.flc_type = F_RDLCK; else if (parm_data->lock_type == cpu_to_le16(CIFS_WRLCK)) - pLockData->fl_type = F_WRLCK; + pLockData->c.flc_type = F_WRLCK; pLockData->fl_start = le64_to_cpu(parm_data->start); pLockData->fl_end = pLockData->fl_start + (le64_to_cpu(parm_data->length) ? le64_to_cpu(parm_data->length) - 1 : 0); - pLockData->fl_pid = -le32_to_cpu(parm_data->pid); + pLockData->c.flc_pid = -le32_to_cpu(parm_data->pid); } } diff --git a/fs/smb/client/file.c b/fs/smb/client/file.c index d3f5969c3a86..ee8b264fbc31 100644 --- a/fs/smb/client/file.c +++ b/fs/smb/client/file.c @@ -9,7 +9,6 @@ * */ #include -#define _NEED_FILE_LOCK_FIELD_MACROS #include #include #include @@ -1313,20 +1312,20 @@ cifs_lock_test(struct cifsFileInfo *cfile, __u64 offset, __u64 length, down_read(&cinode->lock_sem); exist = cifs_find_lock_conflict(cfile, offset, length, type, - flock->fl_flags, &conf_lock, + flock->c.flc_flags, &conf_lock, CIFS_LOCK_OP); if (exist) { flock->fl_start = conf_lock->offset; flock->fl_end = conf_lock->offset + conf_lock->length - 1; - flock->fl_pid = conf_lock->pid; + flock->c.flc_pid = conf_lock->pid; if (conf_lock->type & server->vals->shared_lock_type) - flock->fl_type = F_RDLCK; + flock->c.flc_type = F_RDLCK; else - flock->fl_type = F_WRLCK; + flock->c.flc_type = F_WRLCK; } else if (!cinode->can_cache_brlcks) rc = 1; else - flock->fl_type = F_UNLCK; + flock->c.flc_type = F_UNLCK; up_read(&cinode->lock_sem); return rc; @@ -1402,16 +1401,16 @@ cifs_posix_lock_test(struct file *file, struct file_lock *flock) { int rc = 0; struct cifsInodeInfo *cinode = CIFS_I(file_inode(file)); - unsigned char saved_type = flock->fl_type; + unsigned char saved_type = flock->c.flc_type; - if ((flock->fl_flags & FL_POSIX) == 0) + if ((flock->c.flc_flags & FL_POSIX) == 0) return 1; down_read(&cinode->lock_sem); posix_test_lock(file, flock); if (lock_is_unlock(flock) && !cinode->can_cache_brlcks) { - flock->fl_type = saved_type; + flock->c.flc_type = saved_type; rc = 1; } @@ -1432,7 +1431,7 @@ cifs_posix_lock_set(struct file *file, struct file_lock *flock) struct 
cifsInodeInfo *cinode = CIFS_I(file_inode(file)); int rc = FILE_LOCK_DEFERRED + 1; - if ((flock->fl_flags & FL_POSIX) == 0) + if ((flock->c.flc_flags & FL_POSIX) == 0) return rc; cifs_down_write(&cinode->lock_sem); @@ -1583,6 +1582,8 @@ cifs_push_posix_locks(struct cifsFileInfo *cfile) el = locks_to_send.next; spin_lock(&flctx->flc_lock); for_each_file_lock(flock, &flctx->flc_posix) { + unsigned char ftype = flock->c.flc_type; + if (el == &locks_to_send) { /* * The list ended. We don't have enough allocated @@ -1592,12 +1593,12 @@ cifs_push_posix_locks(struct cifsFileInfo *cfile) break; } length = cifs_flock_len(flock); - if (lock_is_read(flock) || flock->fl_type == F_SHLCK) + if (ftype == F_RDLCK || ftype == F_SHLCK) type = CIFS_RDLCK; else type = CIFS_WRLCK; lck = list_entry(el, struct lock_to_push, llist); - lck->pid = hash_lockowner(flock->fl_owner); + lck->pid = hash_lockowner(flock->c.flc_owner); lck->netfid = cfile->fid.netfid; lck->length = length; lck->type = type; @@ -1664,22 +1665,23 @@ static void cifs_read_flock(struct file_lock *flock, __u32 *type, int *lock, int *unlock, bool *wait_flag, struct TCP_Server_Info *server) { - if (flock->fl_flags & FL_POSIX) + if (flock->c.flc_flags & FL_POSIX) cifs_dbg(FYI, "Posix\n"); - if (flock->fl_flags & FL_FLOCK) + if (flock->c.flc_flags & FL_FLOCK) cifs_dbg(FYI, "Flock\n"); - if (flock->fl_flags & FL_SLEEP) { + if (flock->c.flc_flags & FL_SLEEP) { cifs_dbg(FYI, "Blocking lock\n"); *wait_flag = true; } - if (flock->fl_flags & FL_ACCESS) + if (flock->c.flc_flags & FL_ACCESS) cifs_dbg(FYI, "Process suspended by mandatory locking - not implemented yet\n"); - if (flock->fl_flags & FL_LEASE) + if (flock->c.flc_flags & FL_LEASE) cifs_dbg(FYI, "Lease on file - not implemented yet\n"); - if (flock->fl_flags & + if (flock->c.flc_flags & (~(FL_POSIX | FL_FLOCK | FL_SLEEP | FL_ACCESS | FL_LEASE | FL_CLOSE | FL_OFDLCK))) - cifs_dbg(FYI, "Unknown lock flags 0x%x\n", flock->fl_flags); + cifs_dbg(FYI, "Unknown lock flags 0x%x\n", + flock->c.flc_flags); *type = server->vals->large_lock_type; if (lock_is_write(flock)) { @@ -1695,11 +1697,11 @@ cifs_read_flock(struct file_lock *flock, __u32 *type, int *lock, int *unlock, cifs_dbg(FYI, "F_RDLCK\n"); *type |= server->vals->shared_lock_type; *lock = 1; - } else if (flock->fl_type == F_EXLCK) { + } else if (flock->c.flc_type == F_EXLCK) { cifs_dbg(FYI, "F_EXLCK\n"); *type |= server->vals->exclusive_lock_type; *lock = 1; - } else if (flock->fl_type == F_SHLCK) { + } else if (flock->c.flc_type == F_SHLCK) { cifs_dbg(FYI, "F_SHLCK\n"); *type |= server->vals->shared_lock_type; *lock = 1; @@ -1731,7 +1733,7 @@ cifs_getlk(struct file *file, struct file_lock *flock, __u32 type, else posix_lock_type = CIFS_WRLCK; rc = CIFSSMBPosixLock(xid, tcon, netfid, - hash_lockowner(flock->fl_owner), + hash_lockowner(flock->c.flc_owner), flock->fl_start, length, flock, posix_lock_type, wait_flag); return rc; @@ -1748,7 +1750,7 @@ cifs_getlk(struct file *file, struct file_lock *flock, __u32 type, if (rc == 0) { rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length, type, 0, 1, false); - flock->fl_type = F_UNLCK; + flock->c.flc_type = F_UNLCK; if (rc != 0) cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n", rc); @@ -1756,7 +1758,7 @@ cifs_getlk(struct file *file, struct file_lock *flock, __u32 type, } if (type & server->vals->shared_lock_type) { - flock->fl_type = F_WRLCK; + flock->c.flc_type = F_WRLCK; return 0; } @@ -1768,12 +1770,12 @@ cifs_getlk(struct file *file, struct file_lock *flock, 
__u32 type, if (rc == 0) { rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length, type | server->vals->shared_lock_type, 0, 1, false); - flock->fl_type = F_RDLCK; + flock->c.flc_type = F_RDLCK; if (rc != 0) cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n", rc); } else - flock->fl_type = F_WRLCK; + flock->c.flc_type = F_WRLCK; return 0; } @@ -1941,7 +1943,7 @@ cifs_setlk(struct file *file, struct file_lock *flock, __u32 type, posix_lock_type = CIFS_UNLCK; rc = CIFSSMBPosixLock(xid, tcon, cfile->fid.netfid, - hash_lockowner(flock->fl_owner), + hash_lockowner(flock->c.flc_owner), flock->fl_start, length, NULL, posix_lock_type, wait_flag); goto out; @@ -1951,7 +1953,7 @@ cifs_setlk(struct file *file, struct file_lock *flock, __u32 type, struct cifsLockInfo *lock; lock = cifs_lock_init(flock->fl_start, length, type, - flock->fl_flags); + flock->c.flc_flags); if (!lock) return -ENOMEM; @@ -1990,7 +1992,7 @@ cifs_setlk(struct file *file, struct file_lock *flock, __u32 type, rc = server->ops->mand_unlock_range(cfile, flock, xid); out: - if ((flock->fl_flags & FL_POSIX) || (flock->fl_flags & FL_FLOCK)) { + if ((flock->c.flc_flags & FL_POSIX) || (flock->c.flc_flags & FL_FLOCK)) { /* * If this is a request to remove all locks because we * are closing the file, it doesn't matter if the @@ -1999,7 +2001,7 @@ out: */ if (rc) { cifs_dbg(VFS, "%s failed rc=%d\n", __func__, rc); - if (!(flock->fl_flags & FL_CLOSE)) + if (!(flock->c.flc_flags & FL_CLOSE)) return rc; } rc = locks_lock_file_wait(file, flock); @@ -2020,7 +2022,7 @@ int cifs_flock(struct file *file, int cmd, struct file_lock *fl) xid = get_xid(); - if (!(fl->fl_flags & FL_FLOCK)) { + if (!(fl->c.flc_flags & FL_FLOCK)) { rc = -ENOLCK; free_xid(xid); return rc; @@ -2071,7 +2073,8 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *flock) xid = get_xid(); cifs_dbg(FYI, "%s: %pD2 cmd=0x%x type=0x%x flags=0x%x r=%lld:%lld\n", __func__, file, cmd, - flock->fl_flags, flock->fl_type, (long long)flock->fl_start, + flock->c.flc_flags, flock->c.flc_type, + (long long)flock->fl_start, (long long)flock->fl_end); cfile = (struct cifsFileInfo *)file->private_data; diff --git a/fs/smb/client/smb2file.c b/fs/smb/client/smb2file.c index cd225d15a7c5..c23478ab1cf8 100644 --- a/fs/smb/client/smb2file.c +++ b/fs/smb/client/smb2file.c @@ -7,7 +7,6 @@ * */ #include -#define _NEED_FILE_LOCK_FIELD_MACROS #include #include #include @@ -229,7 +228,7 @@ smb2_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock, * flock and OFD lock are associated with an open * file description, not the process. */ - if (!(flock->fl_flags & (FL_FLOCK | FL_OFDLCK))) + if (!(flock->c.flc_flags & (FL_FLOCK | FL_OFDLCK))) continue; if (cinode->can_cache_brlcks) { /* -- cgit v1.2.3 From 16f9ce818901c7b7618094242adc0b51208a7c89 Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Wed, 31 Jan 2024 18:02:26 -0500 Subject: smb/server: adapt to breakup of struct file_lock Most of the existing APIs have remained the same, but subsystems that access file_lock fields directly need to reach into struct file_lock_core now. 
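The same reach-into-the-core pattern applies to the wait machinery ksmbd uses; a hedged sketch, mirroring the helpers converted in fs/smb/server/vfs.c below (flc_wait and flc_blocker are the file_lock_core members):

#include <linux/wait.h>
#include <linux/filelock.h>

/* Illustrative only: sleep until this lock request is no longer blocked
 * on another lock.  fl_wait/fl_blocker became c.flc_wait/c.flc_blocker. */
static void example_posix_lock_wait(struct file_lock *flock)
{
	wait_event(flock->c.flc_wait, !flock->c.flc_blocker);
}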
Signed-off-by: Jeff Layton Link: https://lore.kernel.org/r/20240131-flsplit-v3-45-c6129007ee8d@kernel.org Reviewed-by: NeilBrown Signed-off-by: Christian Brauner --- fs/smb/server/smb2pdu.c | 39 +++++++++++++++++++-------------------- fs/smb/server/vfs.c | 9 ++++----- 2 files changed, 23 insertions(+), 25 deletions(-) (limited to 'fs') diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c index 11cc28719582..bec0a846a8d5 100644 --- a/fs/smb/server/smb2pdu.c +++ b/fs/smb/server/smb2pdu.c @@ -12,7 +12,6 @@ #include #include #include -#define _NEED_FILE_LOCK_FIELD_MACROS #include #include "glob.h" @@ -6761,10 +6760,10 @@ struct file_lock *smb_flock_init(struct file *f) locks_init_lock(fl); - fl->fl_owner = f; - fl->fl_pid = current->tgid; - fl->fl_file = f; - fl->fl_flags = FL_POSIX; + fl->c.flc_owner = f; + fl->c.flc_pid = current->tgid; + fl->c.flc_file = f; + fl->c.flc_flags = FL_POSIX; fl->fl_ops = NULL; fl->fl_lmops = NULL; @@ -6781,30 +6780,30 @@ static int smb2_set_flock_flags(struct file_lock *flock, int flags) case SMB2_LOCKFLAG_SHARED: ksmbd_debug(SMB, "received shared request\n"); cmd = F_SETLKW; - flock->fl_type = F_RDLCK; - flock->fl_flags |= FL_SLEEP; + flock->c.flc_type = F_RDLCK; + flock->c.flc_flags |= FL_SLEEP; break; case SMB2_LOCKFLAG_EXCLUSIVE: ksmbd_debug(SMB, "received exclusive request\n"); cmd = F_SETLKW; - flock->fl_type = F_WRLCK; - flock->fl_flags |= FL_SLEEP; + flock->c.flc_type = F_WRLCK; + flock->c.flc_flags |= FL_SLEEP; break; case SMB2_LOCKFLAG_SHARED | SMB2_LOCKFLAG_FAIL_IMMEDIATELY: ksmbd_debug(SMB, "received shared & fail immediately request\n"); cmd = F_SETLK; - flock->fl_type = F_RDLCK; + flock->c.flc_type = F_RDLCK; break; case SMB2_LOCKFLAG_EXCLUSIVE | SMB2_LOCKFLAG_FAIL_IMMEDIATELY: ksmbd_debug(SMB, "received exclusive & fail immediately request\n"); cmd = F_SETLK; - flock->fl_type = F_WRLCK; + flock->c.flc_type = F_WRLCK; break; case SMB2_LOCKFLAG_UNLOCK: ksmbd_debug(SMB, "received unlock request\n"); - flock->fl_type = F_UNLCK; + flock->c.flc_type = F_UNLCK; cmd = F_SETLK; break; } @@ -6848,7 +6847,7 @@ static void smb2_remove_blocked_lock(void **argv) static inline bool lock_defer_pending(struct file_lock *fl) { /* check pending lock waiters */ - return waitqueue_active(&fl->fl_wait); + return waitqueue_active(&fl->c.flc_wait); } /** @@ -6939,8 +6938,8 @@ int smb2_lock(struct ksmbd_work *work) list_for_each_entry(cmp_lock, &lock_list, llist) { if (cmp_lock->fl->fl_start <= flock->fl_start && cmp_lock->fl->fl_end >= flock->fl_end) { - if (cmp_lock->fl->fl_type != F_UNLCK && - flock->fl_type != F_UNLCK) { + if (cmp_lock->fl->c.flc_type != F_UNLCK && + flock->c.flc_type != F_UNLCK) { pr_err("conflict two locks in one request\n"); err = -EINVAL; locks_free_lock(flock); @@ -6988,12 +6987,12 @@ int smb2_lock(struct ksmbd_work *work) list_for_each_entry(conn, &conn_list, conns_list) { spin_lock(&conn->llist_lock); list_for_each_entry_safe(cmp_lock, tmp2, &conn->lock_list, clist) { - if (file_inode(cmp_lock->fl->fl_file) != - file_inode(smb_lock->fl->fl_file)) + if (file_inode(cmp_lock->fl->c.flc_file) != + file_inode(smb_lock->fl->c.flc_file)) continue; if (lock_is_unlock(smb_lock->fl)) { - if (cmp_lock->fl->fl_file == smb_lock->fl->fl_file && + if (cmp_lock->fl->c.flc_file == smb_lock->fl->c.flc_file && cmp_lock->start == smb_lock->start && cmp_lock->end == smb_lock->end && !lock_defer_pending(cmp_lock->fl)) { @@ -7010,7 +7009,7 @@ int smb2_lock(struct ksmbd_work *work) continue; } - if (cmp_lock->fl->fl_file == smb_lock->fl->fl_file) { + if 
(cmp_lock->fl->c.flc_file == smb_lock->fl->c.flc_file) { if (smb_lock->flags & SMB2_LOCKFLAG_SHARED) continue; } else { @@ -7176,7 +7175,7 @@ out: struct file_lock *rlock = NULL; rlock = smb_flock_init(filp); - rlock->fl_type = F_UNLCK; + rlock->c.flc_type = F_UNLCK; rlock->fl_start = smb_lock->start; rlock->fl_end = smb_lock->end; diff --git a/fs/smb/server/vfs.c b/fs/smb/server/vfs.c index 5dc87649400b..c487e834331a 100644 --- a/fs/smb/server/vfs.c +++ b/fs/smb/server/vfs.c @@ -6,7 +6,6 @@ #include #include -#define _NEED_FILE_LOCK_FIELD_MACROS #include #include #include @@ -349,7 +348,7 @@ static int check_lock_range(struct file *filp, loff_t start, loff_t end, } } else if (lock_is_write(flock)) { /* check owner in lock */ - if (flock->fl_file != filp) { + if (flock->c.flc_file != filp) { error = 1; pr_err("not allow rw access by exclusive lock from other opens\n"); goto out; @@ -1838,13 +1837,13 @@ int ksmbd_vfs_copy_file_ranges(struct ksmbd_work *work, void ksmbd_vfs_posix_lock_wait(struct file_lock *flock) { - wait_event(flock->fl_wait, !flock->fl_blocker); + wait_event(flock->c.flc_wait, !flock->c.flc_blocker); } int ksmbd_vfs_posix_lock_wait_timeout(struct file_lock *flock, long timeout) { - return wait_event_interruptible_timeout(flock->fl_wait, - !flock->fl_blocker, + return wait_event_interruptible_timeout(flock->c.flc_wait, + !flock->c.flc_blocker, timeout); } -- cgit v1.2.3 From c69ff4071935f946f1cddc59e1d36a03442ed015 Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Wed, 31 Jan 2024 18:02:28 -0500 Subject: filelock: split leases out of struct file_lock Add a new struct file_lease and move the lease-specific fields from struct file_lock to it. Convert the appropriate API calls to take struct file_lease instead, and convert the callers to use them. There is zero overlap between the lock manager operations for file locks and the ones for file leases, so split the lease-related operations off into a new lease_manager_operations struct. Signed-off-by: Jeff Layton Link: https://lore.kernel.org/r/20240131-flsplit-v3-47-c6129007ee8d@kernel.org Reviewed-by: NeilBrown Signed-off-by: Christian Brauner --- fs/libfs.c | 2 +- fs/locks.c | 123 ++++++++++++++++++++++++++-------------- fs/nfs/nfs4_fs.h | 2 +- fs/nfs/nfs4file.c | 2 +- fs/nfs/nfs4proc.c | 4 +- fs/nfsd/nfs4layouts.c | 17 +++--- fs/nfsd/nfs4state.c | 27 ++++----- fs/smb/client/cifsfs.c | 2 +- include/linux/filelock.h | 49 ++++++++++------ include/linux/fs.h | 5 +- include/trace/events/filelock.h | 18 +++--- 11 files changed, 153 insertions(+), 98 deletions(-) (limited to 'fs') diff --git a/fs/libfs.c b/fs/libfs.c index eec6031b0155..8b67cb4655d5 100644 --- a/fs/libfs.c +++ b/fs/libfs.c @@ -1580,7 +1580,7 @@ EXPORT_SYMBOL(alloc_anon_inode); * All arguments are ignored and it just returns -EINVAL. 
*/ int -simple_nosetlease(struct file *filp, int arg, struct file_lock **flp, +simple_nosetlease(struct file *filp, int arg, struct file_lease **flp, void **priv) { return -EINVAL; diff --git a/fs/locks.c b/fs/locks.c index 1a4b01203d3d..33c7f4a8c729 100644 --- a/fs/locks.c +++ b/fs/locks.c @@ -74,12 +74,17 @@ static struct file_lock *file_lock(struct file_lock_core *flc) return container_of(flc, struct file_lock, c); } -static bool lease_breaking(struct file_lock *fl) +static struct file_lease *file_lease(struct file_lock_core *flc) +{ + return container_of(flc, struct file_lease, c); +} + +static bool lease_breaking(struct file_lease *fl) { return fl->c.flc_flags & (FL_UNLOCK_PENDING | FL_DOWNGRADE_PENDING); } -static int target_leasetype(struct file_lock *fl) +static int target_leasetype(struct file_lease *fl) { if (fl->c.flc_flags & FL_UNLOCK_PENDING) return F_UNLCK; @@ -166,6 +171,7 @@ static DEFINE_SPINLOCK(blocked_lock_lock); static struct kmem_cache *flctx_cache __ro_after_init; static struct kmem_cache *filelock_cache __ro_after_init; +static struct kmem_cache *filelease_cache __ro_after_init; static struct file_lock_context * locks_get_lock_context(struct inode *inode, int type) @@ -275,6 +281,18 @@ struct file_lock *locks_alloc_lock(void) } EXPORT_SYMBOL_GPL(locks_alloc_lock); +/* Allocate an empty lock structure. */ +struct file_lease *locks_alloc_lease(void) +{ + struct file_lease *fl = kmem_cache_zalloc(filelease_cache, GFP_KERNEL); + + if (fl) + locks_init_lock_heads(&fl->c); + + return fl; +} +EXPORT_SYMBOL_GPL(locks_alloc_lease); + void locks_release_private(struct file_lock *fl) { struct file_lock_core *flc = &fl->c; @@ -336,15 +354,25 @@ void locks_free_lock(struct file_lock *fl) } EXPORT_SYMBOL(locks_free_lock); +/* Free a lease which is not in use. */ +void locks_free_lease(struct file_lease *fl) +{ + kmem_cache_free(filelease_cache, fl); +} +EXPORT_SYMBOL(locks_free_lease); + static void locks_dispose_list(struct list_head *dispose) { - struct file_lock *fl; + struct file_lock_core *flc; while (!list_empty(dispose)) { - fl = list_first_entry(dispose, struct file_lock, c.flc_list); - list_del_init(&fl->c.flc_list); - locks_free_lock(fl); + flc = list_first_entry(dispose, struct file_lock_core, flc_list); + list_del_init(&flc->flc_list); + if (flc->flc_flags & (FL_LEASE|FL_DELEG|FL_LAYOUT)) + locks_free_lease(file_lease(flc)); + else + locks_free_lock(file_lock(flc)); } } @@ -355,6 +383,13 @@ void locks_init_lock(struct file_lock *fl) } EXPORT_SYMBOL(locks_init_lock); +void locks_init_lease(struct file_lease *fl) +{ + memset(fl, 0, sizeof(*fl)); + locks_init_lock_heads(&fl->c); +} +EXPORT_SYMBOL(locks_init_lease); + /* * Initialize a new lock from an existing file_lock structure. 
*/ @@ -518,14 +553,14 @@ static int flock_to_posix_lock(struct file *filp, struct file_lock *fl, /* default lease lock manager operations */ static bool -lease_break_callback(struct file_lock *fl) +lease_break_callback(struct file_lease *fl) { kill_fasync(&fl->fl_fasync, SIGIO, POLL_MSG); return false; } static void -lease_setup(struct file_lock *fl, void **priv) +lease_setup(struct file_lease *fl, void **priv) { struct file *filp = fl->c.flc_file; struct fasync_struct *fa = *priv; @@ -541,7 +576,7 @@ lease_setup(struct file_lock *fl, void **priv) __f_setown(filp, task_pid(current), PIDTYPE_TGID, 0); } -static const struct lock_manager_operations lease_manager_ops = { +static const struct lease_manager_operations lease_manager_ops = { .lm_break = lease_break_callback, .lm_change = lease_modify, .lm_setup = lease_setup, @@ -550,7 +585,7 @@ static const struct lock_manager_operations lease_manager_ops = { /* * Initialize a lease, use the default lock manager operations */ -static int lease_init(struct file *filp, int type, struct file_lock *fl) +static int lease_init(struct file *filp, int type, struct file_lease *fl) { if (assign_type(&fl->c, type) != 0) return -EINVAL; @@ -560,17 +595,14 @@ static int lease_init(struct file *filp, int type, struct file_lock *fl) fl->c.flc_file = filp; fl->c.flc_flags = FL_LEASE; - fl->fl_start = 0; - fl->fl_end = OFFSET_MAX; - fl->fl_ops = NULL; fl->fl_lmops = &lease_manager_ops; return 0; } /* Allocate a file_lock initialised to this type of lease */ -static struct file_lock *lease_alloc(struct file *filp, int type) +static struct file_lease *lease_alloc(struct file *filp, int type) { - struct file_lock *fl = locks_alloc_lock(); + struct file_lease *fl = locks_alloc_lease(); int error = -ENOMEM; if (fl == NULL) @@ -578,7 +610,7 @@ static struct file_lock *lease_alloc(struct file *filp, int type) error = lease_init(filp, type, fl); if (error) { - locks_free_lock(fl); + locks_free_lease(fl); return ERR_PTR(error); } return fl; @@ -1395,7 +1427,7 @@ static int posix_lock_inode_wait(struct inode *inode, struct file_lock *fl) return error; } -static void lease_clear_pending(struct file_lock *fl, int arg) +static void lease_clear_pending(struct file_lease *fl, int arg) { switch (arg) { case F_UNLCK: @@ -1407,7 +1439,7 @@ static void lease_clear_pending(struct file_lock *fl, int arg) } /* We already had a lease on this file; just change its type */ -int lease_modify(struct file_lock *fl, int arg, struct list_head *dispose) +int lease_modify(struct file_lease *fl, int arg, struct list_head *dispose) { int error = assign_type(&fl->c, arg); @@ -1442,7 +1474,7 @@ static bool past_time(unsigned long then) static void time_out_leases(struct inode *inode, struct list_head *dispose) { struct file_lock_context *ctx = inode->i_flctx; - struct file_lock *fl, *tmp; + struct file_lease *fl, *tmp; lockdep_assert_held(&ctx->flc_lock); @@ -1458,8 +1490,8 @@ static void time_out_leases(struct inode *inode, struct list_head *dispose) static bool leases_conflict(struct file_lock_core *lc, struct file_lock_core *bc) { bool rc; - struct file_lock *lease = file_lock(lc); - struct file_lock *breaker = file_lock(bc); + struct file_lease *lease = file_lease(lc); + struct file_lease *breaker = file_lease(bc); if (lease->fl_lmops->lm_breaker_owns_lease && lease->fl_lmops->lm_breaker_owns_lease(lease)) @@ -1480,7 +1512,7 @@ trace: } static bool -any_leases_conflict(struct inode *inode, struct file_lock *breaker) +any_leases_conflict(struct inode *inode, struct file_lease *breaker) { struct 
file_lock_context *ctx = inode->i_flctx; struct file_lock_core *flc; @@ -1511,7 +1543,7 @@ int __break_lease(struct inode *inode, unsigned int mode, unsigned int type) { int error = 0; struct file_lock_context *ctx; - struct file_lock *new_fl, *fl, *tmp; + struct file_lease *new_fl, *fl, *tmp; unsigned long break_time; int want_write = (mode & O_ACCMODE) != O_RDONLY; LIST_HEAD(dispose); @@ -1571,7 +1603,7 @@ int __break_lease(struct inode *inode, unsigned int mode, unsigned int type) } restart: - fl = list_first_entry(&ctx->flc_lease, struct file_lock, c.flc_list); + fl = list_first_entry(&ctx->flc_lease, struct file_lease, c.flc_list); break_time = fl->fl_break_time; if (break_time != 0) break_time -= jiffies; @@ -1590,7 +1622,7 @@ restart: percpu_down_read(&file_rwsem); spin_lock(&ctx->flc_lock); trace_break_lease_unblock(inode, new_fl); - locks_delete_block(new_fl); + __locks_delete_block(&new_fl->c); if (error >= 0) { /* * Wait for the next conflicting lease that has not been @@ -1607,7 +1639,7 @@ out: percpu_up_read(&file_rwsem); locks_dispose_list(&dispose); free_lock: - locks_free_lock(new_fl); + locks_free_lease(new_fl); return error; } EXPORT_SYMBOL(__break_lease); @@ -1625,14 +1657,14 @@ void lease_get_mtime(struct inode *inode, struct timespec64 *time) { bool has_lease = false; struct file_lock_context *ctx; - struct file_lock *fl; + struct file_lock_core *flc; ctx = locks_inode_context(inode); if (ctx && !list_empty_careful(&ctx->flc_lease)) { spin_lock(&ctx->flc_lock); - fl = list_first_entry_or_null(&ctx->flc_lease, - struct file_lock, c.flc_list); - if (fl && lock_is_write(fl)) + flc = list_first_entry_or_null(&ctx->flc_lease, + struct file_lock_core, flc_list); + if (flc && flc->flc_type == F_WRLCK) has_lease = true; spin_unlock(&ctx->flc_lock); } @@ -1667,7 +1699,7 @@ EXPORT_SYMBOL(lease_get_mtime); */ int fcntl_getlease(struct file *filp) { - struct file_lock *fl; + struct file_lease *fl; struct inode *inode = file_inode(filp); struct file_lock_context *ctx; int type = F_UNLCK; @@ -1739,9 +1771,9 @@ check_conflicting_open(struct file *filp, const int arg, int flags) } static int -generic_add_lease(struct file *filp, int arg, struct file_lock **flp, void **priv) +generic_add_lease(struct file *filp, int arg, struct file_lease **flp, void **priv) { - struct file_lock *fl, *my_fl = NULL, *lease; + struct file_lease *fl, *my_fl = NULL, *lease; struct inode *inode = file_inode(filp); struct file_lock_context *ctx; bool is_deleg = (*flp)->c.flc_flags & FL_DELEG; @@ -1850,7 +1882,7 @@ out: static int generic_delete_lease(struct file *filp, void *owner) { int error = -EAGAIN; - struct file_lock *fl, *victim = NULL; + struct file_lease *fl, *victim = NULL; struct inode *inode = file_inode(filp); struct file_lock_context *ctx; LIST_HEAD(dispose); @@ -1890,7 +1922,7 @@ static int generic_delete_lease(struct file *filp, void *owner) * The (input) flp->fl_lmops->lm_break function is required * by break_lease(). 
*/ -int generic_setlease(struct file *filp, int arg, struct file_lock **flp, +int generic_setlease(struct file *filp, int arg, struct file_lease **flp, void **priv) { struct inode *inode = file_inode(filp); @@ -1937,7 +1969,7 @@ lease_notifier_chain_init(void) } static inline void -setlease_notifier(int arg, struct file_lock *lease) +setlease_notifier(int arg, struct file_lease *lease) { if (arg != F_UNLCK) srcu_notifier_call_chain(&lease_notifier_chain, arg, lease); @@ -1973,7 +2005,7 @@ EXPORT_SYMBOL_GPL(lease_unregister_notifier); * may be NULL if the lm_setup operation doesn't require it. */ int -vfs_setlease(struct file *filp, int arg, struct file_lock **lease, void **priv) +vfs_setlease(struct file *filp, int arg, struct file_lease **lease, void **priv) { if (lease) setlease_notifier(arg, *lease); @@ -1986,7 +2018,7 @@ EXPORT_SYMBOL_GPL(vfs_setlease); static int do_fcntl_add_lease(unsigned int fd, struct file *filp, int arg) { - struct file_lock *fl; + struct file_lease *fl; struct fasync_struct *new; int error; @@ -1996,14 +2028,14 @@ static int do_fcntl_add_lease(unsigned int fd, struct file *filp, int arg) new = fasync_alloc(); if (!new) { - locks_free_lock(fl); + locks_free_lease(fl); return -ENOMEM; } new->fa_fd = fd; error = vfs_setlease(filp, arg, &fl, (void **)&new); if (fl) - locks_free_lock(fl); + locks_free_lease(fl); if (new) fasync_free(new); return error; @@ -2626,7 +2658,7 @@ locks_remove_flock(struct file *filp, struct file_lock_context *flctx) static void locks_remove_lease(struct file *filp, struct file_lock_context *ctx) { - struct file_lock *fl, *tmp; + struct file_lease *fl, *tmp; LIST_HEAD(dispose); if (list_empty(&ctx->flc_lease)) @@ -2755,14 +2787,16 @@ static void lock_get_status(struct seq_file *f, struct file_lock_core *flc, } else if (flc->flc_flags & FL_FLOCK) { seq_puts(f, "FLOCK ADVISORY "); } else if (flc->flc_flags & (FL_LEASE|FL_DELEG|FL_LAYOUT)) { - type = target_leasetype(fl); + struct file_lease *lease = file_lease(flc); + + type = target_leasetype(lease); if (flc->flc_flags & FL_DELEG) seq_puts(f, "DELEG "); else seq_puts(f, "LEASE "); - if (lease_breaking(fl)) + if (lease_breaking(lease)) seq_puts(f, "BREAKING "); else if (flc->flc_file) seq_puts(f, "ACTIVE "); @@ -2945,6 +2979,9 @@ static int __init filelock_init(void) filelock_cache = kmem_cache_create("file_lock_cache", sizeof(struct file_lock), 0, SLAB_PANIC, NULL); + filelease_cache = kmem_cache_create("file_lock_cache", + sizeof(struct file_lease), 0, SLAB_PANIC, NULL); + for_each_possible_cpu(i) { struct file_lock_list_struct *fll = per_cpu_ptr(&file_lock_list, i); diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h index 581698f1b7b2..6ff41ceb9f1c 100644 --- a/fs/nfs/nfs4_fs.h +++ b/fs/nfs/nfs4_fs.h @@ -330,7 +330,7 @@ extern int update_open_stateid(struct nfs4_state *state, const nfs4_stateid *deleg_stateid, fmode_t fmode); extern int nfs4_proc_setlease(struct file *file, int arg, - struct file_lock **lease, void **priv); + struct file_lease **lease, void **priv); extern int nfs4_proc_get_lease_time(struct nfs_client *clp, struct nfs_fsinfo *fsinfo); extern void nfs4_update_changeattr(struct inode *dir, diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c index e238abc78a13..1cd9652f3c28 100644 --- a/fs/nfs/nfs4file.c +++ b/fs/nfs/nfs4file.c @@ -439,7 +439,7 @@ void nfs42_ssc_unregister_ops(void) } #endif /* CONFIG_NFS_V4_2 */ -static int nfs4_setlease(struct file *file, int arg, struct file_lock **lease, +static int nfs4_setlease(struct file *file, int arg, struct file_lease **lease, void 
**priv) { return nfs4_proc_setlease(file, arg, lease, priv); diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 91dddcd79004..815996cb27fc 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -7604,7 +7604,7 @@ static int nfs4_delete_lease(struct file *file, void **priv) return generic_setlease(file, F_UNLCK, NULL, priv); } -static int nfs4_add_lease(struct file *file, int arg, struct file_lock **lease, +static int nfs4_add_lease(struct file *file, int arg, struct file_lease **lease, void **priv) { struct inode *inode = file_inode(file); @@ -7622,7 +7622,7 @@ static int nfs4_add_lease(struct file *file, int arg, struct file_lock **lease, return -EAGAIN; } -int nfs4_proc_setlease(struct file *file, int arg, struct file_lock **lease, +int nfs4_proc_setlease(struct file *file, int arg, struct file_lease **lease, void **priv) { switch (arg) { diff --git a/fs/nfsd/nfs4layouts.c b/fs/nfsd/nfs4layouts.c index daae68e526e0..4fa21b74a981 100644 --- a/fs/nfsd/nfs4layouts.c +++ b/fs/nfsd/nfs4layouts.c @@ -25,7 +25,7 @@ static struct kmem_cache *nfs4_layout_cache; static struct kmem_cache *nfs4_layout_stateid_cache; static const struct nfsd4_callback_ops nfsd4_cb_layout_ops; -static const struct lock_manager_operations nfsd4_layouts_lm_ops; +static const struct lease_manager_operations nfsd4_layouts_lm_ops; const struct nfsd4_layout_ops *nfsd4_layout_ops[LAYOUT_TYPE_MAX] = { #ifdef CONFIG_NFSD_FLEXFILELAYOUT @@ -182,20 +182,19 @@ nfsd4_free_layout_stateid(struct nfs4_stid *stid) static int nfsd4_layout_setlease(struct nfs4_layout_stateid *ls) { - struct file_lock *fl; + struct file_lease *fl; int status; if (nfsd4_layout_ops[ls->ls_layout_type]->disable_recalls) return 0; - fl = locks_alloc_lock(); + fl = locks_alloc_lease(); if (!fl) return -ENOMEM; - locks_init_lock(fl); + locks_init_lease(fl); fl->fl_lmops = &nfsd4_layouts_lm_ops; fl->c.flc_flags = FL_LAYOUT; fl->c.flc_type = F_RDLCK; - fl->fl_end = OFFSET_MAX; fl->c.flc_owner = ls; fl->c.flc_pid = current->tgid; fl->c.flc_file = ls->ls_file->nf_file; @@ -203,7 +202,7 @@ nfsd4_layout_setlease(struct nfs4_layout_stateid *ls) status = vfs_setlease(fl->c.flc_file, fl->c.flc_type, &fl, NULL); if (status) { - locks_free_lock(fl); + locks_free_lease(fl); return status; } BUG_ON(fl != NULL); @@ -724,7 +723,7 @@ static const struct nfsd4_callback_ops nfsd4_cb_layout_ops = { }; static bool -nfsd4_layout_lm_break(struct file_lock *fl) +nfsd4_layout_lm_break(struct file_lease *fl) { /* * We don't want the locks code to timeout the lease for us; @@ -737,14 +736,14 @@ nfsd4_layout_lm_break(struct file_lock *fl) } static int -nfsd4_layout_lm_change(struct file_lock *onlist, int arg, +nfsd4_layout_lm_change(struct file_lease *onlist, int arg, struct list_head *dispose) { BUG_ON(!(arg & F_UNLCK)); return lease_modify(onlist, arg, dispose); } -static const struct lock_manager_operations nfsd4_layouts_lm_ops = { +static const struct lease_manager_operations nfsd4_layouts_lm_ops = { .lm_break = nfsd4_layout_lm_break, .lm_change = nfsd4_layout_lm_change, }; diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index 5d9ba9b9cff9..b2c8efb5f793 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -4922,7 +4922,7 @@ static void nfsd_break_one_deleg(struct nfs4_delegation *dp) /* Called from break_lease() with flc_lock held. 
*/ static bool -nfsd_break_deleg_cb(struct file_lock *fl) +nfsd_break_deleg_cb(struct file_lease *fl) { struct nfs4_delegation *dp = (struct nfs4_delegation *) fl->c.flc_owner; struct nfs4_file *fp = dp->dl_stid.sc_file; @@ -4960,7 +4960,7 @@ nfsd_break_deleg_cb(struct file_lock *fl) * %true: Lease conflict was resolved * %false: Lease conflict was not resolved. */ -static bool nfsd_breaker_owns_lease(struct file_lock *fl) +static bool nfsd_breaker_owns_lease(struct file_lease *fl) { struct nfs4_delegation *dl = fl->c.flc_owner; struct svc_rqst *rqst; @@ -4977,7 +4977,7 @@ static bool nfsd_breaker_owns_lease(struct file_lock *fl) } static int -nfsd_change_deleg_cb(struct file_lock *onlist, int arg, +nfsd_change_deleg_cb(struct file_lease *onlist, int arg, struct list_head *dispose) { struct nfs4_delegation *dp = (struct nfs4_delegation *) onlist->c.flc_owner; @@ -4991,7 +4991,7 @@ nfsd_change_deleg_cb(struct file_lock *onlist, int arg, return -EAGAIN; } -static const struct lock_manager_operations nfsd_lease_mng_ops = { +static const struct lease_manager_operations nfsd_lease_mng_ops = { .lm_breaker_owns_lease = nfsd_breaker_owns_lease, .lm_break = nfsd_break_deleg_cb, .lm_change = nfsd_change_deleg_cb, @@ -5331,18 +5331,17 @@ static bool nfsd4_cb_channel_good(struct nfs4_client *clp) return clp->cl_minorversion && clp->cl_cb_state == NFSD4_CB_UNKNOWN; } -static struct file_lock *nfs4_alloc_init_lease(struct nfs4_delegation *dp, +static struct file_lease *nfs4_alloc_init_lease(struct nfs4_delegation *dp, int flag) { - struct file_lock *fl; + struct file_lease *fl; - fl = locks_alloc_lock(); + fl = locks_alloc_lease(); if (!fl) return NULL; fl->fl_lmops = &nfsd_lease_mng_ops; fl->c.flc_flags = FL_DELEG; fl->c.flc_type = flag == NFS4_OPEN_DELEGATE_READ? 
F_RDLCK: F_WRLCK; - fl->fl_end = OFFSET_MAX; fl->c.flc_owner = (fl_owner_t)dp; fl->c.flc_pid = current->tgid; fl->c.flc_file = dp->dl_stid.sc_file->fi_deleg_file->nf_file; @@ -5463,7 +5462,7 @@ nfs4_set_delegation(struct nfsd4_open *open, struct nfs4_ol_stateid *stp, struct nfs4_clnt_odstate *odstate = stp->st_clnt_odstate; struct nfs4_delegation *dp; struct nfsd_file *nf = NULL; - struct file_lock *fl; + struct file_lease *fl; u32 dl_type; /* @@ -5536,7 +5535,7 @@ nfs4_set_delegation(struct nfsd4_open *open, struct nfs4_ol_stateid *stp, status = vfs_setlease(fp->fi_deleg_file->nf_file, fl->c.flc_type, &fl, NULL); if (fl) - locks_free_lock(fl); + locks_free_lease(fl); if (status) goto out_clnt_odstate; @@ -8449,7 +8448,7 @@ nfsd4_deleg_getattr_conflict(struct svc_rqst *rqstp, struct inode *inode) { __be32 status; struct file_lock_context *ctx; - struct file_lock *fl; + struct file_lease *fl; struct nfs4_delegation *dp; ctx = locks_inode_context(inode); @@ -8457,6 +8456,8 @@ nfsd4_deleg_getattr_conflict(struct svc_rqst *rqstp, struct inode *inode) return 0; spin_lock(&ctx->flc_lock); for_each_file_lock(fl, &ctx->flc_lease) { + unsigned char type = fl->c.flc_type; + if (fl->c.flc_flags == FL_LAYOUT) continue; if (fl->fl_lmops != &nfsd_lease_mng_ops) { @@ -8465,11 +8466,11 @@ nfsd4_deleg_getattr_conflict(struct svc_rqst *rqstp, struct inode *inode) * we are done; there isn't any write delegation * on this inode */ - if (lock_is_read(fl)) + if (type == F_RDLCK) break; goto break_lease; } - if (lock_is_write(fl)) { + if (type == F_WRLCK) { dp = fl->c.flc_owner; if (dp->dl_recall.cb_clp == *(rqstp->rq_lease_breaker)) { spin_unlock(&ctx->flc_lock); diff --git a/fs/smb/client/cifsfs.c b/fs/smb/client/cifsfs.c index e902de4e475a..5eee5b00547f 100644 --- a/fs/smb/client/cifsfs.c +++ b/fs/smb/client/cifsfs.c @@ -1085,7 +1085,7 @@ static loff_t cifs_llseek(struct file *file, loff_t offset, int whence) } static int -cifs_setlease(struct file *file, int arg, struct file_lock **lease, void **priv) +cifs_setlease(struct file *file, int arg, struct file_lease **lease, void **priv) { /* * Note that this is called by vfs setlease with i_lock held to diff --git a/include/linux/filelock.h b/include/linux/filelock.h index 0606e806da76..553d65a88048 100644 --- a/include/linux/filelock.h +++ b/include/linux/filelock.h @@ -27,6 +27,7 @@ #define FILE_LOCK_DEFERRED 1 struct file_lock; +struct file_lease; struct file_lock_operations { void (*fl_copy_lock)(struct file_lock *, struct file_lock *); @@ -39,14 +40,17 @@ struct lock_manager_operations { void (*lm_put_owner)(fl_owner_t); void (*lm_notify)(struct file_lock *); /* unblock callback */ int (*lm_grant)(struct file_lock *, int); - bool (*lm_break)(struct file_lock *); - int (*lm_change)(struct file_lock *, int, struct list_head *); - void (*lm_setup)(struct file_lock *, void **); - bool (*lm_breaker_owns_lease)(struct file_lock *); bool (*lm_lock_expirable)(struct file_lock *cfl); void (*lm_expire_lock)(void); }; +struct lease_manager_operations { + bool (*lm_break)(struct file_lease *); + int (*lm_change)(struct file_lease *, int, struct list_head *); + void (*lm_setup)(struct file_lease *, void **); + bool (*lm_breaker_owns_lease)(struct file_lease *); +}; + struct lock_manager { struct list_head list; /* @@ -110,11 +114,6 @@ struct file_lock { loff_t fl_start; loff_t fl_end; - struct fasync_struct * fl_fasync; /* for lease break notifications */ - /* for lease breaks: */ - unsigned long fl_break_time; - unsigned long fl_downgrade_time; - const struct 
file_lock_operations *fl_ops; /* Callbacks for filesystems */ const struct lock_manager_operations *fl_lmops; /* Callbacks for lockmanagers */ union { @@ -131,6 +130,15 @@ struct file_lock { } fl_u; } __randomize_layout; +struct file_lease { + struct file_lock_core c; + struct fasync_struct * fl_fasync; /* for lease break notifications */ + /* for lease breaks: */ + unsigned long fl_break_time; + unsigned long fl_downgrade_time; + const struct lease_manager_operations *fl_lmops; /* Callbacks for lease managers */ +} __randomize_layout; + struct file_lock_context { spinlock_t flc_lock; struct list_head flc_flock; @@ -179,7 +187,7 @@ static inline void locks_wake_up(struct file_lock *fl) void locks_free_lock_context(struct inode *inode); void locks_free_lock(struct file_lock *fl); void locks_init_lock(struct file_lock *); -struct file_lock * locks_alloc_lock(void); +struct file_lock *locks_alloc_lock(void); void locks_copy_lock(struct file_lock *, struct file_lock *); void locks_copy_conflock(struct file_lock *, struct file_lock *); void locks_remove_posix(struct file *, fl_owner_t); @@ -193,11 +201,15 @@ int vfs_lock_file(struct file *, unsigned int, struct file_lock *, struct file_l int vfs_cancel_lock(struct file *filp, struct file_lock *fl); bool vfs_inode_has_locks(struct inode *inode); int locks_lock_inode_wait(struct inode *inode, struct file_lock *fl); + +void locks_init_lease(struct file_lease *); +void locks_free_lease(struct file_lease *fl); +struct file_lease *locks_alloc_lease(void); int __break_lease(struct inode *inode, unsigned int flags, unsigned int type); void lease_get_mtime(struct inode *, struct timespec64 *time); -int generic_setlease(struct file *, int, struct file_lock **, void **priv); -int vfs_setlease(struct file *, int, struct file_lock **, void **); -int lease_modify(struct file_lock *, int, struct list_head *); +int generic_setlease(struct file *, int, struct file_lease **, void **priv); +int vfs_setlease(struct file *, int, struct file_lease **, void **); +int lease_modify(struct file_lease *, int, struct list_head *); struct notifier_block; int lease_register_notifier(struct notifier_block *); @@ -282,6 +294,11 @@ static inline void locks_init_lock(struct file_lock *fl) return; } +static inline void locks_init_lease(struct file_lease *fl) +{ + return; +} + static inline void locks_copy_conflock(struct file_lock *new, struct file_lock *fl) { return; @@ -356,18 +373,18 @@ static inline void lease_get_mtime(struct inode *inode, } static inline int generic_setlease(struct file *filp, int arg, - struct file_lock **flp, void **priv) + struct file_lease **flp, void **priv) { return -EINVAL; } static inline int vfs_setlease(struct file *filp, int arg, - struct file_lock **lease, void **priv) + struct file_lease **lease, void **priv) { return -EINVAL; } -static inline int lease_modify(struct file_lock *fl, int arg, +static inline int lease_modify(struct file_lease *fl, int arg, struct list_head *dispose) { return -EINVAL; diff --git a/include/linux/fs.h b/include/linux/fs.h index ed5966a70495..162877197bf1 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -1064,6 +1064,7 @@ struct file *get_file_active(struct file **f); typedef void *fl_owner_t; struct file_lock; +struct file_lease; /* The following constant reflects the upper bound of the file/locking space */ #ifndef OFFSET_MAX @@ -2005,7 +2006,7 @@ struct file_operations { ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t, unsigned int); ssize_t (*splice_read)(struct file 
*, loff_t *, struct pipe_inode_info *, size_t, unsigned int); void (*splice_eof)(struct file *file); - int (*setlease)(struct file *, int, struct file_lock **, void **); + int (*setlease)(struct file *, int, struct file_lease **, void **); long (*fallocate)(struct file *file, int mode, loff_t offset, loff_t len); void (*show_fdinfo)(struct seq_file *m, struct file *f); @@ -3238,7 +3239,7 @@ extern int simple_write_begin(struct file *file, struct address_space *mapping, extern const struct address_space_operations ram_aops; extern int always_delete_dentry(const struct dentry *); extern struct inode *alloc_anon_inode(struct super_block *); -extern int simple_nosetlease(struct file *, int, struct file_lock **, void **); +extern int simple_nosetlease(struct file *, int, struct file_lease **, void **); extern const struct dentry_operations simple_dentry_operations; extern struct dentry *simple_lookup(struct inode *, struct dentry *, unsigned int flags); diff --git a/include/trace/events/filelock.h b/include/trace/events/filelock.h index c778061c6249..b8d1e00a7982 100644 --- a/include/trace/events/filelock.h +++ b/include/trace/events/filelock.h @@ -117,12 +117,12 @@ DEFINE_EVENT(filelock_lock, flock_lock_inode, TP_ARGS(inode, fl, ret)); DECLARE_EVENT_CLASS(filelock_lease, - TP_PROTO(struct inode *inode, struct file_lock *fl), + TP_PROTO(struct inode *inode, struct file_lease *fl), TP_ARGS(inode, fl), TP_STRUCT__entry( - __field(struct file_lock *, fl) + __field(struct file_lease *, fl) __field(unsigned long, i_ino) __field(dev_t, s_dev) __field(struct file_lock_core *, blocker) @@ -153,23 +153,23 @@ DECLARE_EVENT_CLASS(filelock_lease, __entry->break_time, __entry->downgrade_time) ); -DEFINE_EVENT(filelock_lease, break_lease_noblock, TP_PROTO(struct inode *inode, struct file_lock *fl), +DEFINE_EVENT(filelock_lease, break_lease_noblock, TP_PROTO(struct inode *inode, struct file_lease *fl), TP_ARGS(inode, fl)); -DEFINE_EVENT(filelock_lease, break_lease_block, TP_PROTO(struct inode *inode, struct file_lock *fl), +DEFINE_EVENT(filelock_lease, break_lease_block, TP_PROTO(struct inode *inode, struct file_lease *fl), TP_ARGS(inode, fl)); -DEFINE_EVENT(filelock_lease, break_lease_unblock, TP_PROTO(struct inode *inode, struct file_lock *fl), +DEFINE_EVENT(filelock_lease, break_lease_unblock, TP_PROTO(struct inode *inode, struct file_lease *fl), TP_ARGS(inode, fl)); -DEFINE_EVENT(filelock_lease, generic_delete_lease, TP_PROTO(struct inode *inode, struct file_lock *fl), +DEFINE_EVENT(filelock_lease, generic_delete_lease, TP_PROTO(struct inode *inode, struct file_lease *fl), TP_ARGS(inode, fl)); -DEFINE_EVENT(filelock_lease, time_out_leases, TP_PROTO(struct inode *inode, struct file_lock *fl), +DEFINE_EVENT(filelock_lease, time_out_leases, TP_PROTO(struct inode *inode, struct file_lease *fl), TP_ARGS(inode, fl)); TRACE_EVENT(generic_add_lease, - TP_PROTO(struct inode *inode, struct file_lock *fl), + TP_PROTO(struct inode *inode, struct file_lease *fl), TP_ARGS(inode, fl), @@ -204,7 +204,7 @@ TRACE_EVENT(generic_add_lease, ); TRACE_EVENT(leases_conflict, - TP_PROTO(bool conflict, struct file_lock *lease, struct file_lock *breaker), + TP_PROTO(bool conflict, struct file_lease *lease, struct file_lease *breaker), TP_ARGS(conflict, lease, breaker), -- cgit v1.2.3 From 7b8001013d720c232ad9ae7aae0ef0e7c281c6d4 Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Mon, 5 Feb 2024 07:09:31 -0500 Subject: filelock: don't do security checks on nfsd setlease calls MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 
Content-Transfer-Encoding: 8bit Zdenek reported seeing some AVC denials due to nfsd trying to set delegations: type=AVC msg=audit(09.11.2023 09:03:46.411:496) : avc: denied { lease } for pid=5127 comm=rpc.nfsd capability=lease scontext=system_u:system_r:nfsd_t:s0 tcontext=system_u:system_r:nfsd_t:s0 tclass=capability permissive=0 When setting delegations on behalf of nfsd, we don't want to do all of the normal capabilty and LSM checks. nfsd is a kernel thread and runs with CAP_LEASE set, so the uid checks end up being a no-op in most cases anyway. Some nfsd functions can end up running in normal process context when tearing down the server. At that point, the CAP_LEASE check can fail and cause the client to not tear down delegations when expected. Also, the way the per-fs ->setlease handlers work today is a little convoluted. The non-trivial ones are wrappers around generic_setlease, so when they fail due to permission problems they usually they end up doing a little extra work only to determine that they can't set the lease anyway. It would be more efficient to do those checks earlier. Transplant the permission checking from generic_setlease to vfs_setlease, which will make the permission checking happen earlier on filesystems that have a ->setlease operation. Add a new kernel_setlease function that bypasses these checks, and switch nfsd to use that instead of vfs_setlease. There is one behavioral change here: prior this patch the setlease_notifier would fire even if the lease attempt was going to fail the security checks later. With this change, it doesn't fire until the caller has passed them. I think this is a desirable change overall. nfsd is the only user of the setlease_notifier and it doesn't benefit from being notified about failed attempts. 
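For illustration, an in-kernel lease holder now goes through the unchecked entry point directly, as the nfsd hunks below do; a minimal sketch (the caller and its arguments are hypothetical):

#include <linux/filelock.h>

/* Hypothetical kernel-thread caller: release a lease/delegation without the
 * fsuid/CAP_LEASE, S_ISREG() and LSM checks that vfs_setlease() now performs
 * before handing off to kernel_setlease(). */
static void example_drop_kernel_lease(struct file *filp, void *owner)
{
	kernel_setlease(filp, F_UNLCK, NULL, &owner);
}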
Cc: Ondrej Mosnáček Reported-by: Zdenek Pytela Closes: https://bugzilla.redhat.com/show_bug.cgi?id=2248830 Signed-off-by: Jeff Layton Link: https://lore.kernel.org/r/20240205-bz2248830-v1-1-d0ec0daecba1@kernel.org Acked-by: Tom Talpey Reviewed-by: NeilBrown Signed-off-by: Christian Brauner --- fs/locks.c | 43 +++++++++++++++++++++++++------------------ fs/nfsd/nfs4layouts.c | 5 ++--- fs/nfsd/nfs4state.c | 8 ++++---- include/linux/filelock.h | 7 +++++++ 4 files changed, 38 insertions(+), 25 deletions(-) (limited to 'fs') diff --git a/fs/locks.c b/fs/locks.c index 33c7f4a8c729..26d52ef5314a 100644 --- a/fs/locks.c +++ b/fs/locks.c @@ -1925,18 +1925,6 @@ static int generic_delete_lease(struct file *filp, void *owner) int generic_setlease(struct file *filp, int arg, struct file_lease **flp, void **priv) { - struct inode *inode = file_inode(filp); - vfsuid_t vfsuid = i_uid_into_vfsuid(file_mnt_idmap(filp), inode); - int error; - - if ((!vfsuid_eq_kuid(vfsuid, current_fsuid())) && !capable(CAP_LEASE)) - return -EACCES; - if (!S_ISREG(inode->i_mode)) - return -EINVAL; - error = security_file_lock(filp, arg); - if (error) - return error; - switch (arg) { case F_UNLCK: return generic_delete_lease(filp, *priv); @@ -1987,6 +1975,19 @@ void lease_unregister_notifier(struct notifier_block *nb) } EXPORT_SYMBOL_GPL(lease_unregister_notifier); + +int +kernel_setlease(struct file *filp, int arg, struct file_lease **lease, void **priv) +{ + if (lease) + setlease_notifier(arg, *lease); + if (filp->f_op->setlease) + return filp->f_op->setlease(filp, arg, lease, priv); + else + return generic_setlease(filp, arg, lease, priv); +} +EXPORT_SYMBOL_GPL(kernel_setlease); + /** * vfs_setlease - sets a lease on an open file * @filp: file pointer @@ -2007,12 +2008,18 @@ EXPORT_SYMBOL_GPL(lease_unregister_notifier); int vfs_setlease(struct file *filp, int arg, struct file_lease **lease, void **priv) { - if (lease) - setlease_notifier(arg, *lease); - if (filp->f_op->setlease) - return filp->f_op->setlease(filp, arg, lease, priv); - else - return generic_setlease(filp, arg, lease, priv); + struct inode *inode = file_inode(filp); + vfsuid_t vfsuid = i_uid_into_vfsuid(file_mnt_idmap(filp), inode); + int error; + + if ((!vfsuid_eq_kuid(vfsuid, current_fsuid())) && !capable(CAP_LEASE)) + return -EACCES; + if (!S_ISREG(inode->i_mode)) + return -EINVAL; + error = security_file_lock(filp, arg); + if (error) + return error; + return kernel_setlease(filp, arg, lease, priv); } EXPORT_SYMBOL_GPL(vfs_setlease); diff --git a/fs/nfsd/nfs4layouts.c b/fs/nfsd/nfs4layouts.c index 4fa21b74a981..4c0d00bdfbb1 100644 --- a/fs/nfsd/nfs4layouts.c +++ b/fs/nfsd/nfs4layouts.c @@ -170,7 +170,7 @@ nfsd4_free_layout_stateid(struct nfs4_stid *stid) spin_unlock(&fp->fi_lock); if (!nfsd4_layout_ops[ls->ls_layout_type]->disable_recalls) - vfs_setlease(ls->ls_file->nf_file, F_UNLCK, NULL, (void **)&ls); + kernel_setlease(ls->ls_file->nf_file, F_UNLCK, NULL, (void **)&ls); nfsd_file_put(ls->ls_file); if (ls->ls_recalled) @@ -199,8 +199,7 @@ nfsd4_layout_setlease(struct nfs4_layout_stateid *ls) fl->c.flc_pid = current->tgid; fl->c.flc_file = ls->ls_file->nf_file; - status = vfs_setlease(fl->c.flc_file, fl->c.flc_type, &fl, - NULL); + status = kernel_setlease(fl->c.flc_file, fl->c.flc_type, &fl, NULL); if (status) { locks_free_lease(fl); return status; diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index b2c8efb5f793..6d52ecba8e9c 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -1249,7 +1249,7 @@ static void 
nfs4_unlock_deleg_lease(struct nfs4_delegation *dp) WARN_ON_ONCE(!fp->fi_delegees); - vfs_setlease(nf->nf_file, F_UNLCK, NULL, (void **)&dp); + kernel_setlease(nf->nf_file, F_UNLCK, NULL, (void **)&dp); put_deleg_file(fp); } @@ -5532,8 +5532,8 @@ nfs4_set_delegation(struct nfsd4_open *open, struct nfs4_ol_stateid *stp, if (!fl) goto out_clnt_odstate; - status = vfs_setlease(fp->fi_deleg_file->nf_file, - fl->c.flc_type, &fl, NULL); + status = kernel_setlease(fp->fi_deleg_file->nf_file, + fl->c.flc_type, &fl, NULL); if (fl) locks_free_lease(fl); if (status) @@ -5571,7 +5571,7 @@ nfs4_set_delegation(struct nfsd4_open *open, struct nfs4_ol_stateid *stp, return dp; out_unlock: - vfs_setlease(fp->fi_deleg_file->nf_file, F_UNLCK, NULL, (void **)&dp); + kernel_setlease(fp->fi_deleg_file->nf_file, F_UNLCK, NULL, (void **)&dp); out_clnt_odstate: put_clnt_odstate(dp->dl_clnt_odstate); nfs4_put_stid(&dp->dl_stid); diff --git a/include/linux/filelock.h b/include/linux/filelock.h index 553d65a88048..aabd4bdf7eba 100644 --- a/include/linux/filelock.h +++ b/include/linux/filelock.h @@ -208,6 +208,7 @@ struct file_lease *locks_alloc_lease(void); int __break_lease(struct inode *inode, unsigned int flags, unsigned int type); void lease_get_mtime(struct inode *, struct timespec64 *time); int generic_setlease(struct file *, int, struct file_lease **, void **priv); +int kernel_setlease(struct file *, int, struct file_lease **, void **); int vfs_setlease(struct file *, int, struct file_lease **, void **); int lease_modify(struct file_lease *, int, struct list_head *); @@ -378,6 +379,12 @@ static inline int generic_setlease(struct file *filp, int arg, return -EINVAL; } +static inline int kernel_setlease(struct file *filp, int arg, + struct file_lease **lease, void **priv) +{ + return -EINVAL; +} + static inline int vfs_setlease(struct file *filp, int arg, struct file_lease **lease, void **priv) { -- cgit v1.2.3 From 292fcaa1f937345cb65f3af82a1ee6692c8df9eb Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Tue, 6 Feb 2024 14:08:57 +0100 Subject: smb: remove redundant check ->setlease() is never called on non-regular files now. So remove the check from cifs_setlease(). Link: https://lore.kernel.org/r/170716318935.13976.13465352731929804157@noble.neil.brown.name Signed-off-by: NeilBrown Signed-off-by: Christian Brauner --- fs/smb/client/cifsfs.c | 3 --- 1 file changed, 3 deletions(-) (limited to 'fs') diff --git a/fs/smb/client/cifsfs.c b/fs/smb/client/cifsfs.c index 5eee5b00547f..cbcb98d5f2d7 100644 --- a/fs/smb/client/cifsfs.c +++ b/fs/smb/client/cifsfs.c @@ -1094,9 +1094,6 @@ cifs_setlease(struct file *file, int arg, struct file_lease **lease, void **priv struct inode *inode = file_inode(file); struct cifsFileInfo *cfile = file->private_data; - if (!(S_ISREG(inode->i_mode))) - return -EINVAL; - /* Check if file is oplocked if this is request for new lease */ if (arg == F_UNLCK || ((arg == F_RDLCK) && CIFS_CACHE_READ(CIFS_I(inode))) || -- cgit v1.2.3 From 14786d949a3b8cf00cc32456363b7db22894a0e6 Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Sun, 18 Feb 2024 08:33:28 -0500 Subject: filelock: fix deadlock detection in POSIX locking The FL_POSIX check in __locks_insert_block was inadvertantly broken recently and is now inserting only OFD locks instead of only legacy POSIX locks. This breaks deadlock detection in POSIX locks, and may also be the root cause of a performance regression noted by the kernel test robot. Restore the proper sense of the test. 
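To make the masking concrete, a small sketch of the corrected predicate (an OFD lock carries FL_POSIX | FL_OFDLCK, a legacy POSIX lock only FL_POSIX, so the comparison below is true only for the latter):

#include <linux/types.h>
#include <linux/filelock.h>

/* Only legacy POSIX locks take part in global deadlock detection; OFD locks
 * set FL_OFDLCK as well, so the equality fails for them and they are skipped. */
static bool example_tracks_for_deadlock(const struct file_lock_core *flc)
{
	return (flc->flc_flags & (FL_POSIX | FL_OFDLCK)) == FL_POSIX;
}

The broken version compared against (FL_POSIX | FL_OFDLCK), which selected exactly the opposite set of locks.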
Fixes: b6be3714005c ("filelock: convert __locks_insert_block, conflict and deadlock checks to use file_lock_core") Reported-by: kernel test robot Closes: https://lore.kernel.org/oe-lkp/202402181229.f8147f40-oliver.sang@intel.com Signed-off-by: Jeff Layton Link: https://lore.kernel.org/r/20240218-flsplit4-v1-1-26454fc090f2@kernel.org Reviewed-by: NeilBrown Signed-off-by: Christian Brauner --- fs/locks.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/locks.c b/fs/locks.c index 26d52ef5314a..90c8746874de 100644 --- a/fs/locks.c +++ b/fs/locks.c @@ -812,7 +812,7 @@ new_blocker: list_add_tail(&waiter->flc_blocked_member, &blocker->flc_blocked_requests); - if ((blocker->flc_flags & (FL_POSIX|FL_OFDLCK)) == (FL_POSIX|FL_OFDLCK)) + if ((blocker->flc_flags & (FL_POSIX|FL_OFDLCK)) == FL_POSIX) locks_insert_global_blocked(waiter); /* The requests in waiter->flc_blocked are known to conflict with -- cgit v1.2.3