author     Andreas Gruenbacher <agruenba@redhat.com>  2024-04-12 20:16:58 +0300
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2024-05-30 10:44:24 +0300
commit     71667e7f8183de19492aaa84e8480c33048c9096
tree       308ab657068b04aefd41a35e428f5da0b7f00060 /fs
parent     501cd8fabf621d10bd4893e37f6ce6c20523c8ca
gfs2: finish_xmote cleanup
[ Upstream commit 1cd28e15864054f3c48baee9eecda1c0441c48ac ]

Currently, function finish_xmote() takes and releases the glock spinlock. However, all of its callers immediately take that spinlock again, so it makes more sense to take the spinlock before calling finish_xmote().

With that, thaw_glock() is the only place that sets the GLF_REPLY_PENDING flag (named GLF_HAVE_REPLY upstream) outside of the glock spinlock, but it also takes that spinlock immediately thereafter. Change it to set the bit while the spinlock is already held. This allows switching from test_and_clear_bit() to test_bit() and clear_bit() in glock_work_func().

Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
Stable-dep-of: 9947a06d29c0 ("gfs2: do_xmote fixes")
Signed-off-by: Sasha Levin <sashal@kernel.org>
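To make the locking change concrete, here is a minimal userspace sketch in plain C, with a pthread mutex standing in for gl->gl_lockref.lock; the struct, function names, and state field are illustrative stand-ins, not the actual gfs2 code. It shows the pattern the patch moves to: the caller takes the lock once, and the state-change helper plus the follow-up work queuing both run inside that single critical section, instead of the helper unlocking only for the caller to relock immediately.

#include <pthread.h>
#include <stdio.h>

struct glock {
	pthread_mutex_t lock;      /* stands in for gl->gl_lockref.lock */
	int state;
};

/* After the patch, the helper runs with the lock already held by
 * the caller instead of taking and releasing it itself. */
static void finish_xmote_sketch(struct glock *gl, int new_state)
{
	gl->state = new_state;     /* state change under the caller's lock */
}

/* Callers such as do_xmote() and glock_work_func() hold the lock
 * across the state change and the work queuing. */
static void caller_sketch(struct glock *gl, int new_state)
{
	pthread_mutex_lock(&gl->lock);
	finish_xmote_sketch(gl, new_state);
	/* ...queue follow-up work here, still under the lock... */
	pthread_mutex_unlock(&gl->lock);
}

int main(void)
{
	struct glock gl = { .lock = PTHREAD_MUTEX_INITIALIZER, .state = 0 };
	caller_sketch(&gl, 1);
	printf("state = %d\n", gl.state);
	return 0;
}

This is also why the diff below switches the callers from gfs2_glock_queue_work() to __gfs2_glock_queue_work(), the variant intended to be called with the spinlock already held.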
Diffstat (limited to 'fs')
-rw-r--r--	fs/gfs2/glock.c | 21
1 file changed, 13 insertions(+), 8 deletions(-)
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 1bf0d751ece0..0c719fcd4fbc 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -617,7 +617,6 @@ static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
struct gfs2_holder *gh;
unsigned state = ret & LM_OUT_ST_MASK;
- spin_lock(&gl->gl_lockref.lock);
trace_gfs2_glock_state_change(gl, state);
state_change(gl, state);
gh = find_first_waiter(gl);
@@ -665,7 +664,6 @@ retry:
gl->gl_target, state);
GLOCK_BUG_ON(gl, 1);
}
- spin_unlock(&gl->gl_lockref.lock);
return;
}
@@ -688,7 +686,6 @@ retry:
}
out:
clear_bit(GLF_LOCK, &gl->gl_flags);
- spin_unlock(&gl->gl_lockref.lock);
}
static bool is_system_glock(struct gfs2_glock *gl)
@@ -835,15 +832,19 @@ skip_inval:
if (ret == -EINVAL && gl->gl_target == LM_ST_UNLOCKED &&
target == LM_ST_UNLOCKED &&
test_bit(DFL_UNMOUNT, &ls->ls_recover_flags)) {
+ spin_lock(&gl->gl_lockref.lock);
finish_xmote(gl, target);
- gfs2_glock_queue_work(gl, 0);
+ __gfs2_glock_queue_work(gl, 0);
+ spin_unlock(&gl->gl_lockref.lock);
} else if (ret) {
fs_err(sdp, "lm_lock ret %d\n", ret);
GLOCK_BUG_ON(gl, !gfs2_withdrawing_or_withdrawn(sdp));
}
} else { /* lock_nolock */
+ spin_lock(&gl->gl_lockref.lock);
finish_xmote(gl, target);
- gfs2_glock_queue_work(gl, 0);
+ __gfs2_glock_queue_work(gl, 0);
+ spin_unlock(&gl->gl_lockref.lock);
}
out:
spin_lock(&gl->gl_lockref.lock);
@@ -1099,11 +1100,12 @@ static void glock_work_func(struct work_struct *work)
struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work);
unsigned int drop_refs = 1;
- if (test_and_clear_bit(GLF_REPLY_PENDING, &gl->gl_flags)) {
+ spin_lock(&gl->gl_lockref.lock);
+ if (test_bit(GLF_REPLY_PENDING, &gl->gl_flags)) {
+ clear_bit(GLF_REPLY_PENDING, &gl->gl_flags);
finish_xmote(gl, gl->gl_reply);
drop_refs++;
}
- spin_lock(&gl->gl_lockref.lock);
if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
gl->gl_state != LM_ST_UNLOCKED &&
gl->gl_demote_state != LM_ST_EXCLUSIVE) {
@@ -2176,8 +2178,11 @@ static void thaw_glock(struct gfs2_glock *gl)
return;
if (!lockref_get_not_dead(&gl->gl_lockref))
return;
+
+ spin_lock(&gl->gl_lockref.lock);
set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
- gfs2_glock_queue_work(gl, 0);
+ __gfs2_glock_queue_work(gl, 0);
+ spin_unlock(&gl->gl_lockref.lock);
}
/**
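
The glock_work_func() hunk above turns an atomic test_and_clear_bit() into a plain test_bit() plus clear_bit(). The reasoning generalizes: an atomic read-modify-write is only needed when the bit can change between the test and the clear; once every writer (including thaw_glock() after this patch) sets the bit under the same spinlock, a non-atomic test-then-clear is equivalent. A small self-contained C sketch of that equivalence, with a pthread mutex in place of the spinlock and an illustrative REPLY_PENDING flag:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define REPLY_PENDING 0x1UL        /* illustrative stand-in for GLF_REPLY_PENDING */

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long flags;

/* With every writer of the flag serialized by the same lock, nothing
 * can flip the bit between the plain test and the plain clear, so no
 * atomic read-modify-write is needed. */
static bool take_reply_pending(void)
{
	bool was_set = false;

	pthread_mutex_lock(&lock);
	if (flags & REPLY_PENDING) {       /* test_bit() */
		flags &= ~REPLY_PENDING;   /* clear_bit() */
		was_set = true;
	}
	pthread_mutex_unlock(&lock);
	return was_set;
}

int main(void)
{
	pthread_mutex_lock(&lock);         /* like thaw_glock() after the patch */
	flags |= REPLY_PENDING;            /* set_bit() under the lock */
	pthread_mutex_unlock(&lock);

	printf("pending: %d\n", take_reply_pending());  /* prints 1 */
	printf("pending: %d\n", take_reply_pending());  /* prints 0 */
	return 0;
}

The second call finds the bit already cleared, exactly the behaviour test_and_clear_bit() would give, but without the cost of an atomic operation.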