author     Andreas Gruenbacher <agruenba@redhat.com>   2022-12-09 23:03:59 +0100
committer  Andreas Gruenbacher <agruenba@redhat.com>   2022-12-15 12:41:22 +0100
commit     6b46a06100dd0e0ebe400573e94ccd09163bfd5b (patch)
tree       3f1aa457d152ca0167de17b14c0a605006991ef2 /fs
parent     ba3e77a4a22af018d2fe4d745902b2531ca82aba (diff)
download   lwn-6b46a06100dd0e0ebe400573e94ccd09163bfd5b.tar.gz
           lwn-6b46a06100dd0e0ebe400573e94ccd09163bfd5b.zip
gfs2: Remove support for glock holder auto-demotion (2)
As a follow-up to the previous commit, move the recovery related code in
__gfs2_glock_dq() to gfs2_glock_dq() where it better fits. No functional
change.

Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
Diffstat (limited to 'fs')
-rw-r--r--  fs/gfs2/glock.c | 39
1 file changed, 20 insertions(+), 19 deletions(-)
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index a4c15e86b1f5..524f3c96b9a4 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -1590,29 +1590,10 @@ static inline bool needs_demote(struct gfs2_glock *gl)
 static void __gfs2_glock_dq(struct gfs2_holder *gh)
 {
         struct gfs2_glock *gl = gh->gh_gl;
-        struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
         unsigned delay = 0;
         int fast_path = 0;
 
         /*
-         * If we're in the process of file system withdraw, we cannot just
-         * dequeue any glocks until our journal is recovered, lest we introduce
-         * file system corruption. We need two exceptions to this rule: We need
-         * to allow unlocking of nondisk glocks and the glock for our own
-         * journal that needs recovery.
-         */
-        if (test_bit(SDF_WITHDRAW_RECOVERY, &sdp->sd_flags) &&
-            glock_blocked_by_withdraw(gl) &&
-            gh->gh_gl != sdp->sd_jinode_gl) {
-                sdp->sd_glock_dqs_held++;
-                spin_unlock(&gl->gl_lockref.lock);
-                might_sleep();
-                wait_on_bit(&sdp->sd_flags, SDF_WITHDRAW_RECOVERY,
-                            TASK_UNINTERRUPTIBLE);
-                spin_lock(&gl->gl_lockref.lock);
-        }
-
-        /*
          * This holder should not be cached, so mark it for demote.
          * Note: this should be done before the check for needs_demote
          * below.
@@ -1654,6 +1635,7 @@ static void __gfs2_glock_dq(struct gfs2_holder *gh)
 void gfs2_glock_dq(struct gfs2_holder *gh)
 {
         struct gfs2_glock *gl = gh->gh_gl;
+        struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
 
         spin_lock(&gl->gl_lockref.lock);
         if (!gfs2_holder_queued(gh)) {
@@ -1663,6 +1645,7 @@ void gfs2_glock_dq(struct gfs2_holder *gh)
                  */
                 goto out;
         }
+
         if (list_is_first(&gh->gh_list, &gl->gl_holders) &&
             !test_bit(HIF_HOLDER, &gh->gh_iflags)) {
                 spin_unlock(&gl->gl_lockref.lock);
@@ -1671,6 +1654,24 @@ void gfs2_glock_dq(struct gfs2_holder *gh)
                 spin_lock(&gl->gl_lockref.lock);
         }
 
+        /*
+         * If we're in the process of file system withdraw, we cannot just
+         * dequeue any glocks until our journal is recovered, lest we introduce
+         * file system corruption. We need two exceptions to this rule: We need
+         * to allow unlocking of nondisk glocks and the glock for our own
+         * journal that needs recovery.
+         */
+        if (test_bit(SDF_WITHDRAW_RECOVERY, &sdp->sd_flags) &&
+            glock_blocked_by_withdraw(gl) &&
+            gh->gh_gl != sdp->sd_jinode_gl) {
+                sdp->sd_glock_dqs_held++;
+                spin_unlock(&gl->gl_lockref.lock);
+                might_sleep();
+                wait_on_bit(&sdp->sd_flags, SDF_WITHDRAW_RECOVERY,
+                            TASK_UNINTERRUPTIBLE);
+                spin_lock(&gl->gl_lockref.lock);
+        }
+
         __gfs2_glock_dq(gh);
 out:
         spin_unlock(&gl->gl_lockref.lock);
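
For reference, below is a sketch of how gfs2_glock_dq() reads with this patch
applied. It is assembled purely from the hunks above; code that the diff does
not show (the bodies elided between hunks) is replaced by placeholder comments,
and the helpers used (gfs2_holder_queued(), glock_blocked_by_withdraw(),
__gfs2_glock_dq()) are the ones already defined in fs/gfs2/glock.c.

void gfs2_glock_dq(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

        spin_lock(&gl->gl_lockref.lock);
        if (!gfs2_holder_queued(gh)) {
                /* Holder is no longer queued; body elided in the diff above. */
                goto out;
        }

        if (list_is_first(&gh->gh_list, &gl->gl_holders) &&
            !test_bit(HIF_HOLDER, &gh->gh_iflags)) {
                spin_unlock(&gl->gl_lockref.lock);
                /* ... lines not shown in the diff above ... */
                spin_lock(&gl->gl_lockref.lock);
        }

        /*
         * During withdraw, glocks must not be dequeued until the journal is
         * recovered, except for nondisk glocks and the glock of our own
         * journal (see the comment moved by this patch).
         */
        if (test_bit(SDF_WITHDRAW_RECOVERY, &sdp->sd_flags) &&
            glock_blocked_by_withdraw(gl) &&
            gh->gh_gl != sdp->sd_jinode_gl) {
                sdp->sd_glock_dqs_held++;
                spin_unlock(&gl->gl_lockref.lock);
                might_sleep();
                wait_on_bit(&sdp->sd_flags, SDF_WITHDRAW_RECOVERY,
                            TASK_UNINTERRUPTIBLE);
                spin_lock(&gl->gl_lockref.lock);
        }

        __gfs2_glock_dq(gh);
out:
        spin_unlock(&gl->gl_lockref.lock);
}

As the commit message states, there is no functional change: the wait on
SDF_WITHDRAW_RECOVERY still drops gl_lockref.lock, may sleep, and re-takes the
lock before dequeuing; it is simply performed in gfs2_glock_dq() itself rather
than in the internal helper __gfs2_glock_dq().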