author		Andreas Gruenbacher <agruenba@redhat.com>	2024-03-30 04:41:48 +0100
committer	Andreas Gruenbacher <agruenba@redhat.com>	2024-05-28 16:59:53 +0200
commit		edeb180f1c8cfe2e789109766b636430adb5a0a3 (patch)
tree		fa7c15bea7037f1a7a07be97b0fd3a2cba264f0a /fs/gfs2
parent		1fb5f67e21805333daca4b0c96416254de96400e (diff)
gfs2: Rename handle_callback to request_demote
Function handle_callback() is used to request a glock demote. This often happens in response to a conflicting remote locking request and subsequent bast callback from DLM, but there are other reasons for triggering a demote request as well, such as when trying to release a glock in response to memory pressure. To clarify that, rename the function to request_demote().

Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
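For context, the call pattern at the demote sites touched by this patch looks roughly like the sketch below. It only rearranges calls that already appear in the diff (request_demote(), demote_ok(), gfs2_glock_queue_work()); the surrounding functions are abbreviated, so it is not compilable on its own outside fs/gfs2/glock.c.

/* Illustrative sketch only, based on the call sites in this patch. */

/* Local demote (e.g. the shrinker and clear_glock paths): the caller
 * holds gl->gl_lockref.lock, requests an immediate demote to the
 * unlocked state, then queues the glock work item to carry it out. */
spin_lock(&gl->gl_lockref.lock);
if (demote_ok(gl))
	request_demote(gl, LM_ST_UNLOCKED, 0, false);
gfs2_glock_queue_work(gl, 0);
spin_unlock(&gl->gl_lockref.lock);

/* Remote demote (gfs2_glock_cb): the bast callback from DLM asks for
 * 'state', possibly after a delay, with remote == true. */
request_demote(gl, state, delay, true);
gfs2_glock_queue_work(gl, delay);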
Diffstat (limited to 'fs/gfs2')
-rw-r--r--	fs/gfs2/glock.c	20
1 file changed, 10 insertions(+), 10 deletions(-)
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index bac7078e4870..c583ba05e8f5 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -61,8 +61,8 @@ struct gfs2_glock_iter {
typedef void (*glock_examiner) (struct gfs2_glock * gl);
static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target);
-static void handle_callback(struct gfs2_glock *gl, unsigned int state,
- unsigned long delay, bool remote);
+static void request_demote(struct gfs2_glock *gl, unsigned int state,
+ unsigned long delay, bool remote);
static struct dentry *gfs2_root;
static struct workqueue_struct *glock_workqueue;
@@ -811,7 +811,7 @@ skip_inval:
(target != LM_ST_UNLOCKED ||
test_bit(SDF_WITHDRAW_RECOVERY, &sdp->sd_flags))) {
if (!is_system_glock(gl)) {
- handle_callback(gl, LM_ST_UNLOCKED, 0, false); /* sets demote */
+ request_demote(gl, LM_ST_UNLOCKED, 0, false);
/*
* Ordinarily, we would call dlm and its callback would call
* finish_xmote, which would call state_change() to the new state.
@@ -1459,7 +1459,7 @@ out:
}
/**
- * handle_callback - process a demote request
+ * request_demote - process a demote request
* @gl: the glock
* @state: the state the caller wants us to change to
* @delay: zero to demote immediately; otherwise pending demote
@@ -1469,8 +1469,8 @@ out:
* practise: LM_ST_SHARED and LM_ST_UNLOCKED
*/
-static void handle_callback(struct gfs2_glock *gl, unsigned int state,
- unsigned long delay, bool remote)
+static void request_demote(struct gfs2_glock *gl, unsigned int state,
+ unsigned long delay, bool remote)
{
if (delay)
set_bit(GLF_PENDING_DEMOTE, &gl->gl_flags);
@@ -1686,7 +1686,7 @@ static void __gfs2_glock_dq(struct gfs2_holder *gh)
* below.
*/
if (gh->gh_flags & GL_NOCACHE)
- handle_callback(gl, LM_ST_UNLOCKED, 0, false);
+ request_demote(gl, LM_ST_UNLOCKED, 0, false);
list_del_init(&gh->gh_list);
clear_bit(HIF_HOLDER, &gh->gh_iflags);
@@ -1933,7 +1933,7 @@ void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state)
if (test_bit(GLF_HAVE_REPLY, &gl->gl_flags))
delay = gl->gl_hold_time;
}
- handle_callback(gl, state, delay, true);
+ request_demote(gl, state, delay, true);
gfs2_glock_queue_work(gl, delay);
spin_unlock(&gl->gl_lockref.lock);
}
@@ -2062,7 +2062,7 @@ add_back_to_lru:
freed++;
gl->gl_lockref.count++;
if (demote_ok(gl))
- handle_callback(gl, LM_ST_UNLOCKED, 0, false);
+ request_demote(gl, LM_ST_UNLOCKED, 0, false);
gfs2_glock_queue_work(gl, 0);
spin_unlock(&gl->gl_lockref.lock);
cond_resched_lock(&lru_lock);
@@ -2205,7 +2205,7 @@ static void clear_glock(struct gfs2_glock *gl)
if (!__lockref_is_dead(&gl->gl_lockref)) {
gl->gl_lockref.count++;
if (gl->gl_state != LM_ST_UNLOCKED)
- handle_callback(gl, LM_ST_UNLOCKED, 0, false);
+ request_demote(gl, LM_ST_UNLOCKED, 0, false);
gfs2_glock_queue_work(gl, 0);
}
spin_unlock(&gl->gl_lockref.lock);