path: root/fs/dlm/recoverd.c
author     Alexander Aring <aahringo@redhat.com>  2024-04-02 15:18:09 -0400
committer  David Teigland <teigland@redhat.com>   2024-04-09 11:45:23 -0500
commit     578acf9a87a87531df5b59b3799ccc1256a4bbcc (patch)
tree       9354ed57d97ee78616e338e5f566ce5fe4a9a0b7 /fs/dlm/recoverd.c
parent     308533b4b1d55892d939286313fb73c1527444ce (diff)
download   lwn-578acf9a87a87531df5b59b3799ccc1256a4bbcc.tar.gz
           lwn-578acf9a87a87531df5b59b3799ccc1256a4bbcc.zip
dlm: use spin_lock_bh for message processing
Use spin_lock_bh for all spinlocks involved in message processing, in preparation for softirq message processing. DLM lock requests from user space involve dlm processing in user context, in addition to the standard kernel context, necessitating bh variants.

Signed-off-by: Alexander Aring <aahringo@redhat.com>
Signed-off-by: David Teigland <teigland@redhat.com>
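The rule the patch follows is the standard one for state shared between process context and softirq (bottom-half) context: the process-context side must take the lock with a _bh variant so a softirq cannot interrupt the critical section on the same CPU and deadlock on the same lock, while the softirq side can use the plain variant. A minimal sketch of that pattern, with hypothetical names not taken from the DLM code:

#include <linux/spinlock.h>
#include <linux/list.h>

/* Hypothetical state shared between process context (e.g. a user-space
 * request entering the kernel) and softirq context (e.g. receive-side
 * message processing). */
static DEFINE_SPINLOCK(msg_lock);
static LIST_HEAD(msg_queue);

struct msg {
	struct list_head list;
};

/* Process-context side: the _bh variant disables bottom halves on the
 * local CPU, so a softirq raised while the lock is held cannot run,
 * retake msg_lock, and deadlock. */
static void queue_msg_from_process_context(struct msg *m)
{
	spin_lock_bh(&msg_lock);
	list_add_tail(&m->list, &msg_queue);
	spin_unlock_bh(&msg_lock);
}

/* Softirq-context side: plain spin_lock() suffices here, because a
 * softirq is never preempted by another softirq on the same CPU. */
static struct msg *dequeue_msg_from_softirq(void)
{
	struct msg *m = NULL;

	spin_lock(&msg_lock);
	if (!list_empty(&msg_queue)) {
		m = list_first_entry(&msg_queue, struct msg, list);
		list_del(&m->list);
	}
	spin_unlock(&msg_lock);
	return m;
}

In this file the same conversion is applied to rwlocks as well as spinlocks: write_lock()/write_unlock() on ls_masters_lock and ls_recv_active become write_lock_bh()/write_unlock_bh(), and spin_lock()/spin_unlock() on ls_recover_lock become the _bh variants, as shown in the diff below.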
Diffstat (limited to 'fs/dlm/recoverd.c')
-rw-r--r--  fs/dlm/recoverd.c  20
1 file changed, 10 insertions, 10 deletions
diff --git a/fs/dlm/recoverd.c b/fs/dlm/recoverd.c
index a11ae1da2f60..c82cc48988c6 100644
--- a/fs/dlm/recoverd.c
+++ b/fs/dlm/recoverd.c
@@ -26,7 +26,7 @@ static int dlm_create_masters_list(struct dlm_ls *ls)
struct dlm_rsb *r;
int i, error = 0;
- write_lock(&ls->ls_masters_lock);
+ write_lock_bh(&ls->ls_masters_lock);
if (!list_empty(&ls->ls_masters_list)) {
log_error(ls, "root list not empty");
error = -EINVAL;
@@ -46,7 +46,7 @@ static int dlm_create_masters_list(struct dlm_ls *ls)
spin_unlock_bh(&ls->ls_rsbtbl[i].lock);
}
out:
- write_unlock(&ls->ls_masters_lock);
+ write_unlock_bh(&ls->ls_masters_lock);
return error;
}
@@ -54,12 +54,12 @@ static void dlm_release_masters_list(struct dlm_ls *ls)
{
struct dlm_rsb *r, *safe;
- write_lock(&ls->ls_masters_lock);
+ write_lock_bh(&ls->ls_masters_lock);
list_for_each_entry_safe(r, safe, &ls->ls_masters_list, res_masters_list) {
list_del_init(&r->res_masters_list);
dlm_put_rsb(r);
}
- write_unlock(&ls->ls_masters_lock);
+ write_unlock_bh(&ls->ls_masters_lock);
}
static void dlm_create_root_list(struct dlm_ls *ls, struct list_head *root_list)
@@ -103,9 +103,9 @@ static int enable_locking(struct dlm_ls *ls, uint64_t seq)
{
int error = -EINTR;
- write_lock(&ls->ls_recv_active);
+ write_lock_bh(&ls->ls_recv_active);
- spin_lock(&ls->ls_recover_lock);
+ spin_lock_bh(&ls->ls_recover_lock);
if (ls->ls_recover_seq == seq) {
set_bit(LSFL_RUNNING, &ls->ls_flags);
/* unblocks processes waiting to enter the dlm */
@@ -113,9 +113,9 @@ static int enable_locking(struct dlm_ls *ls, uint64_t seq)
clear_bit(LSFL_RECOVER_LOCK, &ls->ls_flags);
error = 0;
}
- spin_unlock(&ls->ls_recover_lock);
+ spin_unlock_bh(&ls->ls_recover_lock);
- write_unlock(&ls->ls_recv_active);
+ write_unlock_bh(&ls->ls_recv_active);
return error;
}
@@ -349,12 +349,12 @@ static void do_ls_recovery(struct dlm_ls *ls)
struct dlm_recover *rv = NULL;
int error;
- spin_lock(&ls->ls_recover_lock);
+ spin_lock_bh(&ls->ls_recover_lock);
rv = ls->ls_recover_args;
ls->ls_recover_args = NULL;
if (rv && ls->ls_recover_seq == rv->seq)
clear_bit(LSFL_RECOVER_STOP, &ls->ls_flags);
- spin_unlock(&ls->ls_recover_lock);
+ spin_unlock_bh(&ls->ls_recover_lock);
if (rv) {
error = ls_recover(ls, rv);