author	Alexander Aring <aahringo@redhat.com>	2024-08-02 13:26:46 -0400
committer	David Teigland <teigland@redhat.com>	2024-08-08 15:15:08 -0500
commit	c846f732b97aa30ab91c03b0337cc0c8e27b24df (patch)
tree	3e19b6dea753ed6d119126338ee0ed4944609861
parent	5be323b0c64dbecdc33b43012f927e6af82d62d3 (diff)
dlm: move lkb xarray lookup out of lock
This patch moves the xarray lookup of the lkb out of the ls_lkbxa_lock
read lock section. We can do that because the xarray can be accessed
locklessly by readers such as xa_load() when running under
rcu_read_lock(). We then confirm under ls_lkbxa_lock that the lkb is
still part of ls_lkbxa and take a reference while it is, so that the
lkb cannot be freed right after the lookup. To check whether the lkb
is still part of ls_lkbxa we use kref_read(): the last put removes the
lkb from ls_lkbxa, so any nonzero refcount means it is still a member
and a reference can safely be taken.
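As an illustration only (not the DLM code itself; struct obj, obj_xa,
obj_xa_lock and the function names below are made up), the pattern
looks roughly like this: lookups run xa_load() under RCU alone,
revalidate membership via the refcount under the read lock, and the
final put erases the object and defers the free with call_rcu() so
concurrent lockless readers never touch freed memory.

#include <linux/container_of.h>
#include <linux/kref.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/xarray.h>

struct obj {
	unsigned long id;
	struct kref ref;
	struct rcu_head rcu;
};

static DEFINE_XARRAY(obj_xa);
static DEFINE_RWLOCK(obj_xa_lock);

/* Lockless lookup: xa_load() needs only rcu_read_lock(); the rwlock
 * is taken afterwards just to revalidate membership before pinning a
 * reference. */
static struct obj *obj_lookup(unsigned long id)
{
	struct obj *o;

	rcu_read_lock();
	o = xa_load(&obj_xa, id);
	if (o) {
		/* The final put drops the refcount to zero and erases
		 * the object from obj_xa under obj_xa_lock, so a nonzero
		 * refcount seen under the same lock proves the object is
		 * still a member and safe to pin. */
		read_lock_bh(&obj_xa_lock);
		if (kref_read(&o->ref))
			kref_get(&o->ref);
		else
			o = NULL;
		read_unlock_bh(&obj_xa_lock);
	}
	rcu_read_unlock();

	return o;
}

static void obj_free_rcu(struct rcu_head *rcu)
{
	kfree(container_of(rcu, struct obj, rcu));
}

/* Called by kref_put() with obj_xa_lock write-held, see obj_put(). */
static void obj_release(struct kref *ref)
{
	struct obj *o = container_of(ref, struct obj, ref);

	xa_erase(&obj_xa, o->id);
}

static void obj_put(struct obj *o)
{
	bool dead;

	/* Dropping to zero and erasing happen under the same lock the
	 * lookup revalidates under, so kref_get() there cannot race
	 * with the count reaching zero. */
	write_lock_bh(&obj_xa_lock);
	dead = kref_put(&o->ref, obj_release);
	write_unlock_bh(&obj_xa_lock);
	if (dead)
		/* Defer the free until concurrent RCU readers finish. */
		call_rcu(&o->rcu, obj_free_rcu);
}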
A similar approach was taken with the DLM rsb rhashtable, just with a
flag instead of the refcounter, because the rsb refcounter has a
slightly different meaning.
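For comparison, a rough sketch of that flag-based variant (again with
made-up names and an xarray for brevity; the real rsb code uses an
rhashtable and its own flag, field, and lock names): removal clears
the flag under the lock, and lookups treat a cleared flag as "no
longer a member".

#include <linux/bitops.h>
#include <linux/kref.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/xarray.h>

#define FOBJ_HASHED	0	/* bit set while the object is in the table */

struct fobj {
	unsigned long flags;
	struct kref ref;
};

static DEFINE_XARRAY(fobj_xa);
static DEFINE_RWLOCK(fobj_xa_lock);

static struct fobj *fobj_lookup(unsigned long id)
{
	struct fobj *o;

	rcu_read_lock();
	o = xa_load(&fobj_xa, id);
	if (o) {
		/* Membership is signalled by an explicit flag, cleared
		 * on removal under fobj_xa_lock, rather than by the
		 * refcount reaching zero. */
		read_lock_bh(&fobj_xa_lock);
		if (test_bit(FOBJ_HASHED, &o->flags))
			kref_get(&o->ref);
		else
			o = NULL;
		read_unlock_bh(&fobj_xa_lock);
	}
	rcu_read_unlock();

	return o;
}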
Signed-off-by: Alexander Aring <aahringo@redhat.com>
Signed-off-by: David Teigland <teigland@redhat.com>
-rw-r--r--	fs/dlm/dlm_internal.h	1
-rw-r--r--	fs/dlm/lock.c	18
-rw-r--r--	fs/dlm/memory.c	9
3 files changed, 23 insertions, 5 deletions
diff --git a/fs/dlm/dlm_internal.h b/fs/dlm/dlm_internal.h
index 0562099e60eb..d534a4bc162b 100644
--- a/fs/dlm/dlm_internal.h
+++ b/fs/dlm/dlm_internal.h
@@ -295,6 +295,7 @@ struct dlm_lkb {
 		void *lkb_astparam;	/* caller's ast arg */
 		struct dlm_user_args *lkb_ua;
 	};
+	struct rcu_head rcu;
 };
 
 /*
diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c
index 9d3ec359d5e3..865dc70a9dfc 100644
--- a/fs/dlm/lock.c
+++ b/fs/dlm/lock.c
@@ -1527,11 +1527,21 @@ static int find_lkb(struct dlm_ls *ls, uint32_t lkid, struct dlm_lkb **lkb_ret)
 {
 	struct dlm_lkb *lkb;
 
-	read_lock_bh(&ls->ls_lkbxa_lock);
+	rcu_read_lock();
 	lkb = xa_load(&ls->ls_lkbxa, lkid);
-	if (lkb)
-		kref_get(&lkb->lkb_ref);
-	read_unlock_bh(&ls->ls_lkbxa_lock);
+	if (lkb) {
+		/* check if lkb is still part of lkbxa under lkbxa_lock as
+		 * the lkb_ref is tight to the lkbxa data structure, see
+		 * __put_lkb().
+		 */
+		read_lock_bh(&ls->ls_lkbxa_lock);
+		if (kref_read(&lkb->lkb_ref))
+			kref_get(&lkb->lkb_ref);
+		else
+			lkb = NULL;
+		read_unlock_bh(&ls->ls_lkbxa_lock);
+	}
+	rcu_read_unlock();
 
 	*lkb_ret = lkb;
 	return lkb ? 0 : -ENOENT;
diff --git a/fs/dlm/memory.c b/fs/dlm/memory.c
index 442898cf7185..5c35cc67aca4 100644
--- a/fs/dlm/memory.c
+++ b/fs/dlm/memory.c
@@ -115,8 +115,10 @@ struct dlm_lkb *dlm_allocate_lkb(void)
 	return kmem_cache_zalloc(lkb_cache, GFP_ATOMIC);
 }
 
-void dlm_free_lkb(struct dlm_lkb *lkb)
+static void __free_lkb_rcu(struct rcu_head *rcu)
 {
+	struct dlm_lkb *lkb = container_of(rcu, struct dlm_lkb, rcu);
+
 	if (test_bit(DLM_DFL_USER_BIT, &lkb->lkb_dflags)) {
 		struct dlm_user_args *ua;
 		ua = lkb->lkb_ua;
@@ -129,6 +131,11 @@ void dlm_free_lkb(struct dlm_lkb *lkb)
 	kmem_cache_free(lkb_cache, lkb);
 }
 
+void dlm_free_lkb(struct dlm_lkb *lkb)
+{
+	call_rcu(&lkb->rcu, __free_lkb_rcu);
+}
+
 struct dlm_mhandle *dlm_allocate_mhandle(void)
 {
 	return kmem_cache_alloc(mhandle_cache, GFP_ATOMIC);