author    Filipe Manana <fdmanana@suse.com>    2024-05-06 13:27:29 +0100
committer David Sterba <dsterba@suse.com>    2024-07-11 15:33:17 +0200
commit    e2844cce75c9e61a27dcc29f0773afe970cde296 (patch)
tree      7fa42f9d311b06f1015b5f4550d1f1f4b3e429ef /fs/btrfs/inode.c
parent    d25f4ec17624b1b18ff2e0a3e9c2baa71c8a86f2 (diff)
btrfs: remove inode_lock from struct btrfs_root and use xarray locks
Currently we use the spinlock inode_lock from struct btrfs_root to serialize access to two different data structures:

1) The delayed inodes xarray (struct btrfs_root::delayed_nodes);

2) The inodes xarray (struct btrfs_root::inodes).

Instead of using our own lock, we can use the spinlock that is part of the xarray implementation, by using the xa_lock() and xa_unlock() APIs and the xarray APIs with the double underscore prefix, which don't take the xarray lock and assume the caller holds it via xa_lock() and xa_unlock(). So remove the spinlock inode_lock from struct btrfs_root and use the corresponding xarray locks. This brings two benefits:

1) We reduce the size of struct btrfs_root, from 1336 bytes down to 1328 bytes on a 64-bit release kernel config;

2) We reduce lock contention by no longer using the same lock for changing two different and unrelated xarrays.

Reviewed-by: Qu Wenruo <wqu@suse.com>
Signed-off-by: Filipe Manana <fdmanana@suse.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
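As context for the diff below, here is a minimal self-contained sketch of the locking pattern the patch adopts: take the xarray's embedded spinlock with xa_lock()/xa_unlock() and call the double-underscore variant that expects the lock to already be held. The container and helper names (my_root, my_del_entry) are hypothetical, not from the patch.

#include <linux/xarray.h>

/* Hypothetical container standing in for struct btrfs_root. */
struct my_root {
	struct xarray inodes;	/* the xarray's xa_lock replaces inode_lock */
};

/*
 * Erase an entry and sample emptiness atomically. __xa_erase() is the
 * unlocked variant of xa_erase() and requires the caller to hold
 * xa_lock(), which is simply the spinlock embedded in struct xarray.
 */
static void *my_del_entry(struct my_root *root, unsigned long index)
{
	void *entry;

	xa_lock(&root->inodes);
	entry = __xa_erase(&root->inodes, index);
	if (xa_empty(&root->inodes))
		pr_debug("xarray now empty\n");
	xa_unlock(&root->inodes);

	return entry;
}

Because the lock lives inside the xarray itself, each xarray gets its own lock for free, which is exactly how the patch splits contention between the delayed_nodes and inodes xarrays.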
Diffstat (limited to 'fs/btrfs/inode.c')
-rw-r--r--  fs/btrfs/inode.c | 18 ++++++++----------
1 file changed, 8 insertions(+), 10 deletions(-)
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index e05915133fd0..2a8bc014579e 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -5509,9 +5509,7 @@ static int btrfs_add_inode_to_root(struct btrfs_inode *inode, bool prealloc)
return ret;
}
- spin_lock(&root->inode_lock);
existing = xa_store(&root->inodes, ino, inode, GFP_ATOMIC);
- spin_unlock(&root->inode_lock);
if (xa_is_err(existing)) {
ret = xa_err(existing);
@@ -5531,16 +5529,16 @@ static void btrfs_del_inode_from_root(struct btrfs_inode *inode)
struct btrfs_inode *entry;
bool empty = false;
- spin_lock(&root->inode_lock);
- entry = xa_erase(&root->inodes, btrfs_ino(inode));
+ xa_lock(&root->inodes);
+ entry = __xa_erase(&root->inodes, btrfs_ino(inode));
if (entry == inode)
empty = xa_empty(&root->inodes);
- spin_unlock(&root->inode_lock);
+ xa_unlock(&root->inodes);
if (empty && btrfs_root_refs(&root->root_item) == 0) {
- spin_lock(&root->inode_lock);
+ xa_lock(&root->inodes);
empty = xa_empty(&root->inodes);
- spin_unlock(&root->inode_lock);
+ xa_unlock(&root->inodes);
if (empty)
btrfs_add_dead_root(root);
}
@@ -10874,7 +10872,7 @@ struct btrfs_inode *btrfs_find_first_inode(struct btrfs_root *root, u64 min_ino)
struct btrfs_inode *inode;
unsigned long from = min_ino;
- spin_lock(&root->inode_lock);
+ xa_lock(&root->inodes);
while (true) {
inode = xa_find(&root->inodes, &from, ULONG_MAX, XA_PRESENT);
if (!inode)
@@ -10883,9 +10881,9 @@ struct btrfs_inode *btrfs_find_first_inode(struct btrfs_root *root, u64 min_ino)
break;
from = btrfs_ino(inode) + 1;
- cond_resched_lock(&root->inode_lock);
+ cond_resched_lock(&root->inodes.xa_lock);
}
- spin_unlock(&root->inode_lock);
+ xa_unlock(&root->inodes);
return inode;
}
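One detail worth noting from the last hunk: cond_resched_lock() operates on a plain spinlock_t, so the code names the xarray's embedded lock field (root->inodes.xa_lock) directly rather than going through the xa_lock()/xa_unlock() wrappers. A hedged sketch of that iteration pattern in isolation (my_scan is a hypothetical helper, not part of the patch):

#include <linux/xarray.h>
#include <linux/sched.h>

/* Walk all present entries while holding the xarray's own spinlock. */
static void my_scan(struct xarray *xa)
{
	unsigned long index = 0;
	void *entry;

	xa_lock(xa);
	while ((entry = xa_find(xa, &index, ULONG_MAX, XA_PRESENT))) {
		/* ... use entry; it is stable while xa_lock() is held ... */
		index++;
		/*
		 * cond_resched_lock() takes a spinlock_t, so pass the
		 * xarray's embedded lock; it may drop and retake the lock,
		 * after which the walk resumes from the saved index.
		 */
		cond_resched_lock(&xa->xa_lock);
	}
	xa_unlock(xa);
}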